# File: lerobot-main/lerobot/__init__.py """""" import itertools from lerobot.__version__ import __version__ available_tasks_per_env = {'aloha': ['AlohaInsertion-v0', 'AlohaTransferCube-v0'], 'pusht': ['PushT-v0'], 'xarm': ['XarmLift-v0'], 'dora_aloha_real': ['DoraAloha-v0', 'DoraKoch-v0', 'DoraReachy2-v0']} available_envs = list(available_tasks_per_env.keys()) available_datasets_per_env = {'aloha': ['lerobot/aloha_sim_insertion_human', 'lerobot/aloha_sim_insertion_scripted', 'lerobot/aloha_sim_transfer_cube_human', 'lerobot/aloha_sim_transfer_cube_scripted', 'lerobot/aloha_sim_insertion_human_image', 'lerobot/aloha_sim_insertion_scripted_image', 'lerobot/aloha_sim_transfer_cube_human_image', 'lerobot/aloha_sim_transfer_cube_scripted_image'], 'pusht': ['lerobot/pusht', 'lerobot/pusht_image'], 'xarm': ['lerobot/xarm_lift_medium', 'lerobot/xarm_lift_medium_replay', 'lerobot/xarm_push_medium', 'lerobot/xarm_push_medium_replay', 'lerobot/xarm_lift_medium_image', 'lerobot/xarm_lift_medium_replay_image', 'lerobot/xarm_push_medium_image', 'lerobot/xarm_push_medium_replay_image'], 'dora_aloha_real': ['lerobot/aloha_static_battery', 'lerobot/aloha_static_candy', 'lerobot/aloha_static_coffee', 'lerobot/aloha_static_coffee_new', 'lerobot/aloha_static_cups_open', 'lerobot/aloha_static_fork_pick_up', 'lerobot/aloha_static_pingpong_test', 'lerobot/aloha_static_pro_pencil', 'lerobot/aloha_static_screw_driver', 'lerobot/aloha_static_tape', 'lerobot/aloha_static_thread_velcro', 'lerobot/aloha_static_towel', 'lerobot/aloha_static_vinh_cup', 'lerobot/aloha_static_vinh_cup_left', 'lerobot/aloha_static_ziploc_slide']} available_real_world_datasets = ['lerobot/aloha_mobile_cabinet', 'lerobot/aloha_mobile_chair', 'lerobot/aloha_mobile_elevator', 'lerobot/aloha_mobile_shrimp', 'lerobot/aloha_mobile_wash_pan', 'lerobot/aloha_mobile_wipe_wine', 'lerobot/aloha_static_battery', 'lerobot/aloha_static_candy', 'lerobot/aloha_static_coffee', 'lerobot/aloha_static_coffee_new', 'lerobot/aloha_static_cups_open', 'lerobot/aloha_static_fork_pick_up', 'lerobot/aloha_static_pingpong_test', 'lerobot/aloha_static_pro_pencil', 'lerobot/aloha_static_screw_driver', 'lerobot/aloha_static_tape', 'lerobot/aloha_static_thread_velcro', 'lerobot/aloha_static_towel', 'lerobot/aloha_static_vinh_cup', 'lerobot/aloha_static_vinh_cup_left', 'lerobot/aloha_static_ziploc_slide', 'lerobot/umi_cup_in_the_wild', 'lerobot/unitreeh1_fold_clothes', 'lerobot/unitreeh1_rearrange_objects', 'lerobot/unitreeh1_two_robot_greeting', 'lerobot/unitreeh1_warehouse', 'lerobot/nyu_rot_dataset', 'lerobot/utokyo_saytap', 'lerobot/imperialcollege_sawyer_wrist_cam', 'lerobot/utokyo_xarm_bimanual', 'lerobot/tokyo_u_lsmo', 'lerobot/utokyo_pr2_opening_fridge', 'lerobot/cmu_franka_exploration_dataset', 'lerobot/cmu_stretch', 'lerobot/asu_table_top', 'lerobot/utokyo_pr2_tabletop_manipulation', 'lerobot/utokyo_xarm_pick_and_place', 'lerobot/ucsd_kitchen_dataset', 'lerobot/austin_buds_dataset', 'lerobot/dlr_sara_grid_clamp', 'lerobot/conq_hose_manipulation', 'lerobot/columbia_cairlab_pusht_real', 'lerobot/dlr_sara_pour', 'lerobot/dlr_edan_shared_control', 'lerobot/ucsd_pick_and_place_dataset', 'lerobot/berkeley_cable_routing', 'lerobot/nyu_franka_play_dataset', 'lerobot/austin_sirius_dataset', 'lerobot/cmu_play_fusion', 'lerobot/berkeley_gnm_sac_son', 'lerobot/nyu_door_opening_surprising_effectiveness', 'lerobot/berkeley_fanuc_manipulation', 'lerobot/jaco_play', 'lerobot/viola', 'lerobot/kaist_nonprehensile', 'lerobot/berkeley_mvp', 'lerobot/uiuc_d3field', 
'lerobot/berkeley_gnm_recon', 'lerobot/austin_sailor_dataset', 'lerobot/utaustin_mutex', 'lerobot/roboturk', 'lerobot/stanford_hydra_dataset', 'lerobot/berkeley_autolab_ur5', 'lerobot/stanford_robocook', 'lerobot/toto', 'lerobot/fmb', 'lerobot/droid_100', 'lerobot/berkeley_rpt', 'lerobot/stanford_kuka_multimodal_dataset', 'lerobot/iamlab_cmu_pickup_insert', 'lerobot/taco_play', 'lerobot/berkeley_gnm_cory_hall', 'lerobot/usc_cloth_sim'] available_datasets = list(itertools.chain(*available_datasets_per_env.values(), available_real_world_datasets)) available_policies = ['act', 'diffusion', 'tdmpc', 'vqbet'] available_robots = ['koch', 'koch_bimanual', 'aloha'] available_policies_per_env = {'aloha': ['act'], 'pusht': ['diffusion', 'vqbet'], 'xarm': ['tdmpc'], 'dora_aloha_real': ['act_real']} env_task_pairs = [(env, task) for (env, tasks) in available_tasks_per_env.items() for task in tasks] env_dataset_pairs = [(env, dataset) for (env, datasets) in available_datasets_per_env.items() for dataset in datasets] env_dataset_policy_triplets = [(env, dataset, policy) for (env, datasets) in available_datasets_per_env.items() for dataset in datasets for policy in available_policies_per_env[env]] # File: lerobot-main/lerobot/common/datasets/compute_stats.py from copy import deepcopy from math import ceil import einops import torch import tqdm from datasets import Image from lerobot.common.datasets.video_utils import VideoFrame def get_stats_einops_patterns(dataset, num_workers=0): dataloader = torch.utils.data.DataLoader(dataset, num_workers=num_workers, batch_size=2, shuffle=False) batch = next(iter(dataloader)) stats_patterns = {} for (key, feats_type) in dataset.features.items(): if key == 'language_instruction': continue assert batch[key].dtype != torch.float64 if isinstance(feats_type, (VideoFrame, Image)): (_, c, h, w) = batch[key].shape assert c < h and c < w, f'expect channel first images, but instead {batch[key].shape}' assert batch[key].dtype == torch.float32, f'expect torch.float32, but instead batch[key].dtype={batch[key].dtype!r}' assert batch[key].max() <= 1, f'expect pixels lower than 1, but instead batch[key].max()={batch[key].max()!r}' assert batch[key].min() >= 0, f'expect pixels greater than 1, but instead batch[key].min()={batch[key].min()!r}' stats_patterns[key] = 'b c h w -> c 1 1' elif batch[key].ndim == 2: stats_patterns[key] = 'b c -> c ' elif batch[key].ndim == 1: stats_patterns[key] = 'b -> 1' else: raise ValueError(f'{key}, {feats_type}, {batch[key].shape}') return stats_patterns def compute_stats(dataset, batch_size=32, num_workers=16, max_num_samples=None): if max_num_samples is None: max_num_samples = len(dataset) stats_patterns = get_stats_einops_patterns(dataset, num_workers) (mean, std, max, min) = ({}, {}, {}, {}) for key in stats_patterns: mean[key] = torch.tensor(0.0).float() std[key] = torch.tensor(0.0).float() max[key] = torch.tensor(-float('inf')).float() min[key] = torch.tensor(float('inf')).float() def create_seeded_dataloader(dataset, batch_size, seed): generator = torch.Generator() generator.manual_seed(seed) dataloader = torch.utils.data.DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, shuffle=True, drop_last=False, generator=generator) return dataloader first_batch = None running_item_count = 0 dataloader = create_seeded_dataloader(dataset, batch_size, seed=1337) for (i, batch) in enumerate(tqdm.tqdm(dataloader, total=ceil(max_num_samples / batch_size), desc='Compute mean, min, max')): this_batch_size = len(batch['index']) 
running_item_count += this_batch_size if first_batch is None: first_batch = deepcopy(batch) for (key, pattern) in stats_patterns.items(): batch[key] = batch[key].float() batch_mean = einops.reduce(batch[key], pattern, 'mean') mean[key] = mean[key] + this_batch_size * (batch_mean - mean[key]) / running_item_count max[key] = torch.maximum(max[key], einops.reduce(batch[key], pattern, 'max')) min[key] = torch.minimum(min[key], einops.reduce(batch[key], pattern, 'min')) if i == ceil(max_num_samples / batch_size) - 1: break first_batch_ = None running_item_count = 0 dataloader = create_seeded_dataloader(dataset, batch_size, seed=1337) for (i, batch) in enumerate(tqdm.tqdm(dataloader, total=ceil(max_num_samples / batch_size), desc='Compute std')): this_batch_size = len(batch['index']) running_item_count += this_batch_size if first_batch_ is None: first_batch_ = deepcopy(batch) for key in stats_patterns: assert torch.equal(first_batch_[key], first_batch[key]) for (key, pattern) in stats_patterns.items(): batch[key] = batch[key].float() batch_std = einops.reduce((batch[key] - mean[key]) ** 2, pattern, 'mean') std[key] = std[key] + this_batch_size * (batch_std - std[key]) / running_item_count if i == ceil(max_num_samples / batch_size) - 1: break for key in stats_patterns: std[key] = torch.sqrt(std[key]) stats = {} for key in stats_patterns: stats[key] = {'mean': mean[key], 'std': std[key], 'max': max[key], 'min': min[key]} return stats def aggregate_stats(ls_datasets) -> dict[str, torch.Tensor]: data_keys = set() for dataset in ls_datasets: data_keys.update(dataset.stats.keys()) stats = {k: {} for k in data_keys} for data_key in data_keys: for stat_key in ['min', 'max']: stats[data_key][stat_key] = einops.reduce(torch.stack([d.stats[data_key][stat_key] for d in ls_datasets if data_key in d.stats], dim=0), 'n ... 
-> ...', stat_key) total_samples = sum((d.num_samples for d in ls_datasets if data_key in d.stats)) stats[data_key]['mean'] = sum((d.stats[data_key]['mean'] * (d.num_samples / total_samples) for d in ls_datasets if data_key in d.stats)) stats[data_key]['std'] = torch.sqrt(sum(((d.stats[data_key]['std'] ** 2 + (d.stats[data_key]['mean'] - stats[data_key]['mean']) ** 2) * (d.num_samples / total_samples) for d in ls_datasets if data_key in d.stats))) return stats # File: lerobot-main/lerobot/common/datasets/factory.py import logging import torch from omegaconf import ListConfig, OmegaConf from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, MultiLeRobotDataset from lerobot.common.datasets.transforms import get_image_transforms def resolve_delta_timestamps(cfg): delta_timestamps = cfg.training.get('delta_timestamps') if delta_timestamps is not None: for key in delta_timestamps: if isinstance(delta_timestamps[key], str): cfg.training.delta_timestamps[key] = eval(delta_timestamps[key]) def make_dataset(cfg, split: str='train') -> LeRobotDataset | MultiLeRobotDataset: if not isinstance(cfg.dataset_repo_id, (str, ListConfig)): raise ValueError('Expected cfg.dataset_repo_id to be either a single string to load one dataset or a list of strings to load multiple datasets.') if cfg.env.name != 'dora': if isinstance(cfg.dataset_repo_id, str): dataset_repo_ids = [cfg.dataset_repo_id] else: dataset_repo_ids = cfg.dataset_repo_id for dataset_repo_id in dataset_repo_ids: if cfg.env.name not in dataset_repo_id: logging.warning(f'There might be a mismatch between your training dataset (dataset_repo_id={dataset_repo_id!r}) and your environment (cfg.env.name={cfg.env.name!r}).') resolve_delta_timestamps(cfg) image_transforms = None if cfg.training.image_transforms.enable: cfg_tf = cfg.training.image_transforms image_transforms = get_image_transforms(brightness_weight=cfg_tf.brightness.weight, brightness_min_max=cfg_tf.brightness.min_max, contrast_weight=cfg_tf.contrast.weight, contrast_min_max=cfg_tf.contrast.min_max, saturation_weight=cfg_tf.saturation.weight, saturation_min_max=cfg_tf.saturation.min_max, hue_weight=cfg_tf.hue.weight, hue_min_max=cfg_tf.hue.min_max, sharpness_weight=cfg_tf.sharpness.weight, sharpness_min_max=cfg_tf.sharpness.min_max, max_num_transforms=cfg_tf.max_num_transforms, random_order=cfg_tf.random_order) if isinstance(cfg.dataset_repo_id, str): dataset = LeRobotDataset(cfg.dataset_repo_id, split=split, delta_timestamps=cfg.training.get('delta_timestamps'), image_transforms=image_transforms, video_backend=cfg.video_backend) else: dataset = MultiLeRobotDataset(cfg.dataset_repo_id, split=split, delta_timestamps=cfg.training.get('delta_timestamps'), image_transforms=image_transforms, video_backend=cfg.video_backend) if cfg.get('override_dataset_stats'): for (key, stats_dict) in cfg.override_dataset_stats.items(): for (stats_type, listconfig) in stats_dict.items(): stats = OmegaConf.to_container(listconfig, resolve=True) dataset.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32) return dataset # File: lerobot-main/lerobot/common/datasets/lerobot_dataset.py import logging import os from pathlib import Path from typing import Callable import datasets import torch import torch.utils from lerobot.common.datasets.compute_stats import aggregate_stats from lerobot.common.datasets.utils import calculate_episode_data_index, load_episode_data_index, load_hf_dataset, load_info, load_previous_and_future_frames, load_stats, load_videos, reset_episode_index from 
lerobot.common.datasets.video_utils import VideoFrame, load_from_videos CODEBASE_VERSION = 'v1.6' DATA_DIR = Path(os.environ['DATA_DIR']) if 'DATA_DIR' in os.environ else None class LeRobotDataset(torch.utils.data.Dataset): def __init__(self, repo_id: str, root: Path | None=DATA_DIR, split: str='train', image_transforms: Callable | None=None, delta_timestamps: dict[list[float]] | None=None, video_backend: str | None=None): super().__init__() self.repo_id = repo_id self.root = root self.split = split self.image_transforms = image_transforms self.delta_timestamps = delta_timestamps self.hf_dataset = load_hf_dataset(repo_id, CODEBASE_VERSION, root, split) if split == 'train': self.episode_data_index = load_episode_data_index(repo_id, CODEBASE_VERSION, root) else: self.episode_data_index = calculate_episode_data_index(self.hf_dataset) self.hf_dataset = reset_episode_index(self.hf_dataset) self.stats = load_stats(repo_id, CODEBASE_VERSION, root) self.info = load_info(repo_id, CODEBASE_VERSION, root) if self.video: self.videos_dir = load_videos(repo_id, CODEBASE_VERSION, root) self.video_backend = video_backend if video_backend is not None else 'pyav' @property def fps(self) -> int: return self.info['fps'] @property def video(self) -> bool: return self.info.get('video', False) @property def features(self) -> datasets.Features: return self.hf_dataset.features @property def camera_keys(self) -> list[str]: keys = [] for (key, feats) in self.hf_dataset.features.items(): if isinstance(feats, (datasets.Image, VideoFrame)): keys.append(key) return keys @property def video_frame_keys(self) -> list[str]: video_frame_keys = [] for (key, feats) in self.hf_dataset.features.items(): if isinstance(feats, VideoFrame): video_frame_keys.append(key) return video_frame_keys @property def num_samples(self) -> int: return len(self.hf_dataset) @property def num_episodes(self) -> int: return len(self.hf_dataset.unique('episode_index')) @property def tolerance_s(self) -> float: return 1 / self.fps - 0.0001 def __len__(self): return self.num_samples def __getitem__(self, idx): item = self.hf_dataset[idx] if self.delta_timestamps is not None: item = load_previous_and_future_frames(item, self.hf_dataset, self.episode_data_index, self.delta_timestamps, self.tolerance_s) if self.video: item = load_from_videos(item, self.video_frame_keys, self.videos_dir, self.tolerance_s, self.video_backend) if self.image_transforms is not None: for cam in self.camera_keys: item[cam] = self.image_transforms(item[cam]) return item def __repr__(self): return f"{self.__class__.__name__}(\n Repository ID: '{self.repo_id}',\n Split: '{self.split}',\n Number of Samples: {self.num_samples},\n Number of Episodes: {self.num_episodes},\n Type: {('video (.mp4)' if self.video else 'image (.png)')},\n Recorded Frames per Second: {self.fps},\n Camera Keys: {self.camera_keys},\n Video Frame Keys: {(self.video_frame_keys if self.video else 'N/A')},\n Transformations: {self.image_transforms},\n Codebase Version: {self.info.get('codebase_version', '< v1.6')},\n)" @classmethod def from_preloaded(cls, repo_id: str='from_preloaded', root: Path | None=None, split: str='train', transform: callable=None, delta_timestamps: dict[list[float]] | None=None, hf_dataset=None, episode_data_index=None, stats=None, info=None, videos_dir=None, video_backend=None) -> 'LeRobotDataset': obj = cls.__new__(cls) obj.repo_id = repo_id obj.root = root obj.split = split obj.image_transforms = transform obj.delta_timestamps = delta_timestamps obj.hf_dataset = hf_dataset 
obj.episode_data_index = episode_data_index obj.stats = stats obj.info = info if info is not None else {} obj.videos_dir = videos_dir obj.video_backend = video_backend if video_backend is not None else 'pyav' return obj class MultiLeRobotDataset(torch.utils.data.Dataset): def __init__(self, repo_ids: list[str], root: Path | None=DATA_DIR, split: str='train', image_transforms: Callable | None=None, delta_timestamps: dict[str, list[float]] | None=None, video_backend: str | None=None): super().__init__() self.repo_ids = repo_ids self._datasets = [LeRobotDataset(repo_id, root=root, split=split, delta_timestamps=delta_timestamps, image_transforms=image_transforms, video_backend=video_backend) for repo_id in repo_ids] for (repo_id, dataset) in zip(self.repo_ids, self._datasets, strict=True): if dataset.info != self._datasets[0].info: raise ValueError(f'Detected a mismatch in dataset info between {self.repo_ids[0]} and {repo_id}. This is not yet supported.') self.disabled_data_keys = set() intersection_data_keys = set(self._datasets[0].hf_dataset.features) for dataset in self._datasets: intersection_data_keys.intersection_update(dataset.hf_dataset.features) if len(intersection_data_keys) == 0: raise RuntimeError('Multiple datasets were provided but they had no keys common to all of them. The multi-dataset functionality currently only keeps common keys.') for (repo_id, dataset) in zip(self.repo_ids, self._datasets, strict=True): extra_keys = set(dataset.hf_dataset.features).difference(intersection_data_keys) logging.warning(f'keys {extra_keys} of {repo_id} were disabled as they are not contained in all the other datasets.') self.disabled_data_keys.update(extra_keys) self.root = root self.split = split self.image_transforms = image_transforms self.delta_timestamps = delta_timestamps self.stats = aggregate_stats(self._datasets) @property def repo_id_to_index(self): return {repo_id: i for (i, repo_id) in enumerate(self.repo_ids)} @property def repo_index_to_id(self): return {v: k for (k, v) in self.repo_id_to_index.items()} @property def fps(self) -> int: return self._datasets[0].info['fps'] @property def video(self) -> bool: return self._datasets[0].info.get('video', False) @property def features(self) -> datasets.Features: features = {} for dataset in self._datasets: features.update({k: v for (k, v) in dataset.features.items() if k not in self.disabled_data_keys}) return features @property def camera_keys(self) -> list[str]: keys = [] for (key, feats) in self.features.items(): if isinstance(feats, (datasets.Image, VideoFrame)): keys.append(key) return keys @property def video_frame_keys(self) -> list[str]: video_frame_keys = [] for (key, feats) in self.features.items(): if isinstance(feats, VideoFrame): video_frame_keys.append(key) return video_frame_keys @property def num_samples(self) -> int: return sum((d.num_samples for d in self._datasets)) @property def num_episodes(self) -> int: return sum((d.num_episodes for d in self._datasets)) @property def tolerance_s(self) -> float: return 1 / self.fps - 0.0001 def __len__(self): return self.num_samples def __getitem__(self, idx: int) -> dict[str, torch.Tensor]: if idx >= len(self): raise IndexError(f'Index {idx} out of bounds.') start_idx = 0 dataset_idx = 0 for dataset in self._datasets: if idx >= start_idx + dataset.num_samples: start_idx += dataset.num_samples dataset_idx += 1 continue break else: raise AssertionError('We expect the loop to break out as long as the index is within bounds.') item = self._datasets[dataset_idx][idx - start_idx]
item['dataset_index'] = torch.tensor(dataset_idx) for data_key in self.disabled_data_keys: if data_key in item: del item[data_key] return item def __repr__(self): return f"{self.__class__.__name__}(\n Repository IDs: '{self.repo_ids}',\n Split: '{self.split}',\n Number of Samples: {self.num_samples},\n Number of Episodes: {self.num_episodes},\n Type: {('video (.mp4)' if self.video else 'image (.png)')},\n Recorded Frames per Second: {self.fps},\n Camera Keys: {self.camera_keys},\n Video Frame Keys: {(self.video_frame_keys if self.video else 'N/A')},\n Transformations: {self.image_transforms},\n)" # File: lerobot-main/lerobot/common/datasets/online_buffer.py """""" import os from pathlib import Path from typing import Any import numpy as np import torch from lerobot.common.datasets.lerobot_dataset import LeRobotDataset def _make_memmap_safe(**kwargs) -> np.memmap: if kwargs['mode'].startswith('w'): required_space = kwargs['dtype'].itemsize * np.prod(kwargs['shape']) stats = os.statvfs(Path(kwargs['filename']).parent) available_space = stats.f_bavail * stats.f_frsize if required_space >= available_space * 0.8: raise RuntimeError(f"You're about to take up {required_space} of {available_space} bytes available.") return np.memmap(**kwargs) class OnlineBuffer(torch.utils.data.Dataset): NEXT_INDEX_KEY = '_next_index' OCCUPANCY_MASK_KEY = '_occupancy_mask' INDEX_KEY = 'index' FRAME_INDEX_KEY = 'frame_index' EPISODE_INDEX_KEY = 'episode_index' TIMESTAMP_KEY = 'timestamp' IS_PAD_POSTFIX = '_is_pad' def __init__(self, write_dir: str | Path, data_spec: dict[str, Any] | None, buffer_capacity: int | None, fps: float | None=None, delta_timestamps: dict[str, list[float]] | dict[str, np.ndarray] | None=None): self.set_delta_timestamps(delta_timestamps) self._fps = fps self.tolerance_s = 1 / self.fps - 0.0001 if fps is not None else None self._buffer_capacity = buffer_capacity data_spec = self._make_data_spec(data_spec, buffer_capacity) Path(write_dir).mkdir(parents=True, exist_ok=True) self._data = {} for (k, v) in data_spec.items(): self._data[k] = _make_memmap_safe(filename=Path(write_dir) / k, dtype=v['dtype'] if v is not None else None, mode='r+' if (Path(write_dir) / k).exists() else 'w+', shape=tuple(v['shape']) if v is not None else None) @property def delta_timestamps(self) -> dict[str, np.ndarray] | None: return self._delta_timestamps def set_delta_timestamps(self, value: dict[str, list[float]] | None): if value is not None: self._delta_timestamps = {k: np.array(v) for (k, v) in value.items()} else: self._delta_timestamps = None def _make_data_spec(self, data_spec: dict[str, Any], buffer_capacity: int) -> dict[str, dict[str, Any]]: if any((k.startswith('_') for k in data_spec)): raise ValueError("data_spec keys should not start with '_'. This prefix is reserved for internal logic.") preset_keys = {OnlineBuffer.INDEX_KEY, OnlineBuffer.FRAME_INDEX_KEY, OnlineBuffer.EPISODE_INDEX_KEY, OnlineBuffer.TIMESTAMP_KEY} if len((intersection := set(data_spec).intersection(preset_keys))) > 0: raise ValueError(f'data_spec should not contain any of {preset_keys} as these are handled internally. 
The provided data_spec has {intersection}.') complete_data_spec = {OnlineBuffer.NEXT_INDEX_KEY: {'dtype': np.dtype('int64'), 'shape': ()}, OnlineBuffer.OCCUPANCY_MASK_KEY: {'dtype': np.dtype('?'), 'shape': (buffer_capacity,)}, OnlineBuffer.INDEX_KEY: {'dtype': np.dtype('int64'), 'shape': (buffer_capacity,)}, OnlineBuffer.FRAME_INDEX_KEY: {'dtype': np.dtype('int64'), 'shape': (buffer_capacity,)}, OnlineBuffer.EPISODE_INDEX_KEY: {'dtype': np.dtype('int64'), 'shape': (buffer_capacity,)}, OnlineBuffer.TIMESTAMP_KEY: {'dtype': np.dtype('float64'), 'shape': (buffer_capacity,)}} for (k, v) in data_spec.items(): complete_data_spec[k] = {'dtype': v['dtype'], 'shape': (buffer_capacity, *v['shape'])} return complete_data_spec def add_data(self, data: dict[str, np.ndarray]): if len((missing_keys := set(self.data_keys).difference(set(data)))) > 0: raise ValueError(f'Missing data keys: {missing_keys}') new_data_length = len(data[self.data_keys[0]]) if not all((len(data[k]) == new_data_length for k in self.data_keys)): raise ValueError('All data items should have the same length') next_index = self._data[OnlineBuffer.NEXT_INDEX_KEY] assert data[OnlineBuffer.EPISODE_INDEX_KEY][0].item() == 0 assert data[OnlineBuffer.INDEX_KEY][0].item() == 0 if self.num_samples > 0: last_episode_index = self._data[OnlineBuffer.EPISODE_INDEX_KEY][next_index - 1] last_data_index = self._data[OnlineBuffer.INDEX_KEY][next_index - 1] data[OnlineBuffer.EPISODE_INDEX_KEY] += last_episode_index + 1 data[OnlineBuffer.INDEX_KEY] += last_data_index + 1 n_surplus = max(0, new_data_length - (self._buffer_capacity - next_index)) for k in self.data_keys: if n_surplus == 0: slc = slice(next_index, next_index + new_data_length) self._data[k][slc] = data[k] self._data[OnlineBuffer.OCCUPANCY_MASK_KEY][slc] = True else: self._data[k][next_index:] = data[k][:-n_surplus] self._data[OnlineBuffer.OCCUPANCY_MASK_KEY][next_index:] = True self._data[k][:n_surplus] = data[k][-n_surplus:] if n_surplus == 0: self._data[OnlineBuffer.NEXT_INDEX_KEY] = next_index + new_data_length else: self._data[OnlineBuffer.NEXT_INDEX_KEY] = n_surplus @property def data_keys(self) -> list[str]: keys = set(self._data) keys.remove(OnlineBuffer.OCCUPANCY_MASK_KEY) keys.remove(OnlineBuffer.NEXT_INDEX_KEY) return sorted(keys) @property def fps(self) -> float | None: return self._fps @property def num_episodes(self) -> int: return len(np.unique(self._data[OnlineBuffer.EPISODE_INDEX_KEY][self._data[OnlineBuffer.OCCUPANCY_MASK_KEY]])) @property def num_samples(self) -> int: return np.count_nonzero(self._data[OnlineBuffer.OCCUPANCY_MASK_KEY]) def __len__(self): return self.num_samples def _item_to_tensors(self, item: dict) -> dict: item_ = {} for (k, v) in item.items(): if isinstance(v, torch.Tensor): item_[k] = v elif isinstance(v, np.ndarray): item_[k] = torch.from_numpy(v) else: item_[k] = torch.tensor(v) return item_ def __getitem__(self, idx: int) -> dict[str, torch.Tensor]: if idx >= len(self) or idx < -len(self): raise IndexError item = {k: v[idx] for (k, v) in self._data.items() if not k.startswith('_')} if self.delta_timestamps is None: return self._item_to_tensors(item) episode_index = item[OnlineBuffer.EPISODE_INDEX_KEY] current_ts = item[OnlineBuffer.TIMESTAMP_KEY] episode_data_indices = np.where(np.bitwise_and(self._data[OnlineBuffer.EPISODE_INDEX_KEY] == episode_index, self._data[OnlineBuffer.OCCUPANCY_MASK_KEY]))[0] episode_timestamps = self._data[OnlineBuffer.TIMESTAMP_KEY][episode_data_indices] for data_key in self.delta_timestamps: query_ts = current_ts + 
self.delta_timestamps[data_key] dist = np.abs(query_ts[:, None] - episode_timestamps[None, :]) argmin_ = np.argmin(dist, axis=1) min_ = dist[np.arange(dist.shape[0]), argmin_] is_pad = min_ > self.tolerance_s assert ((query_ts[is_pad] < episode_timestamps[0]) | (episode_timestamps[-1] < query_ts[is_pad])).all(), f'One or several timestamps unexpectedly violate the tolerance ({min_} > self.tolerance_s={self.tolerance_s!r}) inside the episode range.' item[data_key] = self._data[data_key][episode_data_indices[argmin_]] item[f'{data_key}{OnlineBuffer.IS_PAD_POSTFIX}'] = is_pad return self._item_to_tensors(item) def get_data_by_key(self, key: str) -> torch.Tensor: return torch.from_numpy(self._data[key][self._data[OnlineBuffer.OCCUPANCY_MASK_KEY]]) def compute_sampler_weights(offline_dataset: LeRobotDataset, offline_drop_n_last_frames: int=0, online_dataset: OnlineBuffer | None=None, online_sampling_ratio: float | None=None, online_drop_n_last_frames: int=0) -> torch.Tensor: if len(offline_dataset) == 0 and (online_dataset is None or len(online_dataset) == 0): raise ValueError('At least one of `offline_dataset` or `online_dataset` should contain data.') if (online_dataset is None) ^ (online_sampling_ratio is None): raise ValueError('`online_dataset` and `online_sampling_ratio` must be provided together or not at all.') offline_sampling_ratio = 0 if online_sampling_ratio is None else 1 - online_sampling_ratio weights = [] if len(offline_dataset) > 0: offline_data_mask_indices = [] for (start_index, end_index) in zip(offline_dataset.episode_data_index['from'], offline_dataset.episode_data_index['to'], strict=True): offline_data_mask_indices.extend(range(start_index.item(), end_index.item() - offline_drop_n_last_frames)) offline_data_mask = torch.zeros(len(offline_dataset), dtype=torch.bool) offline_data_mask[torch.tensor(offline_data_mask_indices)] = True weights.append(torch.full(size=(len(offline_dataset),), fill_value=offline_sampling_ratio / offline_data_mask.sum()) * offline_data_mask) if online_dataset is not None and len(online_dataset) > 0: online_data_mask_indices = [] episode_indices = online_dataset.get_data_by_key('episode_index') for episode_idx in torch.unique(episode_indices): where_episode = torch.where(episode_indices == episode_idx) start_index = where_episode[0][0] end_index = where_episode[0][-1] + 1 online_data_mask_indices.extend(range(start_index.item(), end_index.item() - online_drop_n_last_frames)) online_data_mask = torch.zeros(len(online_dataset), dtype=torch.bool) online_data_mask[torch.tensor(online_data_mask_indices)] = True weights.append(torch.full(size=(len(online_dataset),), fill_value=online_sampling_ratio / online_data_mask.sum()) * online_data_mask) weights = torch.cat(weights) if weights.sum() == 0: weights += 1 / len(weights) else: weights /= weights.sum() return weights # File: lerobot-main/lerobot/common/datasets/push_dataset_to_hub/_diffusion_policy_replay_buffer.py """""" from __future__ import annotations import math import numbers import os from functools import cached_property import numcodecs import numpy as np import zarr def check_chunks_compatible(chunks: tuple, shape: tuple): assert len(shape) == len(chunks) for c in chunks: assert isinstance(c, numbers.Integral) assert c > 0 def rechunk_recompress_array(group, name, chunks=None, chunk_length=None, compressor=None, tmp_key='_temp'): old_arr = group[name] if chunks is None: chunks = (chunk_length,) + old_arr.chunks[1:] if chunk_length is not None else old_arr.chunks
check_chunks_compatible(chunks, old_arr.shape) if compressor is None: compressor = old_arr.compressor if chunks == old_arr.chunks and compressor == old_arr.compressor: return old_arr group.move(name, tmp_key) old_arr = group[tmp_key] (n_copied, n_skipped, n_bytes_copied) = zarr.copy(source=old_arr, dest=group, name=name, chunks=chunks, compressor=compressor) del group[tmp_key] arr = group[name] return arr def get_optimal_chunks(shape, dtype, target_chunk_bytes=2000000.0, max_chunk_length=None): itemsize = np.dtype(dtype).itemsize rshape = list(shape[::-1]) if max_chunk_length is not None: rshape[-1] = int(max_chunk_length) split_idx = len(shape) - 1 for i in range(len(shape) - 1): this_chunk_bytes = itemsize * np.prod(rshape[:i]) next_chunk_bytes = itemsize * np.prod(rshape[:i + 1]) if this_chunk_bytes <= target_chunk_bytes and next_chunk_bytes > target_chunk_bytes: split_idx = i rchunks = rshape[:split_idx] item_chunk_bytes = itemsize * np.prod(rshape[:split_idx]) this_max_chunk_length = rshape[split_idx] next_chunk_length = min(this_max_chunk_length, math.ceil(target_chunk_bytes / item_chunk_bytes)) rchunks.append(next_chunk_length) len_diff = len(shape) - len(rchunks) rchunks.extend([1] * len_diff) chunks = tuple(rchunks[::-1]) return chunks class ReplayBuffer: def __init__(self, root: zarr.Group | dict[str, dict]): assert 'data' in root assert 'meta' in root assert 'episode_ends' in root['meta'] for value in root['data'].values(): assert value.shape[0] == root['meta']['episode_ends'][-1] self.root = root @classmethod def create_empty_zarr(cls, storage=None, root=None): if root is None: if storage is None: storage = zarr.MemoryStore() root = zarr.group(store=storage) root.require_group('data', overwrite=False) meta = root.require_group('meta', overwrite=False) if 'episode_ends' not in meta: meta.zeros('episode_ends', shape=(0,), dtype=np.int64, compressor=None, overwrite=False) return cls(root=root) @classmethod def create_empty_numpy(cls): root = {'data': {}, 'meta': {'episode_ends': np.zeros((0,), dtype=np.int64)}} return cls(root=root) @classmethod def create_from_group(cls, group, **kwargs): if 'data' not in group: buffer = cls.create_empty_zarr(root=group, **kwargs) else: buffer = cls(root=group, **kwargs) return buffer @classmethod def create_from_path(cls, zarr_path, mode='r', **kwargs): group = zarr.open(os.path.expanduser(zarr_path), mode) return cls.create_from_group(group, **kwargs) @classmethod def copy_from_store(cls, src_store, store=None, keys=None, chunks: dict[str, tuple] | None=None, compressors: dict | str | numcodecs.abc.Codec | None=None, if_exists='replace', **kwargs): src_root = zarr.group(src_store) if chunks is None: chunks = {} if compressors is None: compressors = {} root = None if store is None: meta = {} for (key, value) in src_root['meta'].items(): if len(value.shape) == 0: meta[key] = np.array(value) else: meta[key] = value[:] if keys is None: keys = src_root['data'].keys() data = {} for key in keys: arr = src_root['data'][key] data[key] = arr[:] root = {'meta': meta, 'data': data} else: root = zarr.group(store=store) (n_copied, n_skipped, n_bytes_copied) = zarr.copy_store(source=src_store, dest=store, source_path='/meta', dest_path='/meta', if_exists=if_exists) data_group = root.create_group('data', overwrite=True) if keys is None: keys = src_root['data'].keys() for key in keys: value = src_root['data'][key] cks = cls._resolve_array_chunks(chunks=chunks, key=key, array=value) cpr = cls._resolve_array_compressor(compressors=compressors, key=key, 
array=value) if cks == value.chunks and cpr == value.compressor: this_path = '/data/' + key (n_copied, n_skipped, n_bytes_copied) = zarr.copy_store(source=src_store, dest=store, source_path=this_path, dest_path=this_path, if_exists=if_exists) else: (n_copied, n_skipped, n_bytes_copied) = zarr.copy(source=value, dest=data_group, name=key, chunks=cks, compressor=cpr, if_exists=if_exists) buffer = cls(root=root) return buffer @classmethod def copy_from_path(cls, zarr_path, backend=None, store=None, keys=None, chunks: dict[str, tuple] | None=None, compressors: dict | str | numcodecs.abc.Codec | None=None, if_exists='replace', **kwargs): if chunks is None: chunks = {} if compressors is None: compressors = {} if backend == 'numpy': print('backend argument is deprecated!') store = None group = zarr.open(os.path.expanduser(zarr_path), 'r') return cls.copy_from_store(src_store=group.store, store=store, keys=keys, chunks=chunks, compressors=compressors, if_exists=if_exists, **kwargs) def save_to_store(self, store, chunks: dict[str, tuple] | None=None, compressors: str | numcodecs.abc.Codec | dict | None=None, if_exists='replace', **kwargs): root = zarr.group(store) if chunks is None: chunks = {} if compressors is None: compressors = {} if self.backend == 'zarr': (n_copied, n_skipped, n_bytes_copied) = zarr.copy_store(source=self.root.store, dest=store, source_path='/meta', dest_path='/meta', if_exists=if_exists) else: meta_group = root.create_group('meta', overwrite=True) for (key, value) in self.root['meta'].items(): _ = meta_group.array(name=key, data=value, shape=value.shape, chunks=value.shape) data_group = root.create_group('data', overwrite=True) for (key, value) in self.root['data'].items(): cks = self._resolve_array_chunks(chunks=chunks, key=key, array=value) cpr = self._resolve_array_compressor(compressors=compressors, key=key, array=value) if isinstance(value, zarr.Array): if cks == value.chunks and cpr == value.compressor: this_path = '/data/' + key (n_copied, n_skipped, n_bytes_copied) = zarr.copy_store(source=self.root.store, dest=store, source_path=this_path, dest_path=this_path, if_exists=if_exists) else: (n_copied, n_skipped, n_bytes_copied) = zarr.copy(source=value, dest=data_group, name=key, chunks=cks, compressor=cpr, if_exists=if_exists) else: _ = data_group.array(name=key, data=value, chunks=cks, compressor=cpr) return store def save_to_path(self, zarr_path, chunks: dict[str, tuple] | None=None, compressors: str | numcodecs.abc.Codec | dict | None=None, if_exists='replace', **kwargs): if chunks is None: chunks = {} if compressors is None: compressors = {} store = zarr.DirectoryStore(os.path.expanduser(zarr_path)) return self.save_to_store(store, chunks=chunks, compressors=compressors, if_exists=if_exists, **kwargs) @staticmethod def resolve_compressor(compressor='default'): if compressor == 'default': compressor = numcodecs.Blosc(cname='lz4', clevel=5, shuffle=numcodecs.Blosc.NOSHUFFLE) elif compressor == 'disk': compressor = numcodecs.Blosc('zstd', clevel=5, shuffle=numcodecs.Blosc.BITSHUFFLE) return compressor @classmethod def _resolve_array_compressor(cls, compressors: dict | str | numcodecs.abc.Codec, key, array): cpr = 'nil' if isinstance(compressors, dict): if key in compressors: cpr = cls.resolve_compressor(compressors[key]) elif isinstance(array, zarr.Array): cpr = array.compressor else: cpr = cls.resolve_compressor(compressors) if cpr == 'nil': cpr = cls.resolve_compressor('default') return cpr @classmethod def _resolve_array_chunks(cls, chunks: dict | tuple, key, 
array): cks = None if isinstance(chunks, dict): if key in chunks: cks = chunks[key] elif isinstance(array, zarr.Array): cks = array.chunks elif isinstance(chunks, tuple): cks = chunks else: raise TypeError(f'Unsupported chunks type {type(chunks)}') if cks is None: cks = get_optimal_chunks(shape=array.shape, dtype=array.dtype) check_chunks_compatible(chunks=cks, shape=array.shape) return cks @cached_property def data(self): return self.root['data'] @cached_property def meta(self): return self.root['meta'] def update_meta(self, data): np_data = {} for (key, value) in data.items(): if isinstance(value, np.ndarray): np_data[key] = value else: arr = np.array(value) if arr.dtype == object: raise TypeError(f'Invalid value type {type(value)}') np_data[key] = arr meta_group = self.meta if self.backend == 'zarr': for (key, value) in np_data.items(): _ = meta_group.array(name=key, data=value, shape=value.shape, chunks=value.shape, overwrite=True) else: meta_group.update(np_data) return meta_group @property def episode_ends(self): return self.meta['episode_ends'] def get_episode_idxs(self): import numba numba.jit(nopython=True) def _get_episode_idxs(episode_ends): result = np.zeros((episode_ends[-1],), dtype=np.int64) for i in range(len(episode_ends)): start = 0 if i > 0: start = episode_ends[i - 1] end = episode_ends[i] for idx in range(start, end): result[idx] = i return result return _get_episode_idxs(self.episode_ends) @property def backend(self): backend = 'numpy' if isinstance(self.root, zarr.Group): backend = 'zarr' return backend def __repr__(self) -> str: if self.backend == 'zarr': return str(self.root.tree()) else: return super().__repr__() def keys(self): return self.data.keys() def values(self): return self.data.values() def items(self): return self.data.items() def __getitem__(self, key): return self.data[key] def __contains__(self, key): return key in self.data @property def n_steps(self): if len(self.episode_ends) == 0: return 0 return self.episode_ends[-1] @property def n_episodes(self): return len(self.episode_ends) @property def chunk_size(self): if self.backend == 'zarr': return next(iter(self.data.arrays()))[-1].chunks[0] return None @property def episode_lengths(self): ends = self.episode_ends[:] ends = np.insert(ends, 0, 0) lengths = np.diff(ends) return lengths def add_episode(self, data: dict[str, np.ndarray], chunks: dict[str, tuple] | None=None, compressors: str | numcodecs.abc.Codec | dict | None=None): if chunks is None: chunks = {} if compressors is None: compressors = {} assert len(data) > 0 is_zarr = self.backend == 'zarr' curr_len = self.n_steps episode_length = None for value in data.values(): assert len(value.shape) >= 1 if episode_length is None: episode_length = len(value) else: assert episode_length == len(value) new_len = curr_len + episode_length for (key, value) in data.items(): new_shape = (new_len,) + value.shape[1:] if key not in self.data: if is_zarr: cks = self._resolve_array_chunks(chunks=chunks, key=key, array=value) cpr = self._resolve_array_compressor(compressors=compressors, key=key, array=value) arr = self.data.zeros(name=key, shape=new_shape, chunks=cks, dtype=value.dtype, compressor=cpr) else: arr = np.zeros(shape=new_shape, dtype=value.dtype) self.data[key] = arr else: arr = self.data[key] assert value.shape[1:] == arr.shape[1:] if is_zarr: arr.resize(new_shape) else: arr.resize(new_shape, refcheck=False) arr[-value.shape[0]:] = value episode_ends = self.episode_ends if is_zarr: episode_ends.resize(episode_ends.shape[0] + 1) else: 
episode_ends.resize(episode_ends.shape[0] + 1, refcheck=False) episode_ends[-1] = new_len if is_zarr and episode_ends.chunks[0] < episode_ends.shape[0]: rechunk_recompress_array(self.meta, 'episode_ends', chunk_length=int(episode_ends.shape[0] * 1.5)) def drop_episode(self): is_zarr = self.backend == 'zarr' episode_ends = self.episode_ends[:].copy() assert len(episode_ends) > 0 start_idx = 0 if len(episode_ends) > 1: start_idx = episode_ends[-2] for value in self.data.values(): new_shape = (start_idx,) + value.shape[1:] if is_zarr: value.resize(new_shape) else: value.resize(new_shape, refcheck=False) if is_zarr: self.episode_ends.resize(len(episode_ends) - 1) else: self.episode_ends.resize(len(episode_ends) - 1, refcheck=False) def pop_episode(self): assert self.n_episodes > 0 episode = self.get_episode(self.n_episodes - 1, copy=True) self.drop_episode() return episode def extend(self, data): self.add_episode(data) def get_episode(self, idx, copy=False): idx = list(range(len(self.episode_ends)))[idx] start_idx = 0 if idx > 0: start_idx = self.episode_ends[idx - 1] end_idx = self.episode_ends[idx] result = self.get_steps_slice(start_idx, end_idx, copy=copy) return result def get_episode_slice(self, idx): start_idx = 0 if idx > 0: start_idx = self.episode_ends[idx - 1] end_idx = self.episode_ends[idx] return slice(start_idx, end_idx) def get_steps_slice(self, start, stop, step=None, copy=False): _slice = slice(start, stop, step) result = {} for (key, value) in self.data.items(): x = value[_slice] if copy and isinstance(value, np.ndarray): x = x.copy() result[key] = x return result def get_chunks(self) -> dict: assert self.backend == 'zarr' chunks = {} for (key, value) in self.data.items(): chunks[key] = value.chunks return chunks def set_chunks(self, chunks: dict): assert self.backend == 'zarr' for (key, value) in chunks.items(): if key in self.data: arr = self.data[key] if value != arr.chunks: check_chunks_compatible(chunks=value, shape=arr.shape) rechunk_recompress_array(self.data, key, chunks=value) def get_compressors(self) -> dict: assert self.backend == 'zarr' compressors = {} for (key, value) in self.data.items(): compressors[key] = value.compressor return compressors def set_compressors(self, compressors: dict): assert self.backend == 'zarr' for (key, value) in compressors.items(): if key in self.data: arr = self.data[key] compressor = self.resolve_compressor(value) if compressor != arr.compressor: rechunk_recompress_array(self.data, key, compressor=compressor) # File: lerobot-main/lerobot/common/datasets/push_dataset_to_hub/_download_raw.py """""" import argparse import logging import warnings from pathlib import Path from huggingface_hub import snapshot_download from lerobot.common.datasets.push_dataset_to_hub.utils import check_repo_id AVAILABLE_RAW_REPO_IDS = {'lerobot-raw/aloha_mobile_cabinet_raw': 'aloha_hdf5', 'lerobot-raw/aloha_mobile_chair_raw': 'aloha_hdf5', 'lerobot-raw/aloha_mobile_elevator_raw': 'aloha_hdf5', 'lerobot-raw/aloha_mobile_shrimp_raw': 'aloha_hdf5', 'lerobot-raw/aloha_mobile_wash_pan_raw': 'aloha_hdf5', 'lerobot-raw/aloha_mobile_wipe_wine_raw': 'aloha_hdf5', 'lerobot-raw/aloha_sim_insertion_human_raw': 'aloha_hdf5', 'lerobot-raw/aloha_sim_insertion_scripted_raw': 'aloha_hdf5', 'lerobot-raw/aloha_sim_transfer_cube_human_raw': 'aloha_hdf5', 'lerobot-raw/aloha_sim_transfer_cube_scripted_raw': 'aloha_hdf5', 'lerobot-raw/aloha_static_battery_raw': 'aloha_hdf5', 'lerobot-raw/aloha_static_candy_raw': 'aloha_hdf5', 'lerobot-raw/aloha_static_coffee_new_raw': 
'aloha_hdf5', 'lerobot-raw/aloha_static_coffee_raw': 'aloha_hdf5', 'lerobot-raw/aloha_static_cups_open_raw': 'aloha_hdf5', 'lerobot-raw/aloha_static_fork_pick_up_raw': 'aloha_hdf5', 'lerobot-raw/aloha_static_pingpong_test_raw': 'aloha_hdf5', 'lerobot-raw/aloha_static_pro_pencil_raw': 'aloha_hdf5', 'lerobot-raw/aloha_static_screw_driver_raw': 'aloha_hdf5', 'lerobot-raw/aloha_static_tape_raw': 'aloha_hdf5', 'lerobot-raw/aloha_static_thread_velcro_raw': 'aloha_hdf5', 'lerobot-raw/aloha_static_towel_raw': 'aloha_hdf5', 'lerobot-raw/aloha_static_vinh_cup_left_raw': 'aloha_hdf5', 'lerobot-raw/aloha_static_vinh_cup_raw': 'aloha_hdf5', 'lerobot-raw/aloha_static_ziploc_slide_raw': 'aloha_hdf5', 'lerobot-raw/umi_cup_in_the_wild_raw': 'umi_zarr', 'lerobot-raw/pusht_raw': 'pusht_zarr', 'lerobot-raw/unitreeh1_fold_clothes_raw': 'aloha_hdf5', 'lerobot-raw/unitreeh1_rearrange_objects_raw': 'aloha_hdf5', 'lerobot-raw/unitreeh1_two_robot_greeting_raw': 'aloha_hdf5', 'lerobot-raw/unitreeh1_warehouse_raw': 'aloha_hdf5', 'lerobot-raw/xarm_lift_medium_raw': 'xarm_pkl', 'lerobot-raw/xarm_lift_medium_replay_raw': 'xarm_pkl', 'lerobot-raw/xarm_push_medium_raw': 'xarm_pkl', 'lerobot-raw/xarm_push_medium_replay_raw': 'xarm_pkl', 'lerobot-raw/fractal20220817_data_raw': 'openx_rlds.fractal20220817_data', 'lerobot-raw/kuka_raw': 'openx_rlds.kuka', 'lerobot-raw/bridge_openx_raw': 'openx_rlds.bridge_openx', 'lerobot-raw/taco_play_raw': 'openx_rlds.taco_play', 'lerobot-raw/jaco_play_raw': 'openx_rlds.jaco_play', 'lerobot-raw/berkeley_cable_routing_raw': 'openx_rlds.berkeley_cable_routing', 'lerobot-raw/roboturk_raw': 'openx_rlds.roboturk', 'lerobot-raw/nyu_door_opening_surprising_effectiveness_raw': 'openx_rlds.nyu_door_opening_surprising_effectiveness', 'lerobot-raw/viola_raw': 'openx_rlds.viola', 'lerobot-raw/berkeley_autolab_ur5_raw': 'openx_rlds.berkeley_autolab_ur5', 'lerobot-raw/toto_raw': 'openx_rlds.toto', 'lerobot-raw/language_table_raw': 'openx_rlds.language_table', 'lerobot-raw/columbia_cairlab_pusht_real_raw': 'openx_rlds.columbia_cairlab_pusht_real', 'lerobot-raw/stanford_kuka_multimodal_dataset_raw': 'openx_rlds.stanford_kuka_multimodal_dataset', 'lerobot-raw/nyu_rot_dataset_raw': 'openx_rlds.nyu_rot_dataset', 'lerobot-raw/io_ai_tech_raw': 'openx_rlds.io_ai_tech', 'lerobot-raw/stanford_hydra_dataset_raw': 'openx_rlds.stanford_hydra_dataset', 'lerobot-raw/austin_buds_dataset_raw': 'openx_rlds.austin_buds_dataset', 'lerobot-raw/nyu_franka_play_dataset_raw': 'openx_rlds.nyu_franka_play_dataset', 'lerobot-raw/maniskill_dataset_raw': 'openx_rlds.maniskill_dataset', 'lerobot-raw/furniture_bench_dataset_raw': 'openx_rlds.furniture_bench_dataset', 'lerobot-raw/cmu_franka_exploration_dataset_raw': 'openx_rlds.cmu_franka_exploration_dataset', 'lerobot-raw/ucsd_kitchen_dataset_raw': 'openx_rlds.ucsd_kitchen_dataset', 'lerobot-raw/ucsd_pick_and_place_dataset_raw': 'openx_rlds.ucsd_pick_and_place_dataset', 'lerobot-raw/spoc_raw': 'openx_rlds.spoc', 'lerobot-raw/austin_sailor_dataset_raw': 'openx_rlds.austin_sailor_dataset', 'lerobot-raw/austin_sirius_dataset_raw': 'openx_rlds.austin_sirius_dataset', 'lerobot-raw/bc_z_raw': 'openx_rlds.bc_z', 'lerobot-raw/utokyo_pr2_opening_fridge_raw': 'openx_rlds.utokyo_pr2_opening_fridge', 'lerobot-raw/utokyo_pr2_tabletop_manipulation_raw': 'openx_rlds.utokyo_pr2_tabletop_manipulation', 'lerobot-raw/utokyo_xarm_pick_and_place_raw': 'openx_rlds.utokyo_xarm_pick_and_place', 'lerobot-raw/utokyo_xarm_bimanual_raw': 'openx_rlds.utokyo_xarm_bimanual', 'lerobot-raw/utokyo_saytap_raw': 
'openx_rlds.utokyo_saytap', 'lerobot-raw/robo_net_raw': 'openx_rlds.robo_net', 'lerobot-raw/robo_set_raw': 'openx_rlds.robo_set', 'lerobot-raw/berkeley_mvp_raw': 'openx_rlds.berkeley_mvp', 'lerobot-raw/berkeley_rpt_raw': 'openx_rlds.berkeley_rpt', 'lerobot-raw/kaist_nonprehensile_raw': 'openx_rlds.kaist_nonprehensile', 'lerobot-raw/stanford_mask_vit_raw': 'openx_rlds.stanford_mask_vit', 'lerobot-raw/tokyo_u_lsmo_raw': 'openx_rlds.tokyo_u_lsmo', 'lerobot-raw/dlr_sara_pour_raw': 'openx_rlds.dlr_sara_pour', 'lerobot-raw/dlr_sara_grid_clamp_raw': 'openx_rlds.dlr_sara_grid_clamp', 'lerobot-raw/dlr_edan_shared_control_raw': 'openx_rlds.dlr_edan_shared_control', 'lerobot-raw/asu_table_top_raw': 'openx_rlds.asu_table_top', 'lerobot-raw/stanford_robocook_raw': 'openx_rlds.stanford_robocook', 'lerobot-raw/imperialcollege_sawyer_wrist_cam_raw': 'openx_rlds.imperialcollege_sawyer_wrist_cam', 'lerobot-raw/iamlab_cmu_pickup_insert_raw': 'openx_rlds.iamlab_cmu_pickup_insert', 'lerobot-raw/uiuc_d3field_raw': 'openx_rlds.uiuc_d3field', 'lerobot-raw/utaustin_mutex_raw': 'openx_rlds.utaustin_mutex', 'lerobot-raw/berkeley_fanuc_manipulation_raw': 'openx_rlds.berkeley_fanuc_manipulation', 'lerobot-raw/cmu_playing_with_food_raw': 'openx_rlds.cmu_playing_with_food', 'lerobot-raw/cmu_play_fusion_raw': 'openx_rlds.cmu_play_fusion', 'lerobot-raw/cmu_stretch_raw': 'openx_rlds.cmu_stretch', 'lerobot-raw/berkeley_gnm_recon_raw': 'openx_rlds.berkeley_gnm_recon', 'lerobot-raw/berkeley_gnm_cory_hall_raw': 'openx_rlds.berkeley_gnm_cory_hall', 'lerobot-raw/berkeley_gnm_sac_son_raw': 'openx_rlds.berkeley_gnm_sac_son', 'lerobot-raw/droid_raw': 'openx_rlds.droid', 'lerobot-raw/droid_100_raw': 'openx_rlds.droid100', 'lerobot-raw/fmb_raw': 'openx_rlds.fmb', 'lerobot-raw/dobbe_raw': 'openx_rlds.dobbe', 'lerobot-raw/usc_cloth_sim_raw': 'openx_rlds.usc_cloth_sim', 'lerobot-raw/plex_robosuite_raw': 'openx_rlds.plex_robosuite', 'lerobot-raw/conq_hose_manipulation_raw': 'openx_rlds.conq_hose_manipulation', 'lerobot-raw/vima_raw': 'openx_rlds.vima', 'lerobot-raw/robot_vqa_raw': 'openx_rlds.robot_vqa', 'lerobot-raw/mimic_play_raw': 'openx_rlds.mimic_play', 'lerobot-raw/tidybot_raw': 'openx_rlds.tidybot', 'lerobot-raw/eth_agent_affordances_raw': 'openx_rlds.eth_agent_affordances'} def download_raw(raw_dir: Path, repo_id: str): check_repo_id(repo_id) (user_id, dataset_id) = repo_id.split('/') if not dataset_id.endswith('_raw'): warnings.warn(f"`dataset_id` ({dataset_id}) doesn't end with '_raw' (e.g. 'lerobot/pusht_raw'). Following this\n naming convention by renaming your repository is advised, but not mandatory.", stacklevel=1) if raw_dir.parts[-2] != user_id or raw_dir.parts[-1] != dataset_id: warnings.warn(f"`raw_dir` ({raw_dir}) doesn't contain a community or user id `/` the name of the dataset that\n match the `repo_id` (e.g. 'data/lerobot/pusht_raw'). 
Following this naming convention is advised,\n but not mandatory.", stacklevel=1) raw_dir.mkdir(parents=True, exist_ok=True) logging.info(f'Start downloading from huggingface.co/{user_id} for {dataset_id}') snapshot_download(repo_id, repo_type='dataset', local_dir=raw_dir) logging.info(f'Finish downloading from huggingface.co/{user_id} for {dataset_id}') def download_all_raw_datasets(data_dir: Path | None=None): if data_dir is None: data_dir = Path('data') for repo_id in AVAILABLE_RAW_REPO_IDS: raw_dir = data_dir / repo_id download_raw(raw_dir, repo_id) def main(): parser = argparse.ArgumentParser(description=f'A script to download raw datasets from Hugging Face hub to a local directory. Here is a\n non-exhaustive list of available repositories to use in `--repo-id`: {list(AVAILABLE_RAW_REPO_IDS.keys())}') parser.add_argument('--raw-dir', type=Path, required=True, help='Directory containing input raw datasets (e.g. `data/aloha_mobile_chair_raw` or `data/pusht_raw`).') parser.add_argument('--repo-id', type=str, required=True, help='Repository identifier on Hugging Face: a community or a user name `/` the name of\n the dataset (e.g. `lerobot/pusht_raw`, `cadene/aloha_sim_insertion_human_raw`).') args = parser.parse_args() download_raw(**vars(args)) if __name__ == '__main__': main() # File: lerobot-main/lerobot/common/datasets/push_dataset_to_hub/_encode_datasets.py """""" import argparse from pathlib import Path from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION from lerobot.common.datasets.push_dataset_to_hub._download_raw import AVAILABLE_RAW_REPO_IDS from lerobot.common.datasets.push_dataset_to_hub.utils import check_repo_id from lerobot.scripts.push_dataset_to_hub import push_dataset_to_hub def get_push_repo_id_from_raw(raw_repo_id: str, push_repo: str) -> str: dataset_id_raw = raw_repo_id.split('/')[1] dataset_id = dataset_id_raw.removesuffix('_raw') return f'{push_repo}/{dataset_id}' def encode_datasets(raw_dir: Path, raw_repo_ids: list[str], push_repo: str, vcodec: str, pix_fmt: str, g: int, crf: int, local_dir: Path | None=None, tests_data_dir: Path | None=None, raw_format: str | None=None, dry_run: bool=False) -> None: if len(raw_repo_ids) == 1 and raw_repo_ids[0].lower() == 'lerobot-raw': raw_repo_ids_format = AVAILABLE_RAW_REPO_IDS else: if raw_format is None: raise ValueError(raw_format) raw_repo_ids_format = {id_: raw_format for id_ in raw_repo_ids} for (raw_repo_id, repo_raw_format) in raw_repo_ids_format.items(): check_repo_id(raw_repo_id) dataset_repo_id_push = get_push_repo_id_from_raw(raw_repo_id, push_repo) dataset_raw_dir = raw_dir / raw_repo_id dataset_dir = local_dir / dataset_repo_id_push if local_dir is not None else None encoding = {'vcodec': vcodec, 'pix_fmt': pix_fmt, 'g': g, 'crf': crf} if not dataset_raw_dir.is_dir(): raise NotADirectoryError(dataset_raw_dir) if not dry_run: push_dataset_to_hub(dataset_raw_dir, raw_format=repo_raw_format, repo_id=dataset_repo_id_push, local_dir=dataset_dir, resume=True, encoding=encoding, tests_data_dir=tests_data_dir) else: print(f'DRY RUN: {dataset_raw_dir} --> {dataset_dir} --> {dataset_repo_id_push}@{CODEBASE_VERSION}') def main(): parser = argparse.ArgumentParser() parser.add_argument('--raw-dir', type=Path, default=Path('data'), help='Directory where raw datasets are located.') parser.add_argument('--raw-repo-ids', type=str, nargs='*', default=['lerobot-raw'], help="Raw dataset repo ids. 
if 'lerobot-raw', the keys from `AVAILABLE_RAW_REPO_IDS` will be\n used and raw datasets will be fetched from the 'lerobot-raw/' repo and pushed with their\n associated format. It is assumed that each dataset is located at `raw_dir / raw_repo_id` ") parser.add_argument('--raw-format', type=str, default=None, help="Raw format to use for the raw repo-ids. Must be specified if --raw-repo-ids is not\n 'lerobot-raw'") parser.add_argument('--local-dir', type=Path, default=None, help='When provided, writes the dataset converted to LeRobotDataset format in this directory\n (e.g. `data/lerobot/aloha_mobile_chair`).') parser.add_argument('--push-repo', type=str, default='lerobot', help='Repo to upload datasets to') parser.add_argument('--vcodec', type=str, default='libsvtav1', help='Codec to use for encoding videos') parser.add_argument('--pix-fmt', type=str, default='yuv420p', help='Pixel formats (chroma subsampling) to be used for encoding') parser.add_argument('--g', type=int, default=2, help='Group of pictures sizes to be used for encoding.') parser.add_argument('--crf', type=int, default=30, help='Constant rate factors to be used for encoding.') parser.add_argument('--tests-data-dir', type=Path, default=None, help='When provided, save tests artifacts into the given directory (e.g. `--tests-data-dir tests/data` will save to tests/data/{--repo-id}).') parser.add_argument('--dry-run', type=int, default=0, help="If not set to 0, this script won't download or upload anything.") args = parser.parse_args() encode_datasets(**vars(args)) if __name__ == '__main__': main() # File: lerobot-main/lerobot/common/datasets/push_dataset_to_hub/_umi_imagecodecs_numcodecs.py """""" __version__ = '2022.9.26' __all__ = ('register_codecs',) import imagecodecs import numpy from numcodecs.abc import Codec from numcodecs.registry import get_codec, register_codec def protective_squeeze(x: numpy.ndarray): img_shape = x.shape[-3:] if len(x.shape) > 3: n_imgs = numpy.prod(x.shape[:-3]) if n_imgs > 1: img_shape = (-1,) + img_shape return x.reshape(img_shape) def get_default_image_compressor(**kwargs): if imagecodecs.JPEGXL: this_kwargs = {'effort': 3, 'distance': 0.3, 'decodingspeed': 1} this_kwargs.update(kwargs) return JpegXl(**this_kwargs) else: this_kwargs = {'level': 50} this_kwargs.update(kwargs) return Jpeg2k(**this_kwargs) class Jpeg2k(Codec): codec_id = 'imagecodecs_jpeg2k' def __init__(self, level=None, codecformat=None, colorspace=None, tile=None, reversible=None, bitspersample=None, resolutions=None, numthreads=None, verbose=0): self.level = level self.codecformat = codecformat self.colorspace = colorspace self.tile = None if tile is None else tuple(tile) self.reversible = reversible self.bitspersample = bitspersample self.resolutions = resolutions self.numthreads = numthreads self.verbose = verbose def encode(self, buf): buf = protective_squeeze(numpy.asarray(buf)) return imagecodecs.jpeg2k_encode(buf, level=self.level, codecformat=self.codecformat, colorspace=self.colorspace, tile=self.tile, reversible=self.reversible, bitspersample=self.bitspersample, resolutions=self.resolutions, numthreads=self.numthreads, verbose=self.verbose) def decode(self, buf, out=None): return imagecodecs.jpeg2k_decode(buf, verbose=self.verbose, numthreads=self.numthreads, out=out) class JpegXl(Codec): codec_id = 'imagecodecs_jpegxl' def __init__(self, level=None, effort=None, distance=None, lossless=None, decodingspeed=None, photometric=None, planar=None, usecontainer=None, index=None, keeporientation=None, numthreads=None): 
self.level = level self.effort = effort self.distance = distance self.lossless = bool(lossless) self.decodingspeed = decodingspeed self.photometric = photometric self.planar = planar self.usecontainer = usecontainer self.index = index self.keeporientation = keeporientation self.numthreads = numthreads def encode(self, buf): buf = protective_squeeze(numpy.asarray(buf)) return imagecodecs.jpegxl_encode(buf, level=self.level, effort=self.effort, distance=self.distance, lossless=self.lossless, decodingspeed=self.decodingspeed, photometric=self.photometric, planar=self.planar, usecontainer=self.usecontainer, numthreads=self.numthreads) def decode(self, buf, out=None): return imagecodecs.jpegxl_decode(buf, index=self.index, keeporientation=self.keeporientation, numthreads=self.numthreads, out=out) def _flat(out): if out is None: return None view = memoryview(out) if view.readonly or not view.contiguous: return None return view.cast('B') def register_codecs(codecs=None, force=False, verbose=True): for (name, cls) in globals().items(): if not hasattr(cls, 'codec_id') or name == 'Codec': continue if codecs is not None and cls.codec_id not in codecs: continue try: try: get_codec({'id': cls.codec_id}) except TypeError: pass except ValueError: pass else: if not force: if verbose: log_warning(f'numcodec {cls.codec_id!r} already registered') continue if verbose: log_warning(f'replacing registered numcodec {cls.codec_id!r}') register_codec(cls) def log_warning(msg, *args, **kwargs): import logging logging.getLogger(__name__).warning(msg, *args, **kwargs) # File: lerobot-main/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py """""" import gc import shutil from pathlib import Path import h5py import numpy as np import torch import tqdm from datasets import Dataset, Features, Image, Sequence, Value from PIL import Image as PILImage from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes, get_default_encoding, save_images_concurrently from lerobot.common.datasets.utils import calculate_episode_data_index, hf_transform_to_torch from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames def get_cameras(hdf5_data): rgb_cameras = [key for key in hdf5_data['/observations/images'].keys() if 'depth' not in key] return rgb_cameras def check_format(raw_dir) -> bool: compressed_images = 'sim' not in raw_dir.name hdf5_paths = list(raw_dir.glob('episode_*.hdf5')) assert len(hdf5_paths) != 0 for hdf5_path in hdf5_paths: with h5py.File(hdf5_path, 'r') as data: assert '/action' in data assert '/observations/qpos' in data assert data['/action'].ndim == 2 assert data['/observations/qpos'].ndim == 2 num_frames = data['/action'].shape[0] assert num_frames == data['/observations/qpos'].shape[0] for camera in get_cameras(data): assert num_frames == data[f'/observations/images/{camera}'].shape[0] if compressed_images: assert data[f'/observations/images/{camera}'].ndim == 2 else: assert data[f'/observations/images/{camera}'].ndim == 4 (b, h, w, c) = data[f'/observations/images/{camera}'].shape assert c < h and c < w, f'Expect (h,w,c) image format but (h={h!r},w={w!r},c={c!r}) provided.' 
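# A minimal standalone sketch of how the checks above can be run before a full conversion; the directory path is hypothetical and only illustrates the expected 'episode_*.hdf5' layout:
#
#     from pathlib import Path
#     raw_dir = Path('data/aloha_static_coffee_raw')  # hypothetical local copy of a raw Aloha dataset
#     check_format(raw_dir)  # raises AssertionError if actions, qpos or camera images are missing or inconsistent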
def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episodes: list[int] | None=None, encoding: dict | None=None): compressed_images = 'sim' not in raw_dir.name hdf5_files = sorted(raw_dir.glob('episode_*.hdf5')) num_episodes = len(hdf5_files) ep_dicts = [] ep_ids = episodes if episodes else range(num_episodes) for ep_idx in tqdm.tqdm(ep_ids): ep_path = hdf5_files[ep_idx] with h5py.File(ep_path, 'r') as ep: num_frames = ep['/action'].shape[0] done = torch.zeros(num_frames, dtype=torch.bool) done[-1] = True state = torch.from_numpy(ep['/observations/qpos'][:]) action = torch.from_numpy(ep['/action'][:]) if '/observations/qvel' in ep: velocity = torch.from_numpy(ep['/observations/qvel'][:]) if '/observations/effort' in ep: effort = torch.from_numpy(ep['/observations/effort'][:]) ep_dict = {} for camera in get_cameras(ep): img_key = f'observation.images.{camera}' if compressed_images: import cv2 imgs_array = [] for data in ep[f'/observations/images/{camera}']: imgs_array.append(cv2.imdecode(data, 1)) imgs_array = np.array(imgs_array) else: imgs_array = ep[f'/observations/images/{camera}'][:] if video: tmp_imgs_dir = videos_dir / 'tmp_images' save_images_concurrently(imgs_array, tmp_imgs_dir) fname = f'{img_key}_episode_{ep_idx:06d}.mp4' video_path = videos_dir / fname encode_video_frames(tmp_imgs_dir, video_path, fps, **encoding or {}) shutil.rmtree(tmp_imgs_dir) ep_dict[img_key] = [{'path': f'videos/{fname}', 'timestamp': i / fps} for i in range(num_frames)] else: ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array] ep_dict['observation.state'] = state if '/observations/qvel' in ep: ep_dict['observation.velocity'] = velocity if '/observations/effort' in ep: ep_dict['observation.effort'] = effort ep_dict['action'] = action ep_dict['episode_index'] = torch.tensor([ep_idx] * num_frames) ep_dict['frame_index'] = torch.arange(0, num_frames, 1) ep_dict['timestamp'] = torch.arange(0, num_frames, 1) / fps ep_dict['next.done'] = done assert isinstance(ep_idx, int) ep_dicts.append(ep_dict) gc.collect() data_dict = concatenate_episodes(ep_dicts) total_frames = data_dict['frame_index'].shape[0] data_dict['index'] = torch.arange(0, total_frames, 1) return data_dict def to_hf_dataset(data_dict, video) -> Dataset: features = {} keys = [key for key in data_dict if 'observation.images.' 
in key] for key in keys: if video: features[key] = VideoFrame() else: features[key] = Image() features['observation.state'] = Sequence(length=data_dict['observation.state'].shape[1], feature=Value(dtype='float32', id=None)) if 'observation.velocity' in data_dict: features['observation.velocity'] = Sequence(length=data_dict['observation.velocity'].shape[1], feature=Value(dtype='float32', id=None)) if 'observation.effort' in data_dict: features['observation.effort'] = Sequence(length=data_dict['observation.effort'].shape[1], feature=Value(dtype='float32', id=None)) features['action'] = Sequence(length=data_dict['action'].shape[1], feature=Value(dtype='float32', id=None)) features['episode_index'] = Value(dtype='int64', id=None) features['frame_index'] = Value(dtype='int64', id=None) features['timestamp'] = Value(dtype='float32', id=None) features['next.done'] = Value(dtype='bool', id=None) features['index'] = Value(dtype='int64', id=None) hf_dataset = Dataset.from_dict(data_dict, features=Features(features)) hf_dataset.set_transform(hf_transform_to_torch) return hf_dataset def from_raw_to_lerobot_format(raw_dir: Path, videos_dir: Path, fps: int | None=None, video: bool=True, episodes: list[int] | None=None, encoding: dict | None=None): check_format(raw_dir) if fps is None: fps = 50 data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding) hf_dataset = to_hf_dataset(data_dict, video) episode_data_index = calculate_episode_data_index(hf_dataset) info = {'codebase_version': CODEBASE_VERSION, 'fps': fps, 'video': video} if video: info['encoding'] = get_default_encoding() return (hf_dataset, episode_data_index, info) # File: lerobot-main/lerobot/common/datasets/push_dataset_to_hub/cam_png_format.py """""" from pathlib import Path import torch from datasets import Dataset, Features, Image, Value from PIL import Image as PILImage from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes from lerobot.common.datasets.utils import calculate_episode_data_index, hf_transform_to_torch from lerobot.common.datasets.video_utils import VideoFrame def check_format(raw_dir: Path) -> bool: image_paths = list(raw_dir.glob('frame_*.png')) if len(image_paths) == 0: raise ValueError def load_from_raw(raw_dir: Path, fps: int, episodes: list[int] | None=None): if episodes is not None: raise NotImplementedError() ep_dict = {} ep_idx = 0 image_paths = sorted(raw_dir.glob('frame_*.png')) num_frames = len(image_paths) ep_dict['observation.image'] = [PILImage.open(x) for x in image_paths] ep_dict['episode_index'] = torch.tensor([ep_idx] * num_frames) ep_dict['frame_index'] = torch.arange(0, num_frames, 1) ep_dict['timestamp'] = torch.arange(0, num_frames, 1) / fps ep_dicts = [ep_dict] data_dict = concatenate_episodes(ep_dicts) total_frames = data_dict['frame_index'].shape[0] data_dict['index'] = torch.arange(0, total_frames, 1) return data_dict def to_hf_dataset(data_dict, video) -> Dataset: features = {} if video: features['observation.image'] = VideoFrame() else: features['observation.image'] = Image() features['episode_index'] = Value(dtype='int64', id=None) features['frame_index'] = Value(dtype='int64', id=None) features['timestamp'] = Value(dtype='float32', id=None) features['index'] = Value(dtype='int64', id=None) hf_dataset = Dataset.from_dict(data_dict, features=Features(features)) hf_dataset.set_transform(hf_transform_to_torch) return hf_dataset def from_raw_to_lerobot_format(raw_dir: Path, 
videos_dir: Path, fps: int | None=None, video: bool=True, episodes: list[int] | None=None, encoding: dict | None=None): if video or episodes or encoding is not None: raise NotImplementedError check_format(raw_dir) if fps is None: fps = 30 data_dict = load_from_raw(raw_dir, fps, episodes) hf_dataset = to_hf_dataset(data_dict, video) episode_data_index = calculate_episode_data_index(hf_dataset) info = {'codebase_version': CODEBASE_VERSION, 'fps': fps, 'video': video} return (hf_dataset, episode_data_index, info) # File: lerobot-main/lerobot/common/datasets/push_dataset_to_hub/dora_parquet_format.py """""" import re import warnings from pathlib import Path import pandas as pd import torch from datasets import Dataset, Features, Image, Sequence, Value from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION from lerobot.common.datasets.utils import calculate_episode_data_index, hf_transform_to_torch from lerobot.common.datasets.video_utils import VideoFrame def check_format(raw_dir) -> bool: assert raw_dir.exists() leader_file = list(raw_dir.glob('*.parquet')) if len(leader_file) == 0: raise ValueError(f"Missing parquet files in '{raw_dir}'") return True def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episodes: list[int] | None=None): reference_files = list(raw_dir.glob('observation.images.cam_*.parquet')) if len(reference_files) == 0: raise ValueError(f"Missing camera reference files (expected 'observation.images.cam_*.parquet') in '{raw_dir}'") reference_key = sorted(reference_files)[0].stem reference_df = pd.read_parquet(raw_dir / f'{reference_key}.parquet') reference_df = reference_df[['timestamp_utc', reference_key]] df = reference_df for path in raw_dir.glob('*.parquet'): key = path.stem if key == reference_key: continue if 'failed_episode_index' in key: continue modality_df = pd.read_parquet(path) modality_df = modality_df[['timestamp_utc', key]] df = pd.merge_asof(df, modality_df, on='timestamp_utc', direction='nearest', tolerance=pd.Timedelta(f'{1 / fps} seconds')) df = df[df['episode_index'] != -1] image_keys = [key for key in df if 'observation.images.' in key] def get_episode_index(row): episode_index_per_cam = {} for key in image_keys: path = row[key][0]['path'] match = re.search('_(\\d{6}).mp4', path) if not match: raise ValueError(path) episode_index = int(match.group(1)) episode_index_per_cam[key] = episode_index if len(set(episode_index_per_cam.values())) != 1: raise ValueError(f'All cameras are expected to belong to the same episode, but got {episode_index_per_cam}') return episode_index df['episode_index'] = df.apply(get_episode_index, axis=1) df['frame_index'] = df.groupby('episode_index').cumcount() df = df.reset_index() df['index'] = df.index df['next.done'] = False df.loc[df.groupby('episode_index').tail(1).index, 'next.done'] = True df['timestamp'] = df['timestamp_utc'].map(lambda x: x.timestamp()) df['timestamp'] = df.groupby('episode_index')['timestamp'].transform(lambda x: x - x.iloc[0]) del df['timestamp_utc'] has_nan = df.isna().any().any() if has_nan: raise ValueError('Dataset contains NaN values.') ep_ids = [ep_idx for (ep_idx, _) in df.groupby('episode_index')] expected_ep_ids = list(range(df['episode_index'].max() + 1)) if ep_ids != expected_ep_ids: raise ValueError(f'Episode indices are {ep_ids} instead of the expected {expected_ep_ids}') videos_dir.parent.mkdir(parents=True, exist_ok=True) videos_dir.symlink_to((raw_dir / 'videos').absolute()) for key in df: if 'observation.images.' 
not in key: continue for ep_idx in ep_ids: video_path = videos_dir / f'{key}_episode_{ep_idx:06d}.mp4' if not video_path.exists(): raise ValueError(f'Video file not found in {video_path}') data_dict = {} for key in df: if 'observation.images.' in key: data_dict[key] = [video_frame[0] for video_frame in df[key].values] video_path = videos_dir.parent / data_dict[key][0]['path'] if not video_path.exists(): raise ValueError(f'Video file not found in {video_path}') elif df[key].iloc[0].ndim == 0 or df[key].iloc[0].shape[0] == 1: data_dict[key] = torch.from_numpy(df[key].values) elif df[key].iloc[0].shape[0] > 1: data_dict[key] = torch.stack([torch.from_numpy(x.copy()) for x in df[key].values]) else: raise ValueError(key) return data_dict def to_hf_dataset(data_dict, video) -> Dataset: features = {} keys = [key for key in data_dict if 'observation.images.' in key] for key in keys: if video: features[key] = VideoFrame() else: features[key] = Image() features['observation.state'] = Sequence(length=data_dict['observation.state'].shape[1], feature=Value(dtype='float32', id=None)) if 'observation.velocity' in data_dict: features['observation.velocity'] = Sequence(length=data_dict['observation.velocity'].shape[1], feature=Value(dtype='float32', id=None)) if 'observation.effort' in data_dict: features['observation.effort'] = Sequence(length=data_dict['observation.effort'].shape[1], feature=Value(dtype='float32', id=None)) features['action'] = Sequence(length=data_dict['action'].shape[1], feature=Value(dtype='float32', id=None)) features['episode_index'] = Value(dtype='int64', id=None) features['frame_index'] = Value(dtype='int64', id=None) features['timestamp'] = Value(dtype='float32', id=None) features['next.done'] = Value(dtype='bool', id=None) features['index'] = Value(dtype='int64', id=None) hf_dataset = Dataset.from_dict(data_dict, features=Features(features)) hf_dataset.set_transform(hf_transform_to_torch) return hf_dataset def from_raw_to_lerobot_format(raw_dir: Path, videos_dir: Path, fps: int | None=None, video: bool=True, episodes: list[int] | None=None, encoding: dict | None=None): check_format(raw_dir) if fps is None: fps = 30 else: raise NotImplementedError() if not video: raise NotImplementedError() if encoding is not None: warnings.warn('Video encoding is currently done outside of LeRobot for the dora_parquet format.', stacklevel=1) data_df = load_from_raw(raw_dir, videos_dir, fps, episodes) hf_dataset = to_hf_dataset(data_df, video) episode_data_index = calculate_episode_data_index(hf_dataset) info = {'codebase_version': CODEBASE_VERSION, 'fps': fps, 'video': video} if video: info['encoding'] = 'unknown' return (hf_dataset, episode_data_index, info) # File: lerobot-main/lerobot/common/datasets/push_dataset_to_hub/openx/data_utils.py """""" from typing import Any, Dict, List import tensorflow as tf def binarize_gripper_actions(actions: tf.Tensor) -> tf.Tensor: (open_mask, closed_mask) = (actions > 0.95, actions < 0.05) in_between_mask = tf.logical_not(tf.logical_or(open_mask, closed_mask)) is_open_float = tf.cast(open_mask, tf.float32) def scan_fn(carry, i): return tf.cond(in_between_mask[i], lambda : tf.cast(carry, tf.float32), lambda : is_open_float[i]) return tf.scan(scan_fn, tf.range(tf.shape(actions)[0]), actions[-1], reverse=True) def invert_gripper_actions(actions: tf.Tensor) -> tf.Tensor: return 1 - actions def rel2abs_gripper_actions(actions: tf.Tensor) -> tf.Tensor: (opening_mask, closing_mask) = (actions < -0.1, actions > 0.1) thresholded_actions = tf.where(opening_mask, 1, 
tf.where(closing_mask, -1, 0)) def scan_fn(carry, i): return tf.cond(thresholded_actions[i] == 0, lambda : carry, lambda : thresholded_actions[i]) start = -1 * thresholded_actions[tf.argmax(thresholded_actions != 0, axis=0)] start = tf.cond(start == 0, lambda : 1, lambda : start) new_actions = tf.scan(scan_fn, tf.range(tf.shape(actions)[0]), start) new_actions = tf.cast(new_actions, tf.float32) / 2 + 0.5 return new_actions def relabel_bridge_actions(traj: Dict[str, Any]) -> Dict[str, Any]: movement_actions = traj['observation']['state'][1:, :6] - traj['observation']['state'][:-1, :6] traj_truncated = tf.nest.map_structure(lambda x: x[:-1], traj) traj_truncated['action'] = tf.concat([movement_actions, traj['action'][:-1, -1:]], axis=1) return traj_truncated def pprint_data_mixture(dataset_kwargs_list: List[Dict[str, Any]], dataset_weights: List[int]) -> None: print('\n######################################################################################') print(f"# Loading the following {len(dataset_kwargs_list)} datasets (incl. sampling weight):{'': >24} #") for (dataset_kwargs, weight) in zip(dataset_kwargs_list, dataset_weights, strict=False): pad = 80 - len(dataset_kwargs['name']) print(f"# {dataset_kwargs['name']}: {weight:=>{pad}f} #") print('######################################################################################\n') # File: lerobot-main/lerobot/common/datasets/push_dataset_to_hub/openx/droid_utils.py """""" from typing import Any, Dict import tensorflow as tf import tensorflow_graphics.geometry.transformation as tfg def rmat_to_euler(rot_mat): return tfg.euler.from_rotation_matrix(rot_mat) def euler_to_rmat(euler): return tfg.rotation_matrix_3d.from_euler(euler) def invert_rmat(rot_mat): return tfg.rotation_matrix_3d.inverse(rot_mat) def rotmat_to_rot6d(mat): r6 = mat[..., :2, :] (r6_0, r6_1) = (r6[..., 0, :], r6[..., 1, :]) r6_flat = tf.concat([r6_0, r6_1], axis=-1) return r6_flat def velocity_act_to_wrist_frame(velocity, wrist_in_robot_frame): r_frame = euler_to_rmat(wrist_in_robot_frame[:, 3:6]) r_frame_inv = invert_rmat(r_frame) vel_t = (r_frame_inv @ velocity[:, :3][..., None])[..., 0] dr_ = euler_to_rmat(velocity[:, 3:6]) dr_ = r_frame_inv @ (dr_ @ r_frame) dr_r6 = rotmat_to_rot6d(dr_) return tf.concat([vel_t, dr_r6], axis=-1) def rand_swap_exterior_images(img1, img2): return tf.cond(tf.random.uniform(shape=[]) > 0.5, lambda : (img1, img2), lambda : (img2, img1)) def droid_baseact_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: dt = trajectory['action_dict']['cartesian_velocity'][:, :3] dr_ = trajectory['action_dict']['cartesian_velocity'][:, 3:6] trajectory['action'] = tf.concat((dt, dr_, 1 - trajectory['action_dict']['gripper_position']), axis=-1) (trajectory['observation']['exterior_image_1_left'], trajectory['observation']['exterior_image_2_left']) = rand_swap_exterior_images(trajectory['observation']['exterior_image_1_left'], trajectory['observation']['exterior_image_2_left']) trajectory['observation']['proprio'] = tf.concat((trajectory['observation']['cartesian_position'], trajectory['observation']['gripper_position']), axis=-1) return trajectory def droid_wristact_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: wrist_act = velocity_act_to_wrist_frame(trajectory['action_dict']['cartesian_velocity'], trajectory['observation']['cartesian_position']) trajectory['action'] = tf.concat((wrist_act, trajectory['action_dict']['gripper_position']), axis=-1) (trajectory['observation']['exterior_image_1_left'], 
trajectory['observation']['exterior_image_2_left']) = rand_swap_exterior_images(trajectory['observation']['exterior_image_1_left'], trajectory['observation']['exterior_image_2_left']) trajectory['observation']['proprio'] = tf.concat((trajectory['observation']['cartesian_position'], trajectory['observation']['gripper_position']), axis=-1) return trajectory def droid_finetuning_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: dt = trajectory['action_dict']['cartesian_velocity'][:, :3] dr_ = trajectory['action_dict']['cartesian_velocity'][:, 3:6] trajectory['action'] = tf.concat((dt, dr_, 1 - trajectory['action_dict']['gripper_position']), axis=-1) trajectory['observation']['proprio'] = tf.concat((trajectory['observation']['cartesian_position'], trajectory['observation']['gripper_position']), axis=-1) return trajectory def zero_action_filter(traj: Dict) -> bool: droid_q01 = tf.convert_to_tensor([-0.7776297926902771, -0.5803514122962952, -0.5795090794563293, -0.6464047729969025, -0.7041108310222626, -0.8895104378461838]) droid_q99 = tf.convert_to_tensor([0.7597932070493698, 0.5726242214441299, 0.7351000607013702, 0.6705610305070877, 0.6464948207139969, 0.8897542208433151]) droid_norm_0_act = 2 * (tf.zeros_like(traj['action'][:, :6]) - droid_q01) / (droid_q99 - droid_q01 + 1e-08) - 1 return tf.reduce_any(tf.math.abs(traj['action'][:, :6] - droid_norm_0_act) > 1e-05) # File: lerobot-main/lerobot/common/datasets/push_dataset_to_hub/openx/transforms.py """""" from typing import Any, Dict import tensorflow as tf from lerobot.common.datasets.push_dataset_to_hub.openx.data_utils import binarize_gripper_actions, invert_gripper_actions, rel2abs_gripper_actions, relabel_bridge_actions def droid_baseact_transform_fn(): from lerobot.common.datasets.push_dataset_to_hub.openx.droid_utils import droid_baseact_transform return droid_baseact_transform def bridge_openx_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: for key in trajectory: if key == 'traj_metadata': continue elif key in ['observation', 'action']: for key2 in trajectory[key]: trajectory[key][key2] = trajectory[key][key2][1:] else: trajectory[key] = trajectory[key][1:] trajectory['action'] = tf.concat((trajectory['action']['world_vector'], trajectory['action']['rotation_delta'], tf.cast(trajectory['action']['open_gripper'][:, None], tf.float32)), axis=-1) trajectory['language_instruction'] = trajectory['observation']['natural_language_instruction'] trajectory = relabel_bridge_actions(trajectory) trajectory['observation']['EEF_state'] = trajectory['observation']['state'][:, :6] trajectory['observation']['gripper_state'] = trajectory['observation']['state'][:, -1:] return trajectory def bridge_orig_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: for key in trajectory: if key == 'traj_metadata': continue elif key == 'observation': for key2 in trajectory[key]: trajectory[key][key2] = trajectory[key][key2][1:] else: trajectory[key] = trajectory[key][1:] trajectory['action'] = tf.concat([trajectory['action'][:, :6], binarize_gripper_actions(trajectory['action'][:, -1])[:, None]], axis=1) trajectory = relabel_bridge_actions(trajectory) trajectory['observation']['EEF_state'] = trajectory['observation']['state'][:, :6] trajectory['observation']['gripper_state'] = trajectory['observation']['state'][:, -1:] return trajectory def ppgm_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['action'] = tf.concat([trajectory['action'][:, :6], binarize_gripper_actions(trajectory['action'][:, -1])[:, 
None]], axis=1) trajectory['observation']['EEF_state'] = trajectory['observation']['cartesian_position'][:, :6] trajectory['observation']['gripper_state'] = trajectory['observation']['gripper_position'][:, -1:] return trajectory def rt1_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: gripper_action = trajectory['action']['gripper_closedness_action'][:, 0] gripper_action = rel2abs_gripper_actions(gripper_action) trajectory['action'] = tf.concat((trajectory['action']['world_vector'], trajectory['action']['rotation_delta'], gripper_action[:, None]), axis=-1) trajectory['language_instruction'] = trajectory['observation']['natural_language_instruction'] return trajectory def kuka_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: gripper_action = trajectory['action']['gripper_closedness_action'][:, 0] gripper_action = rel2abs_gripper_actions(gripper_action) trajectory['action'] = tf.concat((trajectory['action']['world_vector'], trajectory['action']['rotation_delta'], gripper_action[:, None]), axis=-1) eef_value = tf.io.decode_compressed(trajectory['observation']['clip_function_input/base_pose_tool_reached'], compression_type='ZLIB') eef_value = tf.io.decode_raw(eef_value, tf.float32) trajectory['observation']['clip_function_input/base_pose_tool_reached'] = tf.reshape(eef_value, (-1, 7)) gripper_value = tf.io.decode_compressed(trajectory['observation']['gripper_closed'], compression_type='ZLIB') gripper_value = tf.io.decode_raw(gripper_value, tf.float32) trajectory['observation']['gripper_closed'] = tf.reshape(gripper_value, (-1, 1)) trajectory['language_instruction'] = trajectory['observation']['natural_language_instruction'] return trajectory def taco_play_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['state_eef'] = trajectory['observation']['robot_obs'][:, :6] trajectory['observation']['state_gripper'] = trajectory['observation']['robot_obs'][:, 7:8] trajectory['action'] = trajectory['action']['rel_actions_world'] trajectory['action'] = tf.concat((trajectory['action'][:, :6], tf.clip_by_value(trajectory['action'][:, -1:], 0, 1)), axis=-1) trajectory['language_instruction'] = trajectory['observation']['natural_language_instruction'] return trajectory def jaco_play_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['state_eef'] = trajectory['observation']['end_effector_cartesian_pos'][:, :6] trajectory['observation']['state_gripper'] = trajectory['observation']['end_effector_cartesian_pos'][:, -1:] gripper_action = trajectory['action']['gripper_closedness_action'][:, 0] gripper_action = rel2abs_gripper_actions(gripper_action) trajectory['action'] = tf.concat((trajectory['action']['world_vector'], tf.zeros_like(trajectory['action']['world_vector']), gripper_action[:, None]), axis=-1) trajectory['language_instruction'] = trajectory['observation']['natural_language_instruction'] return trajectory def berkeley_cable_routing_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['action'] = tf.concat((trajectory['action']['world_vector'], trajectory['action']['rotation_delta'], tf.zeros_like(trajectory['action']['world_vector'][:, :1])), axis=-1) trajectory['language_instruction'] = trajectory['observation']['natural_language_instruction'] return trajectory def roboturk_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: gripper_action = invert_gripper_actions(tf.clip_by_value(trajectory['action']['gripper_closedness_action'], 0, 1)) 
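# The RoboTurk gripper signal is clipped to [0, 1] and inverted above, presumably to align its open/close convention with the other OpenX transforms in this file, before being concatenated into the 7-D action below.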
trajectory['action'] = tf.concat((trajectory['action']['world_vector'], trajectory['action']['rotation_delta'], gripper_action), axis=-1) trajectory['language_instruction'] = trajectory['observation']['natural_language_instruction'] trajectory['language_embedding'] = trajectory['observation']['natural_language_embedding'] return trajectory def nyu_door_opening_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: gripper_action = trajectory['action']['gripper_closedness_action'][:, 0] gripper_action = rel2abs_gripper_actions(gripper_action) trajectory['action'] = tf.concat((trajectory['action']['world_vector'], trajectory['action']['rotation_delta'], gripper_action[:, None]), axis=-1) trajectory['language_instruction'] = trajectory['observation']['natural_language_instruction'] return trajectory def viola_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: gripper_action = trajectory['action']['gripper_closedness_action'][:, None] gripper_action = tf.clip_by_value(gripper_action, 0, 1) gripper_action = invert_gripper_actions(gripper_action) trajectory['action'] = tf.concat((trajectory['action']['world_vector'], trajectory['action']['rotation_delta'], gripper_action), axis=-1) trajectory['language_instruction'] = trajectory['observation']['natural_language_instruction'] return trajectory def berkeley_autolab_ur5_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['state'] = trajectory['observation']['robot_state'][:, 6:14] gripper_action = trajectory['action']['gripper_closedness_action'] gripper_action = rel2abs_gripper_actions(gripper_action) trajectory['action'] = tf.concat((trajectory['action']['world_vector'], trajectory['action']['rotation_delta'], gripper_action[:, None]), axis=-1) trajectory['language_instruction'] = trajectory['observation']['natural_language_instruction'] return trajectory def toto_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['action'] = tf.concat((trajectory['action']['world_vector'], trajectory['action']['rotation_delta'], tf.cast(trajectory['action']['open_gripper'][:, None], tf.float32)), axis=-1) trajectory['language_instruction'] = trajectory['observation']['natural_language_instruction'] return trajectory def language_table_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['action'] = tf.concat((trajectory['action'], tf.zeros_like(trajectory['action']), tf.zeros_like(trajectory['action']), tf.ones_like(trajectory['action'][:, :1])), axis=-1) instruction_bytes = trajectory['observation']['instruction'] instruction_encoded = tf.strings.unicode_encode(instruction_bytes, output_encoding='UTF-8') trajectory['language_instruction'] = tf.strings.split(instruction_encoded, '\x00')[:, :1].to_tensor()[:, 0] return trajectory def pusht_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['action'] = tf.concat((trajectory['action']['world_vector'], trajectory['action']['rotation_delta'], trajectory['action']['gripper_closedness_action'][:, None]), axis=-1) trajectory['language_instruction'] = trajectory['observation']['natural_language_instruction'] return trajectory def stanford_kuka_multimodal_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['depth_image'] = trajectory['observation']['depth_image'][..., 0] trajectory['action'] = tf.concat((trajectory['action'][:, :3], tf.zeros_like(trajectory['action'][:, :3]), trajectory['action'][:, -1:]), axis=-1) return trajectory def 
nyu_rot_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['eef_state'] = trajectory['observation']['state'][..., :6] trajectory['observation']['gripper_state'] = trajectory['observation']['state'][..., -1:] trajectory['action'] = trajectory['action'][..., :7] return trajectory def stanford_hydra_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['action'] = tf.concat((trajectory['action'][:, :6], invert_gripper_actions(trajectory['action'][:, -1:])), axis=-1) trajectory['observation']['eef_state'] = tf.concat((trajectory['observation']['state'][:, :3], trajectory['observation']['state'][:, 7:10]), axis=-1) trajectory['observation']['gripper_state'] = trajectory['observation']['state'][:, -3:-2] return trajectory def austin_buds_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['action'] = tf.concat((trajectory['action'][:, :6], invert_gripper_actions(tf.clip_by_value(trajectory['action'][:, -1:], 0, 1))), axis=-1) trajectory['observation']['state'] = trajectory['observation']['state'][:, :8] return trajectory def nyu_franka_play_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['depth'] = tf.cast(trajectory['observation']['depth'][..., 0], tf.float32) trajectory['observation']['depth_additional_view'] = tf.cast(trajectory['observation']['depth_additional_view'][..., 0], tf.float32) trajectory['observation']['eef_state'] = trajectory['observation']['state'][:, -6:] trajectory['action'] = tf.concat((trajectory['action'][:, -8:-2], tf.clip_by_value(trajectory['action'][:, -2:-1], 0, 1)), axis=-1) return trajectory def maniskill_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['gripper_state'] = trajectory['observation']['state'][..., 7:8] return trajectory def furniture_bench_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: import tensorflow_graphics.geometry.transformation as tft trajectory['observation']['state'] = tf.concat((trajectory['observation']['state'][:, :7], trajectory['observation']['state'][:, -1:]), axis=-1) trajectory['action'] = tf.concat((trajectory['action'][:, :3], tft.euler.from_quaternion(trajectory['action'][:, 3:7]), invert_gripper_actions(tf.clip_by_value(trajectory['action'][:, -1:], 0, 1))), axis=-1) return trajectory def cmu_franka_exploration_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['action'] = trajectory['action'][..., :-1] return trajectory def ucsd_kitchen_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['joint_state'] = trajectory['observation']['state'][:, :7] trajectory['action'] = trajectory['action'][..., :-1] return trajectory def ucsd_pick_place_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['eef_state'] = trajectory['observation']['state'][:, :6] trajectory['observation']['gripper_state'] = trajectory['observation']['state'][:, -1:] trajectory['action'] = tf.concat((trajectory['action'][:, :3], tf.zeros_like(trajectory['action'][:, :3]), trajectory['action'][:, -1:]), axis=-1) return trajectory def austin_sailor_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['action'] = tf.concat((trajectory['action'][:, :6], invert_gripper_actions(tf.clip_by_value(trajectory['action'][:, -1:], 0, 1))), axis=-1) return trajectory def austin_sirius_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['action'] = 
tf.concat((trajectory['action'][:, :6], invert_gripper_actions(tf.clip_by_value(trajectory['action'][:, -1:], 0, 1))), axis=-1) return trajectory def bc_z_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['action'] = tf.concat((trajectory['action']['future/xyz_residual'][:, :3], trajectory['action']['future/axis_angle_residual'][:, :3], invert_gripper_actions(tf.cast(trajectory['action']['future/target_close'][:, :1], tf.float32))), axis=-1) trajectory['language_instruction'] = trajectory['observation']['natural_language_instruction'] return trajectory def tokyo_pr2_opening_fridge_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['eef_state'] = trajectory['observation']['state'][:, :6] trajectory['observation']['gripper_state'] = trajectory['observation']['state'][:, -1:] trajectory['action'] = trajectory['action'][..., :-1] return trajectory def tokyo_pr2_tabletop_manipulation_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['eef_state'] = trajectory['observation']['state'][:, :6] trajectory['observation']['gripper_state'] = trajectory['observation']['state'][:, -1:] trajectory['action'] = trajectory['action'][..., :-1] return trajectory def utokyo_xarm_bimanual_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['action'] = trajectory['action'][..., -7:] return trajectory def robo_net_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['eef_state'] = tf.concat((trajectory['observation']['state'][:, :4], tf.zeros_like(trajectory['observation']['state'][:, :2])), axis=-1) trajectory['observation']['gripper_state'] = trajectory['observation']['state'][:, -1:] trajectory['action'] = tf.concat((trajectory['action'][:, :4], tf.zeros_like(trajectory['action'][:, :2]), trajectory['action'][:, -1:]), axis=-1) return trajectory def berkeley_mvp_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['gripper'] = tf.cast(trajectory['observation']['gripper'][:, None], tf.float32) return trajectory def berkeley_rpt_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['gripper'] = tf.cast(trajectory['observation']['gripper'][:, None], tf.float32) return trajectory def kaist_nonprehensible_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['state'] = trajectory['observation']['state'][:, -7:] trajectory['action'] = tf.concat((trajectory['action'][:, :6], tf.zeros_like(trajectory['action'][:, :1])), axis=-1) return trajectory def stanford_mask_vit_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['eef_state'] = tf.concat((trajectory['observation']['end_effector_pose'][:, :4], tf.zeros_like(trajectory['observation']['end_effector_pose'][:, :2])), axis=-1) trajectory['observation']['gripper_state'] = trajectory['observation']['end_effector_pose'][:, -1:] trajectory['action'] = tf.concat((trajectory['action'][:, :4], tf.zeros_like(trajectory['action'][:, :2]), trajectory['action'][:, -1:]), axis=-1) return trajectory def tokyo_lsmo_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['eef_state'] = trajectory['observation']['state'][:, :6] trajectory['observation']['gripper_state'] = trajectory['observation']['state'][:, -1:] return trajectory def dlr_sara_grid_clamp_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: 
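# Keeps only the first six entries of the state vector for the DLR SARA grid-clamp dataset; the remaining state dimensions are dropped before conversion.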
trajectory['observation']['state'] = trajectory['observation']['state'][:, :6] return trajectory def dlr_edan_shared_control_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['action'] = tf.concat((trajectory['action'][:, :6], invert_gripper_actions(trajectory['action'][:, -1:])), axis=-1) return trajectory def asu_table_top_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['eef_state'] = trajectory['ground_truth_states']['EE'] trajectory['observation']['gripper_state'] = trajectory['observation']['state'][:, -1:] return trajectory def robocook_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['eef_state'] = trajectory['observation']['state'][:, :6] trajectory['observation']['gripper_state'] = trajectory['observation']['state'][:, -1:] return trajectory def imperial_wristcam_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['action'] = trajectory['action'][..., :-1] return trajectory def iamlab_pick_insert_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: import tensorflow_graphics.geometry.transformation as tft trajectory['observation']['joint_state'] = trajectory['observation']['state'][:, :7] trajectory['observation']['gripper_state'] = trajectory['observation']['state'][:, 7:8] trajectory['action'] = tf.concat((trajectory['action'][:, :3], tft.euler.from_quaternion(trajectory['action'][:, 3:7]), trajectory['action'][:, 7:8]), axis=-1) return trajectory def uiuc_d3field_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['action'] = tf.concat((trajectory['action'], tf.zeros_like(trajectory['action']), tf.zeros_like(trajectory['action'][:, :1])), axis=-1) return trajectory def utaustin_mutex_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['state'] = trajectory['observation']['state'][:, :8] trajectory['action'] = tf.concat((trajectory['action'][:, :6], invert_gripper_actions(tf.clip_by_value(trajectory['action'][:, -1:], 0, 1))), axis=-1) return trajectory def berkeley_fanuc_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['joint_state'] = trajectory['observation']['state'][:, :6] trajectory['observation']['gripper_state'] = trajectory['observation']['state'][:, 6:7] trajectory['action'] = tf.concat((trajectory['action'], invert_gripper_actions(trajectory['observation']['gripper_state'])), axis=-1) return trajectory def cmu_playing_with_food_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: import tensorflow_graphics.geometry.transformation as tft trajectory['action'] = tf.concat((trajectory['action'][:, :3], tft.euler.from_quaternion(trajectory['action'][:, 3:7]), trajectory['action'][:, -1:]), axis=-1) return trajectory def playfusion_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['action'] = tf.concat((trajectory['action'][:, :3], trajectory['action'][:, -4:]), axis=-1) return trajectory def cmu_stretch_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['eef_state'] = tf.concat((trajectory['observation']['state'][:, :3], tf.zeros_like(trajectory['observation']['state'][:, :3])), axis=-1) trajectory['observation']['gripper_state'] = trajectory['observation']['state'][:, -1:] trajectory['action'] = trajectory['action'][..., :-1] return trajectory def gnm_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: 
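# For the GNM navigation datasets, the state below is rebuilt from position and yaw with zero padding in between, and the planar waypoint action is zero-padded, presumably so its width matches the arm-style action layout used by the other transforms.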
trajectory['observation']['state'] = tf.concat((trajectory['observation']['position'], tf.zeros_like(trajectory['observation']['state'][:, :3]), trajectory['observation']['yaw']), axis=-1) trajectory['action'] = tf.concat((trajectory['action'], tf.zeros_like(trajectory['action']), tf.zeros_like(trajectory['action']), tf.zeros_like(trajectory['action'][:, :1])), axis=-1) return trajectory def fmb_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['proprio'] = tf.concat((trajectory['observation']['eef_pose'], trajectory['observation']['state_gripper_pose'][..., None]), axis=-1) return trajectory def dobbe_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: trajectory['observation']['proprio'] = trajectory['observation']['state'] return trajectory def robo_set_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: gripper_action = trajectory['action'][:, -1:] gripper_action = invert_gripper_actions(tf.clip_by_value(gripper_action, 0, 1)) trajectory['action'] = tf.concat((trajectory['action'][:, :7], gripper_action), axis=-1) return trajectory def identity_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: return trajectory OPENX_STANDARDIZATION_TRANSFORMS = {'bridge_openx': bridge_openx_dataset_transform, 'bridge_orig': bridge_orig_dataset_transform, 'bridge_dataset': bridge_orig_dataset_transform, 'ppgm': ppgm_dataset_transform, 'ppgm_static': ppgm_dataset_transform, 'ppgm_wrist': ppgm_dataset_transform, 'fractal20220817_data': rt1_dataset_transform, 'kuka': kuka_dataset_transform, 'taco_play': taco_play_dataset_transform, 'jaco_play': jaco_play_dataset_transform, 'berkeley_cable_routing': berkeley_cable_routing_dataset_transform, 'roboturk': roboturk_dataset_transform, 'nyu_door_opening_surprising_effectiveness': nyu_door_opening_dataset_transform, 'viola': viola_dataset_transform, 'berkeley_autolab_ur5': berkeley_autolab_ur5_dataset_transform, 'toto': toto_dataset_transform, 'language_table': language_table_dataset_transform, 'columbia_cairlab_pusht_real': pusht_dataset_transform, 'stanford_kuka_multimodal_dataset_converted_externally_to_rlds': stanford_kuka_multimodal_dataset_transform, 'nyu_rot_dataset_converted_externally_to_rlds': nyu_rot_dataset_transform, 'stanford_hydra_dataset_converted_externally_to_rlds': stanford_hydra_dataset_transform, 'austin_buds_dataset_converted_externally_to_rlds': austin_buds_dataset_transform, 'nyu_franka_play_dataset_converted_externally_to_rlds': nyu_franka_play_dataset_transform, 'maniskill_dataset_converted_externally_to_rlds': maniskill_dataset_transform, 'furniture_bench_dataset_converted_externally_to_rlds': furniture_bench_dataset_transform, 'cmu_franka_exploration_dataset_converted_externally_to_rlds': cmu_franka_exploration_dataset_transform, 'ucsd_kitchen_dataset_converted_externally_to_rlds': ucsd_kitchen_dataset_transform, 'ucsd_pick_and_place_dataset_converted_externally_to_rlds': ucsd_pick_place_dataset_transform, 'austin_sailor_dataset_converted_externally_to_rlds': austin_sailor_dataset_transform, 'austin_sirius_dataset_converted_externally_to_rlds': austin_sirius_dataset_transform, 'bc_z': bc_z_dataset_transform, 'utokyo_pr2_opening_fridge_converted_externally_to_rlds': tokyo_pr2_opening_fridge_dataset_transform, 'utokyo_pr2_tabletop_manipulation_converted_externally_to_rlds': tokyo_pr2_tabletop_manipulation_dataset_transform, 'utokyo_xarm_pick_and_place_converted_externally_to_rlds': identity_transform, 'utokyo_xarm_bimanual_converted_externally_to_rlds': 
utokyo_xarm_bimanual_dataset_transform, 'robo_net': robo_net_dataset_transform, 'berkeley_mvp_converted_externally_to_rlds': berkeley_mvp_dataset_transform, 'berkeley_rpt_converted_externally_to_rlds': berkeley_rpt_dataset_transform, 'kaist_nonprehensile_converted_externally_to_rlds': kaist_nonprehensible_dataset_transform, 'stanford_mask_vit_converted_externally_to_rlds': stanford_mask_vit_dataset_transform, 'tokyo_u_lsmo_converted_externally_to_rlds': tokyo_lsmo_dataset_transform, 'dlr_sara_pour_converted_externally_to_rlds': identity_transform, 'dlr_sara_grid_clamp_converted_externally_to_rlds': dlr_sara_grid_clamp_dataset_transform, 'dlr_edan_shared_control_converted_externally_to_rlds': dlr_edan_shared_control_dataset_transform, 'asu_table_top_converted_externally_to_rlds': asu_table_top_dataset_transform, 'stanford_robocook_converted_externally_to_rlds': robocook_dataset_transform, 'imperialcollege_sawyer_wrist_cam': imperial_wristcam_dataset_transform, 'iamlab_cmu_pickup_insert_converted_externally_to_rlds': iamlab_pick_insert_dataset_transform, 'uiuc_d3field': uiuc_d3field_dataset_transform, 'utaustin_mutex': utaustin_mutex_dataset_transform, 'berkeley_fanuc_manipulation': berkeley_fanuc_dataset_transform, 'cmu_playing_with_food': cmu_playing_with_food_dataset_transform, 'cmu_play_fusion': playfusion_dataset_transform, 'cmu_stretch': cmu_stretch_dataset_transform, 'berkeley_gnm_recon': gnm_dataset_transform, 'berkeley_gnm_cory_hall': gnm_dataset_transform, 'berkeley_gnm_sac_son': gnm_dataset_transform, 'droid': droid_baseact_transform_fn(), 'droid_100': droid_baseact_transform_fn(), 'fmb': fmb_transform, 'dobbe': dobbe_dataset_transform, 'robo_set': robo_set_dataset_transform, 'usc_cloth_sim_converted_externally_to_rlds': identity_transform, 'plex_robosuite': identity_transform, 'conq_hose_manipulation': identity_transform, 'io_ai_tech': identity_transform, 'spoc': identity_transform} # File: lerobot-main/lerobot/common/datasets/push_dataset_to_hub/openx_rlds_format.py """""" import shutil from pathlib import Path import numpy as np import tensorflow as tf import tensorflow_datasets as tfds import torch import tqdm import yaml from datasets import Dataset, Features, Image, Sequence, Value from PIL import Image as PILImage from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION from lerobot.common.datasets.push_dataset_to_hub.openx.transforms import OPENX_STANDARDIZATION_TRANSFORMS from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes, get_default_encoding, save_images_concurrently from lerobot.common.datasets.utils import calculate_episode_data_index, hf_transform_to_torch from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames with open('lerobot/common/datasets/push_dataset_to_hub/openx/configs.yaml') as f: _openx_list = yaml.safe_load(f) OPENX_DATASET_CONFIGS = _openx_list['OPENX_DATASET_CONFIGS'] np.set_printoptions(precision=2) def tf_to_torch(data): return torch.from_numpy(data.numpy()) def tf_img_convert(img): if img.dtype == tf.string: img = tf.io.decode_image(img, expand_animations=False, dtype=tf.uint8) elif img.dtype != tf.uint8: raise ValueError(f'Unsupported image dtype: found with dtype {img.dtype}') return img.numpy() def _broadcast_metadata_rlds(i: tf.Tensor, traj: dict) -> dict: steps = traj.pop('steps') traj_len = tf.shape(tf.nest.flatten(steps)[0])[0] metadata = tf.nest.map_structure(lambda x: tf.repeat(x, traj_len), traj) assert 'traj_metadata' not in steps traj = {**steps, 'traj_metadata': 
metadata} assert '_len' not in traj assert '_traj_index' not in traj assert '_frame_index' not in traj traj['_len'] = tf.repeat(traj_len, traj_len) traj['_traj_index'] = tf.repeat(i, traj_len) traj['_frame_index'] = tf.range(traj_len) return traj def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episodes: list[int] | None=None, encoding: dict | None=None, openx_dataset_name: str | None=None): ds_builder = tfds.builder_from_directory(str(raw_dir)) dataset = ds_builder.as_dataset(split='all', decoders={'steps': tfds.decode.SkipDecoding()}) dataset_info = ds_builder.info print('dataset_info: ', dataset_info) ds_length = len(dataset) dataset = dataset.take(ds_length) dataset = dataset.enumerate().map(_broadcast_metadata_rlds) if openx_dataset_name is not None: print(' - applying standardization transform for dataset: ', openx_dataset_name) assert openx_dataset_name in OPENX_STANDARDIZATION_TRANSFORMS transform_fn = OPENX_STANDARDIZATION_TRANSFORMS[openx_dataset_name] dataset = dataset.map(transform_fn) image_keys = OPENX_DATASET_CONFIGS[openx_dataset_name]['image_obs_keys'] else: obs_keys = dataset_info.features['steps']['observation'].keys() image_keys = [key for key in obs_keys if 'image' in key] lang_key = 'language_instruction' if 'language_instruction' in dataset.element_spec else None print(' - image_keys: ', image_keys) print(' - lang_key: ', lang_key) it = iter(dataset) ep_dicts = [] tmp_ep_dicts_dir = videos_dir.parent.joinpath('ep_dicts') tmp_ep_dicts_dir.mkdir(parents=True, exist_ok=True) starting_ep_idx = 0 saved_ep_dicts = [ep.__str__() for ep in tmp_ep_dicts_dir.iterdir()] if len(saved_ep_dicts) > 0: saved_ep_dicts.sort() starting_ep_idx = int(saved_ep_dicts[-1][-13:-3]) + 1 for i in range(starting_ep_idx): episode = next(it) ep_dicts.append(torch.load(saved_ep_dicts[i])) if episodes is not None: if ds_length == 0: raise ValueError('No episodes found.') episodes = sorted(episodes) for ep_idx in tqdm.tqdm(range(starting_ep_idx, ds_length)): episode = next(it) if episodes is not None: if len(episodes) == 0: break if ep_idx == episodes[0]: print(' selecting episode idx: ', ep_idx) episodes.pop(0) else: continue num_frames = episode['action'].shape[0] done = torch.zeros(num_frames, dtype=torch.bool) done[-1] = True ep_dict = {} langs = [] image_array_dict = {key: [] for key in image_keys} if openx_dataset_name is not None: state_obs_keys = OPENX_DATASET_CONFIGS[openx_dataset_name]['state_obs_keys'] states = [] for key in state_obs_keys: if key in episode['observation']: states.append(tf_to_torch(episode['observation'][key])) else: states.append(torch.zeros(num_frames, 1)) states = torch.cat(states, dim=1) else: states = tf_to_torch(episode['observation']['state']) actions = tf_to_torch(episode['action']) rewards = tf_to_torch(episode['reward']).float() if lang_key is not None: langs = [str(x) for x in episode[lang_key]] for im_key in image_keys: imgs = episode['observation'][im_key] image_array_dict[im_key] = [tf_img_convert(img) for img in imgs] for item in [states, actions, rewards, done]: assert len(item) == num_frames for im_key in image_keys: img_key = f'observation.images.{im_key}' imgs_array = image_array_dict[im_key] imgs_array = np.array(imgs_array) if video: tmp_imgs_dir = videos_dir / 'tmp_images' save_images_concurrently(imgs_array, tmp_imgs_dir) fname = f'{img_key}_episode_{ep_idx:06d}.mp4' video_path = videos_dir / fname encode_video_frames(tmp_imgs_dir, video_path, fps, **encoding or {}) shutil.rmtree(tmp_imgs_dir) ep_dict[img_key] = [{'path': 
f'videos/{fname}', 'timestamp': i / fps} for i in range(num_frames)] else: ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array] if lang_key is not None: ep_dict['language_instruction'] = langs ep_dict['observation.state'] = states ep_dict['action'] = actions ep_dict['timestamp'] = torch.arange(0, num_frames, 1) / fps ep_dict['episode_index'] = torch.tensor([ep_idx] * num_frames) ep_dict['frame_index'] = torch.arange(0, num_frames, 1) ep_dict['next.reward'] = rewards ep_dict['next.done'] = done path_ep_dict = tmp_ep_dicts_dir.joinpath('ep_dict_' + '0' * (10 - len(str(ep_idx))) + str(ep_idx) + '.pt') torch.save(ep_dict, path_ep_dict) ep_dicts.append(ep_dict) data_dict = concatenate_episodes(ep_dicts) total_frames = data_dict['frame_index'].shape[0] data_dict['index'] = torch.arange(0, total_frames, 1) return data_dict def to_hf_dataset(data_dict, video) -> Dataset: features = {} keys = [key for key in data_dict if 'observation.images.' in key] for key in keys: if video: features[key] = VideoFrame() else: features[key] = Image() features['observation.state'] = Sequence(length=data_dict['observation.state'].shape[1], feature=Value(dtype='float32', id=None)) if 'observation.velocity' in data_dict: features['observation.velocity'] = Sequence(length=data_dict['observation.velocity'].shape[1], feature=Value(dtype='float32', id=None)) if 'observation.effort' in data_dict: features['observation.effort'] = Sequence(length=data_dict['observation.effort'].shape[1], feature=Value(dtype='float32', id=None)) if 'language_instruction' in data_dict: features['language_instruction'] = Value(dtype='string', id=None) features['action'] = Sequence(length=data_dict['action'].shape[1], feature=Value(dtype='float32', id=None)) features['episode_index'] = Value(dtype='int64', id=None) features['frame_index'] = Value(dtype='int64', id=None) features['timestamp'] = Value(dtype='float32', id=None) features['next.reward'] = Value(dtype='float32', id=None) features['next.done'] = Value(dtype='bool', id=None) features['index'] = Value(dtype='int64', id=None) hf_dataset = Dataset.from_dict(data_dict, features=Features(features)) hf_dataset.set_transform(hf_transform_to_torch) return hf_dataset def from_raw_to_lerobot_format(raw_dir: Path, videos_dir: Path, fps: int | None=None, video: bool=True, episodes: list[int] | None=None, encoding: dict | None=None, openx_dataset_name: str | None=None): if openx_dataset_name is None: fps = 30 elif 'fps' not in OPENX_DATASET_CONFIGS[openx_dataset_name]: raise ValueError('fps for this dataset is not specified in openx/configs.py yet,means it is not yet tested') fps = OPENX_DATASET_CONFIGS[openx_dataset_name]['fps'] data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding, openx_dataset_name) hf_dataset = to_hf_dataset(data_dict, video) episode_data_index = calculate_episode_data_index(hf_dataset) info = {'codebase_version': CODEBASE_VERSION, 'fps': fps, 'video': video} if video: info['encoding'] = get_default_encoding() return (hf_dataset, episode_data_index, info) # File: lerobot-main/lerobot/common/datasets/push_dataset_to_hub/pusht_zarr_format.py """""" import shutil from pathlib import Path import numpy as np import torch import tqdm import zarr from datasets import Dataset, Features, Image, Sequence, Value from PIL import Image as PILImage from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes, get_default_encoding, save_images_concurrently from 
lerobot.common.datasets.utils import calculate_episode_data_index, hf_transform_to_torch from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames def check_format(raw_dir): zarr_path = raw_dir / 'pusht_cchi_v7_replay.zarr' zarr_data = zarr.open(zarr_path, mode='r') required_datasets = {'data/action', 'data/img', 'data/keypoint', 'data/n_contacts', 'data/state', 'meta/episode_ends'} for dataset in required_datasets: assert dataset in zarr_data nb_frames = zarr_data['data/img'].shape[0] required_datasets.remove('meta/episode_ends') assert all((nb_frames == zarr_data[dataset].shape[0] for dataset in required_datasets)) def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episodes: list[int] | None=None, keypoints_instead_of_image: bool=False, encoding: dict | None=None): try: import pymunk from gym_pusht.envs.pusht import PushTEnv, pymunk_to_shapely from lerobot.common.datasets.push_dataset_to_hub._diffusion_policy_replay_buffer import ReplayBuffer as DiffusionPolicyReplayBuffer except ModuleNotFoundError as e: print("`gym_pusht` is not installed. Please install it with `pip install 'lerobot[gym_pusht]'`") raise e success_threshold = 0.95 zarr_path = raw_dir / 'pusht_cchi_v7_replay.zarr' zarr_data = DiffusionPolicyReplayBuffer.copy_from_path(zarr_path) episode_ids = torch.from_numpy(zarr_data.get_episode_idxs()) assert len({zarr_data[key].shape[0] for key in zarr_data.keys()}), 'Some data type dont have the same number of total frames.' goal_pos_angle = np.array([256, 256, np.pi / 4]) goal_body = PushTEnv.get_goal_pose_body(goal_pos_angle) imgs = torch.from_numpy(zarr_data['img']) states = torch.from_numpy(zarr_data['state']) actions = torch.from_numpy(zarr_data['action']) (from_ids, to_ids) = ([], []) from_idx = 0 for to_idx in zarr_data.meta['episode_ends']: from_ids.append(from_idx) to_ids.append(to_idx) from_idx = to_idx num_episodes = len(from_ids) ep_dicts = [] ep_ids = episodes if episodes else range(num_episodes) for (ep_idx, selected_ep_idx) in tqdm.tqdm(enumerate(ep_ids)): from_idx = from_ids[selected_ep_idx] to_idx = to_ids[selected_ep_idx] num_frames = to_idx - from_idx assert (episode_ids[from_idx:to_idx] == ep_idx).all() if not keypoints_instead_of_image: image = imgs[from_idx:to_idx] assert image.min() >= 0.0 assert image.max() <= 255.0 image = image.type(torch.uint8) state = states[from_idx:to_idx] agent_pos = state[:, :2] block_pos = state[:, 2:4] block_angle = state[:, 4] reward = torch.zeros(num_frames) success = torch.zeros(num_frames, dtype=torch.bool) if keypoints_instead_of_image: keypoints = torch.zeros(num_frames, 16) done = torch.zeros(num_frames, dtype=torch.bool) for i in range(num_frames): space = pymunk.Space() space.gravity = (0, 0) space.damping = 0 walls = [PushTEnv.add_segment(space, (5, 506), (5, 5), 2), PushTEnv.add_segment(space, (5, 5), (506, 5), 2), PushTEnv.add_segment(space, (506, 5), (506, 506), 2), PushTEnv.add_segment(space, (5, 506), (506, 506), 2)] space.add(*walls) (block_body, block_shapes) = PushTEnv.add_tee(space, block_pos[i].tolist(), block_angle[i].item()) goal_geom = pymunk_to_shapely(goal_body, block_body.shapes) block_geom = pymunk_to_shapely(block_body, block_body.shapes) intersection_area = goal_geom.intersection(block_geom).area goal_area = goal_geom.area coverage = intersection_area / goal_area reward[i] = np.clip(coverage / success_threshold, 0, 1) success[i] = coverage > success_threshold if keypoints_instead_of_image: keypoints[i] = 
torch.from_numpy(PushTEnv.get_keypoints(block_shapes).flatten()) done[-1] = True ep_dict = {} if not keypoints_instead_of_image: imgs_array = [x.numpy() for x in image] img_key = 'observation.image' if video: tmp_imgs_dir = videos_dir / 'tmp_images' save_images_concurrently(imgs_array, tmp_imgs_dir) fname = f'{img_key}_episode_{ep_idx:06d}.mp4' video_path = videos_dir / fname encode_video_frames(tmp_imgs_dir, video_path, fps, **encoding or {}) shutil.rmtree(tmp_imgs_dir) ep_dict[img_key] = [{'path': f'videos/{fname}', 'timestamp': i / fps} for i in range(num_frames)] else: ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array] ep_dict['observation.state'] = agent_pos if keypoints_instead_of_image: ep_dict['observation.environment_state'] = keypoints ep_dict['action'] = actions[from_idx:to_idx] ep_dict['episode_index'] = torch.tensor([ep_idx] * num_frames, dtype=torch.int64) ep_dict['frame_index'] = torch.arange(0, num_frames, 1) ep_dict['timestamp'] = torch.arange(0, num_frames, 1) / fps ep_dict['next.reward'] = torch.cat([reward[1:], reward[[-1]]]) ep_dict['next.done'] = torch.cat([done[1:], done[[-1]]]) ep_dict['next.success'] = torch.cat([success[1:], success[[-1]]]) ep_dicts.append(ep_dict) data_dict = concatenate_episodes(ep_dicts) total_frames = data_dict['frame_index'].shape[0] data_dict['index'] = torch.arange(0, total_frames, 1) return data_dict def to_hf_dataset(data_dict, video, keypoints_instead_of_image: bool=False): features = {} if not keypoints_instead_of_image: if video: features['observation.image'] = VideoFrame() else: features['observation.image'] = Image() features['observation.state'] = Sequence(length=data_dict['observation.state'].shape[1], feature=Value(dtype='float32', id=None)) if keypoints_instead_of_image: features['observation.environment_state'] = Sequence(length=data_dict['observation.environment_state'].shape[1], feature=Value(dtype='float32', id=None)) features['action'] = Sequence(length=data_dict['action'].shape[1], feature=Value(dtype='float32', id=None)) features['episode_index'] = Value(dtype='int64', id=None) features['frame_index'] = Value(dtype='int64', id=None) features['timestamp'] = Value(dtype='float32', id=None) features['next.reward'] = Value(dtype='float32', id=None) features['next.done'] = Value(dtype='bool', id=None) features['next.success'] = Value(dtype='bool', id=None) features['index'] = Value(dtype='int64', id=None) hf_dataset = Dataset.from_dict(data_dict, features=Features(features)) hf_dataset.set_transform(hf_transform_to_torch) return hf_dataset def from_raw_to_lerobot_format(raw_dir: Path, videos_dir: Path, fps: int | None=None, video: bool=True, episodes: list[int] | None=None, encoding: dict | None=None): keypoints_instead_of_image = False check_format(raw_dir) if fps is None: fps = 10 data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, keypoints_instead_of_image, encoding) hf_dataset = to_hf_dataset(data_dict, video, keypoints_instead_of_image) episode_data_index = calculate_episode_data_index(hf_dataset) info = {'codebase_version': CODEBASE_VERSION, 'fps': fps, 'video': video if not keypoints_instead_of_image else 0} if video: info['encoding'] = get_default_encoding() return (hf_dataset, episode_data_index, info) # File: lerobot-main/lerobot/common/datasets/push_dataset_to_hub/umi_zarr_format.py """""" import logging import shutil from pathlib import Path import torch import tqdm import zarr from datasets import Dataset, Features, Image, Sequence, Value from PIL import Image as PILImage from 
lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION from lerobot.common.datasets.push_dataset_to_hub._umi_imagecodecs_numcodecs import register_codecs from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes, get_default_encoding, save_images_concurrently from lerobot.common.datasets.utils import calculate_episode_data_index, hf_transform_to_torch from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames def check_format(raw_dir) -> bool: zarr_path = raw_dir / 'cup_in_the_wild.zarr' zarr_data = zarr.open(zarr_path, mode='r') required_datasets = {'data/robot0_demo_end_pose', 'data/robot0_demo_start_pose', 'data/robot0_eef_pos', 'data/robot0_eef_rot_axis_angle', 'data/robot0_gripper_width', 'meta/episode_ends', 'data/camera0_rgb'} for dataset in required_datasets: if dataset not in zarr_data: return False register_codecs() nb_frames = zarr_data['data/camera0_rgb'].shape[0] required_datasets.remove('meta/episode_ends') assert all((nb_frames == zarr_data[dataset].shape[0] for dataset in required_datasets)) def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episodes: list[int] | None=None, encoding: dict | None=None): zarr_path = raw_dir / 'cup_in_the_wild.zarr' zarr_data = zarr.open(zarr_path, mode='r') end_pose = torch.from_numpy(zarr_data['data/robot0_demo_end_pose'][:]) start_pos = torch.from_numpy(zarr_data['data/robot0_demo_start_pose'][:]) eff_pos = torch.from_numpy(zarr_data['data/robot0_eef_pos'][:]) eff_rot_axis_angle = torch.from_numpy(zarr_data['data/robot0_eef_rot_axis_angle'][:]) gripper_width = torch.from_numpy(zarr_data['data/robot0_gripper_width'][:]) states_pos = torch.cat([eff_pos, eff_rot_axis_angle], dim=1) states = torch.cat([states_pos, gripper_width], dim=1) episode_ends = zarr_data['meta/episode_ends'][:] num_episodes = episode_ends.shape[0] episode_ends = torch.from_numpy(episode_ends) (from_ids, to_ids) = ([], []) from_idx = 0 for to_idx in episode_ends: from_ids.append(from_idx) to_ids.append(to_idx) from_idx = to_idx ep_dicts_dir = videos_dir / 'ep_dicts' ep_dicts_dir.mkdir(exist_ok=True, parents=True) ep_dicts = [] ep_ids = episodes if episodes else range(num_episodes) for (ep_idx, selected_ep_idx) in tqdm.tqdm(enumerate(ep_ids)): ep_dict_path = ep_dicts_dir / f'{ep_idx}' if not ep_dict_path.is_file(): from_idx = from_ids[selected_ep_idx] to_idx = to_ids[selected_ep_idx] num_frames = to_idx - from_idx state = states[from_idx:to_idx] ep_dict = {} imgs_array = zarr_data['data/camera0_rgb'][from_idx:to_idx] img_key = 'observation.image' if video: fname = f'{img_key}_episode_{ep_idx:06d}.mp4' video_path = videos_dir / fname if not video_path.is_file(): tmp_imgs_dir = videos_dir / 'tmp_images' save_images_concurrently(imgs_array, tmp_imgs_dir) encode_video_frames(tmp_imgs_dir, video_path, fps, **encoding or {}) shutil.rmtree(tmp_imgs_dir) ep_dict[img_key] = [{'path': f'videos/{fname}', 'timestamp': i / fps} for i in range(num_frames)] else: ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array] ep_dict['observation.state'] = state ep_dict['episode_index'] = torch.tensor([ep_idx] * num_frames, dtype=torch.int64) ep_dict['frame_index'] = torch.arange(0, num_frames, 1) ep_dict['timestamp'] = torch.arange(0, num_frames, 1) / fps ep_dict['episode_data_index_from'] = torch.tensor([from_idx] * num_frames) ep_dict['episode_data_index_to'] = torch.tensor([from_idx + num_frames] * num_frames) ep_dict['end_pose'] = end_pose[from_idx:to_idx] ep_dict['start_pos'] = start_pos[from_idx:to_idx] 
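# --- Illustrative aside (not part of the original umi_zarr_format.py) ---
# The per-episode slices above (`end_pose`, `start_pos`, and the state/image slices) all
# come from the same pattern: `meta/episode_ends` stores cumulative end offsets, which are
# turned into half-open [from_idx, to_idx) ranges. A minimal, self-contained sketch of that
# pattern; the helper name `episode_ranges` is hypothetical, introduced only for illustration:
def episode_ranges(episode_ends):
    """Yield (from_idx, to_idx) pairs from cumulative episode end offsets."""
    from_idx = 0
    for end in episode_ends:
        to_idx = int(end)
        yield from_idx, to_idx
        from_idx = to_idx
# Example: list(episode_ranges([3, 7, 12])) -> [(0, 3), (3, 7), (7, 12)]
# --- End of aside ---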
ep_dict['gripper_width'] = gripper_width[from_idx:to_idx] torch.save(ep_dict, ep_dict_path) else: ep_dict = torch.load(ep_dict_path) ep_dicts.append(ep_dict) data_dict = concatenate_episodes(ep_dicts) total_frames = data_dict['frame_index'].shape[0] data_dict['index'] = torch.arange(0, total_frames, 1) return data_dict def to_hf_dataset(data_dict, video): features = {} if video: features['observation.image'] = VideoFrame() else: features['observation.image'] = Image() features['observation.state'] = Sequence(length=data_dict['observation.state'].shape[1], feature=Value(dtype='float32', id=None)) features['episode_index'] = Value(dtype='int64', id=None) features['frame_index'] = Value(dtype='int64', id=None) features['timestamp'] = Value(dtype='float32', id=None) features['index'] = Value(dtype='int64', id=None) features['episode_data_index_from'] = Value(dtype='int64', id=None) features['episode_data_index_to'] = Value(dtype='int64', id=None) features['end_pose'] = Sequence(length=data_dict['end_pose'].shape[1], feature=Value(dtype='float32', id=None)) features['start_pos'] = Sequence(length=data_dict['start_pos'].shape[1], feature=Value(dtype='float32', id=None)) features['gripper_width'] = Sequence(length=data_dict['gripper_width'].shape[1], feature=Value(dtype='float32', id=None)) hf_dataset = Dataset.from_dict(data_dict, features=Features(features)) hf_dataset.set_transform(hf_transform_to_torch) return hf_dataset def from_raw_to_lerobot_format(raw_dir: Path, videos_dir: Path, fps: int | None=None, video: bool=True, episodes: list[int] | None=None, encoding: dict | None=None): check_format(raw_dir) if fps is None: fps = 10 if not video: logging.warning('Generating UMI dataset without `video=True` creates ~150GB on disk and requires ~80GB in RAM.') data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding) hf_dataset = to_hf_dataset(data_dict, video) episode_data_index = calculate_episode_data_index(hf_dataset) info = {'codebase_version': CODEBASE_VERSION, 'fps': fps, 'video': video} if video: info['encoding'] = get_default_encoding() return (hf_dataset, episode_data_index, info) # File: lerobot-main/lerobot/common/datasets/push_dataset_to_hub/utils.py import inspect from concurrent.futures import ThreadPoolExecutor from pathlib import Path import numpy import PIL import torch from lerobot.common.datasets.video_utils import encode_video_frames def concatenate_episodes(ep_dicts): data_dict = {} keys = ep_dicts[0].keys() for key in keys: if torch.is_tensor(ep_dicts[0][key][0]): data_dict[key] = torch.cat([ep_dict[key] for ep_dict in ep_dicts]) else: if key not in data_dict: data_dict[key] = [] for ep_dict in ep_dicts: for x in ep_dict[key]: data_dict[key].append(x) total_frames = data_dict['frame_index'].shape[0] data_dict['index'] = torch.arange(0, total_frames, 1) return data_dict def save_images_concurrently(imgs_array: numpy.array, out_dir: Path, max_workers: int=4): out_dir = Path(out_dir) out_dir.mkdir(parents=True, exist_ok=True) def save_image(img_array, i, out_dir): img = PIL.Image.fromarray(img_array) img.save(str(out_dir / f'frame_{i:06d}.png'), quality=100) num_images = len(imgs_array) with ThreadPoolExecutor(max_workers=max_workers) as executor: [executor.submit(save_image, imgs_array[i], i, out_dir) for i in range(num_images)] def get_default_encoding() -> dict: signature = inspect.signature(encode_video_frames) return {k: v.default for (k, v) in signature.parameters.items() if v.default is not inspect.Parameter.empty and k in ['vcodec', 'pix_fmt', 'g', 
'crf']} def check_repo_id(repo_id: str) -> None: if len(repo_id.split('/')) != 2: raise ValueError(f"`repo_id` is expected to contain a community or user id `/` the name of the dataset\n (e.g. 'lerobot/pusht'), but contains '{repo_id}'.") # File: lerobot-main/lerobot/common/datasets/push_dataset_to_hub/xarm_pkl_format.py """""" import pickle import shutil from pathlib import Path import einops import torch import tqdm from datasets import Dataset, Features, Image, Sequence, Value from PIL import Image as PILImage from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes, get_default_encoding, save_images_concurrently from lerobot.common.datasets.utils import calculate_episode_data_index, hf_transform_to_torch from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames def check_format(raw_dir): keys = {'actions', 'rewards', 'dones'} nested_keys = {'observations': {'rgb', 'state'}, 'next_observations': {'rgb', 'state'}} xarm_files = list(raw_dir.glob('*.pkl')) assert len(xarm_files) > 0 with open(xarm_files[0], 'rb') as f: dataset_dict = pickle.load(f) assert isinstance(dataset_dict, dict) assert all((k in dataset_dict for k in keys)) expected_len = len(dataset_dict['actions']) assert all((len(dataset_dict[key]) == expected_len for key in keys if key in dataset_dict)) for (key, subkeys) in nested_keys.items(): nested_dict = dataset_dict.get(key, {}) assert all((len(nested_dict[subkey]) == expected_len for subkey in subkeys if subkey in nested_dict)) def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episodes: list[int] | None=None, encoding: dict | None=None): pkl_path = raw_dir / 'buffer.pkl' with open(pkl_path, 'rb') as f: pkl_data = pickle.load(f) (from_ids, to_ids) = ([], []) (from_idx, to_idx) = (0, 0) for done in pkl_data['dones']: to_idx += 1 if not done: continue from_ids.append(from_idx) to_ids.append(to_idx) from_idx = to_idx num_episodes = len(from_ids) ep_dicts = [] ep_ids = episodes if episodes else range(num_episodes) for (ep_idx, selected_ep_idx) in tqdm.tqdm(enumerate(ep_ids)): from_idx = from_ids[selected_ep_idx] to_idx = to_ids[selected_ep_idx] num_frames = to_idx - from_idx image = torch.tensor(pkl_data['observations']['rgb'][from_idx:to_idx]) image = einops.rearrange(image, 'b c h w -> b h w c') state = torch.tensor(pkl_data['observations']['state'][from_idx:to_idx]) action = torch.tensor(pkl_data['actions'][from_idx:to_idx]) next_reward = torch.tensor(pkl_data['rewards'][from_idx:to_idx]) next_done = torch.tensor(pkl_data['dones'][from_idx:to_idx]) ep_dict = {} imgs_array = [x.numpy() for x in image] img_key = 'observation.image' if video: tmp_imgs_dir = videos_dir / 'tmp_images' save_images_concurrently(imgs_array, tmp_imgs_dir) fname = f'{img_key}_episode_{ep_idx:06d}.mp4' video_path = videos_dir / fname encode_video_frames(tmp_imgs_dir, video_path, fps, **encoding or {}) shutil.rmtree(tmp_imgs_dir) ep_dict[img_key] = [{'path': f'videos/{fname}', 'timestamp': i / fps} for i in range(num_frames)] else: ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array] ep_dict['observation.state'] = state ep_dict['action'] = action ep_dict['episode_index'] = torch.tensor([ep_idx] * num_frames, dtype=torch.int64) ep_dict['frame_index'] = torch.arange(0, num_frames, 1) ep_dict['timestamp'] = torch.arange(0, num_frames, 1) / fps ep_dict['next.reward'] = next_reward ep_dict['next.done'] = next_done ep_dicts.append(ep_dict) data_dict = 
concatenate_episodes(ep_dicts) total_frames = data_dict['frame_index'].shape[0] data_dict['index'] = torch.arange(0, total_frames, 1) return data_dict def to_hf_dataset(data_dict, video): features = {} if video: features['observation.image'] = VideoFrame() else: features['observation.image'] = Image() features['observation.state'] = Sequence(length=data_dict['observation.state'].shape[1], feature=Value(dtype='float32', id=None)) features['action'] = Sequence(length=data_dict['action'].shape[1], feature=Value(dtype='float32', id=None)) features['episode_index'] = Value(dtype='int64', id=None) features['frame_index'] = Value(dtype='int64', id=None) features['timestamp'] = Value(dtype='float32', id=None) features['next.reward'] = Value(dtype='float32', id=None) features['next.done'] = Value(dtype='bool', id=None) features['index'] = Value(dtype='int64', id=None) hf_dataset = Dataset.from_dict(data_dict, features=Features(features)) hf_dataset.set_transform(hf_transform_to_torch) return hf_dataset def from_raw_to_lerobot_format(raw_dir: Path, videos_dir: Path, fps: int | None=None, video: bool=True, episodes: list[int] | None=None, encoding: dict | None=None): check_format(raw_dir) if fps is None: fps = 15 data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding) hf_dataset = to_hf_dataset(data_dict, video) episode_data_index = calculate_episode_data_index(hf_dataset) info = {'codebase_version': CODEBASE_VERSION, 'fps': fps, 'video': video} if video: info['encoding'] = get_default_encoding() return (hf_dataset, episode_data_index, info) # File: lerobot-main/lerobot/common/datasets/sampler.py from typing import Iterator, Union import torch class EpisodeAwareSampler: def __init__(self, episode_data_index: dict, episode_indices_to_use: Union[list, None]=None, drop_n_first_frames: int=0, drop_n_last_frames: int=0, shuffle: bool=False): indices = [] for (episode_idx, (start_index, end_index)) in enumerate(zip(episode_data_index['from'], episode_data_index['to'], strict=True)): if episode_indices_to_use is None or episode_idx in episode_indices_to_use: indices.extend(range(start_index.item() + drop_n_first_frames, end_index.item() - drop_n_last_frames)) self.indices = indices self.shuffle = shuffle def __iter__(self) -> Iterator[int]: if self.shuffle: for i in torch.randperm(len(self.indices)): yield self.indices[i] else: for i in self.indices: yield i def __len__(self) -> int: return len(self.indices) # File: lerobot-main/lerobot/common/datasets/transforms.py import collections from typing import Any, Callable, Dict, Sequence import torch from torchvision.transforms import v2 from torchvision.transforms.v2 import Transform from torchvision.transforms.v2 import functional as F class RandomSubsetApply(Transform): def __init__(self, transforms: Sequence[Callable], p: list[float] | None=None, n_subset: int | None=None, random_order: bool=False) -> None: super().__init__() if not isinstance(transforms, Sequence): raise TypeError('Argument transforms should be a sequence of callables') if p is None: p = [1] * len(transforms) elif len(p) != len(transforms): raise ValueError(f"Length of p doesn't match the number of transforms: {len(p)} != {len(transforms)}") if n_subset is None: n_subset = len(transforms) elif not isinstance(n_subset, int): raise TypeError('n_subset should be an int or None') elif not 1 <= n_subset <= len(transforms): raise ValueError(f'n_subset should be in the interval [1, {len(transforms)}]') self.transforms = transforms total = sum(p) self.p = [prob / total for 
prob in p] self.n_subset = n_subset self.random_order = random_order def forward(self, *inputs: Any) -> Any: needs_unpacking = len(inputs) > 1 selected_indices = torch.multinomial(torch.tensor(self.p), self.n_subset) if not self.random_order: selected_indices = selected_indices.sort().values selected_transforms = [self.transforms[i] for i in selected_indices] for transform in selected_transforms: outputs = transform(*inputs) inputs = outputs if needs_unpacking else (outputs,) return outputs def extra_repr(self) -> str: return f'transforms={self.transforms}, p={self.p}, n_subset={self.n_subset}, random_order={self.random_order}' class SharpnessJitter(Transform): def __init__(self, sharpness: float | Sequence[float]) -> None: super().__init__() self.sharpness = self._check_input(sharpness) def _check_input(self, sharpness): if isinstance(sharpness, (int, float)): if sharpness < 0: raise ValueError('If sharpness is a single number, it must be non negative.') sharpness = [1.0 - sharpness, 1.0 + sharpness] sharpness[0] = max(sharpness[0], 0.0) elif isinstance(sharpness, collections.abc.Sequence) and len(sharpness) == 2: sharpness = [float(v) for v in sharpness] else: raise TypeError(f'sharpness={sharpness!r} should be a single number or a sequence with length 2.') if not 0.0 <= sharpness[0] <= sharpness[1]: raise ValueError(f'sharpnesss values should be between (0., inf), but got {sharpness}.') return (float(sharpness[0]), float(sharpness[1])) def _generate_value(self, left: float, right: float) -> float: return torch.empty(1).uniform_(left, right).item() def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: sharpness_factor = self._generate_value(self.sharpness[0], self.sharpness[1]) return self._call_kernel(F.adjust_sharpness, inpt, sharpness_factor=sharpness_factor) def get_image_transforms(brightness_weight: float=1.0, brightness_min_max: tuple[float, float] | None=None, contrast_weight: float=1.0, contrast_min_max: tuple[float, float] | None=None, saturation_weight: float=1.0, saturation_min_max: tuple[float, float] | None=None, hue_weight: float=1.0, hue_min_max: tuple[float, float] | None=None, sharpness_weight: float=1.0, sharpness_min_max: tuple[float, float] | None=None, max_num_transforms: int | None=None, random_order: bool=False): def check_value(name, weight, min_max): if min_max is not None: if len(min_max) != 2: raise ValueError(f'`{name}_min_max` is expected to be a tuple of 2 dimensions, but {min_max} provided.') if weight < 0.0: raise ValueError(f'`{name}_weight` is expected to be 0 or positive, but is negative ({weight}).') check_value('brightness', brightness_weight, brightness_min_max) check_value('contrast', contrast_weight, contrast_min_max) check_value('saturation', saturation_weight, saturation_min_max) check_value('hue', hue_weight, hue_min_max) check_value('sharpness', sharpness_weight, sharpness_min_max) weights = [] transforms = [] if brightness_min_max is not None and brightness_weight > 0.0: weights.append(brightness_weight) transforms.append(v2.ColorJitter(brightness=brightness_min_max)) if contrast_min_max is not None and contrast_weight > 0.0: weights.append(contrast_weight) transforms.append(v2.ColorJitter(contrast=contrast_min_max)) if saturation_min_max is not None and saturation_weight > 0.0: weights.append(saturation_weight) transforms.append(v2.ColorJitter(saturation=saturation_min_max)) if hue_min_max is not None and hue_weight > 0.0: weights.append(hue_weight) transforms.append(v2.ColorJitter(hue=hue_min_max)) if sharpness_min_max is not None 
and sharpness_weight > 0.0: weights.append(sharpness_weight) transforms.append(SharpnessJitter(sharpness=sharpness_min_max)) n_subset = len(transforms) if max_num_transforms is not None: n_subset = min(n_subset, max_num_transforms) if n_subset == 0: return v2.Identity() else: return RandomSubsetApply(transforms, p=weights, n_subset=n_subset, random_order=random_order) # File: lerobot-main/lerobot/common/datasets/utils.py import json import re import warnings from functools import cache from pathlib import Path from typing import Dict import datasets import torch from datasets import load_dataset, load_from_disk from huggingface_hub import DatasetCard, HfApi, hf_hub_download, snapshot_download from PIL import Image as PILImage from safetensors.torch import load_file from torchvision import transforms DATASET_CARD_TEMPLATE = '\n---\n# Metadata will go there\n---\nThis dataset was created using [🤗 LeRobot](https://github.com/huggingface/lerobot).\n\n' def flatten_dict(d, parent_key='', sep='/'): items = [] for (k, v) in d.items(): new_key = f'{parent_key}{sep}{k}' if parent_key else k if isinstance(v, dict): items.extend(flatten_dict(v, new_key, sep=sep).items()) else: items.append((new_key, v)) return dict(items) def unflatten_dict(d, sep='/'): outdict = {} for (key, value) in d.items(): parts = key.split(sep) d = outdict for part in parts[:-1]: if part not in d: d[part] = {} d = d[part] d[parts[-1]] = value return outdict def hf_transform_to_torch(items_dict: dict[torch.Tensor | None]): for key in items_dict: first_item = items_dict[key][0] if isinstance(first_item, PILImage.Image): to_tensor = transforms.ToTensor() items_dict[key] = [to_tensor(img) for img in items_dict[key]] elif isinstance(first_item, str): pass elif isinstance(first_item, dict) and 'path' in first_item and ('timestamp' in first_item): pass elif first_item is None: pass else: items_dict[key] = [torch.tensor(x) for x in items_dict[key]] return items_dict @cache def get_hf_dataset_safe_version(repo_id: str, version: str) -> str: api = HfApi() dataset_info = api.list_repo_refs(repo_id, repo_type='dataset') branches = [b.name for b in dataset_info.branches] if version not in branches: warnings.warn(f"You are trying to load a dataset from {repo_id} created with a previous version of the\n codebase. The following versions are available: {branches}.\n The requested version ('{version}') is not found. You should be fine since\n backward compatibility is maintained. 
If you encounter a problem, contact LeRobot maintainers on\n Discord ('https://discord.com/invite/s3KuuzsPFb') or open an issue on github.", stacklevel=1) if 'main' not in branches: raise ValueError(f"Version 'main' not found on {repo_id}") return 'main' else: return version def load_hf_dataset(repo_id: str, version: str, root: Path, split: str) -> datasets.Dataset: if root is not None: hf_dataset = load_from_disk(str(Path(root) / repo_id / 'train')) if split != 'train': if '%' in split: raise NotImplementedError(f'We dont support splitting based on percentage for now ({split}).') match_from = re.search('train\\[(\\d+):\\]', split) match_to = re.search('train\\[:(\\d+)\\]', split) if match_from: from_frame_index = int(match_from.group(1)) hf_dataset = hf_dataset.select(range(from_frame_index, len(hf_dataset))) elif match_to: to_frame_index = int(match_to.group(1)) hf_dataset = hf_dataset.select(range(to_frame_index)) else: raise ValueError(f'`split` ({split}) should either be "train", "train[INT:]", or "train[:INT]"') else: safe_version = get_hf_dataset_safe_version(repo_id, version) hf_dataset = load_dataset(repo_id, revision=safe_version, split=split) hf_dataset.set_transform(hf_transform_to_torch) return hf_dataset def load_episode_data_index(repo_id, version, root) -> dict[str, torch.Tensor]: if root is not None: path = Path(root) / repo_id / 'meta_data' / 'episode_data_index.safetensors' else: safe_version = get_hf_dataset_safe_version(repo_id, version) path = hf_hub_download(repo_id, 'meta_data/episode_data_index.safetensors', repo_type='dataset', revision=safe_version) return load_file(path) def load_stats(repo_id, version, root) -> dict[str, dict[str, torch.Tensor]]: if root is not None: path = Path(root) / repo_id / 'meta_data' / 'stats.safetensors' else: safe_version = get_hf_dataset_safe_version(repo_id, version) path = hf_hub_download(repo_id, 'meta_data/stats.safetensors', repo_type='dataset', revision=safe_version) stats = load_file(path) return unflatten_dict(stats) def load_info(repo_id, version, root) -> dict: if root is not None: path = Path(root) / repo_id / 'meta_data' / 'info.json' else: safe_version = get_hf_dataset_safe_version(repo_id, version) path = hf_hub_download(repo_id, 'meta_data/info.json', repo_type='dataset', revision=safe_version) with open(path) as f: info = json.load(f) return info def load_videos(repo_id, version, root) -> Path: if root is not None: path = Path(root) / repo_id / 'videos' else: safe_version = get_hf_dataset_safe_version(repo_id, version) repo_dir = snapshot_download(repo_id, repo_type='dataset', revision=safe_version) path = Path(repo_dir) / 'videos' return path def load_previous_and_future_frames(item: dict[str, torch.Tensor], hf_dataset: datasets.Dataset, episode_data_index: dict[str, torch.Tensor], delta_timestamps: dict[str, list[float]], tolerance_s: float) -> dict[torch.Tensor]: ep_id = item['episode_index'].item() ep_data_id_from = episode_data_index['from'][ep_id].item() ep_data_id_to = episode_data_index['to'][ep_id].item() ep_data_ids = torch.arange(ep_data_id_from, ep_data_id_to, 1) ep_timestamps = hf_dataset.select_columns('timestamp')[ep_data_id_from:ep_data_id_to]['timestamp'] ep_timestamps = torch.stack(ep_timestamps) ep_first_ts = ep_timestamps[0] ep_last_ts = ep_timestamps[-1] current_ts = item['timestamp'].item() for key in delta_timestamps: delta_ts = delta_timestamps[key] query_ts = current_ts + torch.tensor(delta_ts) dist = torch.cdist(query_ts[:, None], ep_timestamps[:, None], p=1) (min_, argmin_) = dist.min(1) 
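# --- Illustrative aside (not part of the original utils.py) ---
# The cdist/min step above performs nearest-neighbour matching of the queried timestamps
# (current_ts + delta_timestamps) against the episode's frame timestamps via an L1 distance
# matrix; queries whose closest frame is farther than `tolerance_s` are flagged as padding
# just below. A self-contained sketch of the same idea:
import torch  # already imported at the top of this file; repeated so the sketch stands alone

def nearest_frames(query_ts, frame_ts):
    """Return (distance, index) of the closest frame timestamp for each query timestamp."""
    dist = torch.cdist(query_ts[:, None], frame_ts[:, None], p=1)
    return dist.min(dim=1)

# Example: frames sampled at 10 fps, querying 0.1 s in the past and the current step:
#   frame_ts = torch.arange(20, dtype=torch.float32) / 10
#   nearest_frames(torch.tensor([0.9, 1.0]), frame_ts)
#   -> distances close to 0, indices (9, 10); a distance above tolerance_s would mark padding.
# --- End of aside ---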
is_pad = min_ > tolerance_s assert ((query_ts[is_pad] < ep_first_ts) | (ep_last_ts < query_ts[is_pad])).all(), f'One or several timestamps unexpectedly violate the tolerance ({min_} > tolerance_s={tolerance_s!r}) inside episode range.This might be due to synchronization issues with timestamps during data collection.' data_ids = ep_data_ids[argmin_] item[key] = hf_dataset.select_columns(key)[data_ids][key] if isinstance(item[key][0], dict) and 'path' in item[key][0]: item[key] = item[key] else: item[key] = torch.stack(item[key]) item[f'{key}_is_pad'] = is_pad return item def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> Dict[str, torch.Tensor]: episode_data_index = {'from': [], 'to': []} current_episode = None '' if len(hf_dataset) == 0: episode_data_index = {'from': torch.tensor([]), 'to': torch.tensor([])} return episode_data_index for (idx, episode_idx) in enumerate(hf_dataset['episode_index']): if episode_idx != current_episode: episode_data_index['from'].append(idx) if current_episode is not None: episode_data_index['to'].append(idx) current_episode = episode_idx else: pass episode_data_index['to'].append(idx + 1) for k in ['from', 'to']: episode_data_index[k] = torch.tensor(episode_data_index[k]) return episode_data_index def reset_episode_index(hf_dataset: datasets.Dataset) -> datasets.Dataset: if len(hf_dataset) == 0: return hf_dataset unique_episode_idxs = torch.stack(hf_dataset['episode_index']).unique().tolist() episode_idx_to_reset_idx_mapping = {ep_id: reset_ep_id for (reset_ep_id, ep_id) in enumerate(unique_episode_idxs)} def modify_ep_idx_func(example): example['episode_index'] = episode_idx_to_reset_idx_mapping[example['episode_index'].item()] return example hf_dataset = hf_dataset.map(modify_ep_idx_func) return hf_dataset def cycle(iterable): iterator = iter(iterable) while True: try: yield next(iterator) except StopIteration: iterator = iter(iterable) def create_branch(repo_id, *, branch: str, repo_type: str | None=None): api = HfApi() branches = api.list_repo_refs(repo_id, repo_type=repo_type).branches refs = [branch.ref for branch in branches] ref = f'refs/heads/{branch}' if ref in refs: api.delete_branch(repo_id, repo_type=repo_type, branch=branch) api.create_branch(repo_id, repo_type=repo_type, branch=branch) def create_lerobot_dataset_card(tags: list | None=None, text: str | None=None) -> DatasetCard: card = DatasetCard(DATASET_CARD_TEMPLATE) card.data.task_categories = ['robotics'] card.data.tags = ['LeRobot'] if tags is not None: card.data.tags += tags if text is not None: card.text += text return card # File: lerobot-main/lerobot/common/datasets/video_utils.py import logging import subprocess import warnings from collections import OrderedDict from dataclasses import dataclass, field from pathlib import Path from typing import Any, ClassVar import pyarrow as pa import torch import torchvision from datasets.features.features import register_feature def load_from_videos(item: dict[str, torch.Tensor], video_frame_keys: list[str], videos_dir: Path, tolerance_s: float, backend: str='pyav'): data_dir = videos_dir.parent for key in video_frame_keys: if isinstance(item[key], list): timestamps = [frame['timestamp'] for frame in item[key]] paths = [frame['path'] for frame in item[key]] if len(set(paths)) > 1: raise NotImplementedError('All video paths are expected to be the same for now.') video_path = data_dir / paths[0] frames = decode_video_frames_torchvision(video_path, timestamps, tolerance_s, backend) item[key] = frames else: timestamps = 
[item[key]['timestamp']] video_path = data_dir / item[key]['path'] frames = decode_video_frames_torchvision(video_path, timestamps, tolerance_s, backend) item[key] = frames[0] return item def decode_video_frames_torchvision(video_path: str, timestamps: list[float], tolerance_s: float, backend: str='pyav', log_loaded_timestamps: bool=False) -> torch.Tensor: video_path = str(video_path) keyframes_only = False torchvision.set_video_backend(backend) if backend == 'pyav': keyframes_only = True reader = torchvision.io.VideoReader(video_path, 'video') first_ts = timestamps[0] last_ts = timestamps[-1] reader.seek(first_ts, keyframes_only=keyframes_only) loaded_frames = [] loaded_ts = [] for frame in reader: current_ts = frame['pts'] if log_loaded_timestamps: logging.info(f'frame loaded at timestamp={current_ts:.4f}') loaded_frames.append(frame['data']) loaded_ts.append(current_ts) if current_ts >= last_ts: break if backend == 'pyav': reader.container.close() reader = None query_ts = torch.tensor(timestamps) loaded_ts = torch.tensor(loaded_ts) dist = torch.cdist(query_ts[:, None], loaded_ts[:, None], p=1) (min_, argmin_) = dist.min(1) is_within_tol = min_ < tolerance_s assert is_within_tol.all(), f'One or several query timestamps unexpectedly violate the tolerance ({min_[~is_within_tol]} > tolerance_s={tolerance_s!r}).It means that the closest frame that can be loaded from the video is too far away in time.This might be due to synchronization issues with timestamps during data collection.To be safe, we advise to ignore this item during training.\nqueried timestamps: {query_ts}\nloaded timestamps: {loaded_ts}\nvideo: {video_path}\nbackend: {backend}' closest_frames = torch.stack([loaded_frames[idx] for idx in argmin_]) closest_ts = loaded_ts[argmin_] if log_loaded_timestamps: logging.info(f'closest_ts={closest_ts!r}') closest_frames = closest_frames.type(torch.float32) / 255 assert len(timestamps) == len(closest_frames) return closest_frames def encode_video_frames(imgs_dir: Path, video_path: Path, fps: int, vcodec: str='libsvtav1', pix_fmt: str='yuv420p', g: int | None=2, crf: int | None=30, fast_decode: int=0, log_level: str | None='error', overwrite: bool=False) -> None: video_path = Path(video_path) video_path.parent.mkdir(parents=True, exist_ok=True) ffmpeg_args = OrderedDict([('-f', 'image2'), ('-r', str(fps)), ('-i', str(imgs_dir / 'frame_%06d.png')), ('-vcodec', vcodec), ('-pix_fmt', pix_fmt)]) if g is not None: ffmpeg_args['-g'] = str(g) if crf is not None: ffmpeg_args['-crf'] = str(crf) if fast_decode: key = '-svtav1-params' if vcodec == 'libsvtav1' else '-tune' value = f'fast-decode={fast_decode}' if vcodec == 'libsvtav1' else 'fastdecode' ffmpeg_args[key] = value if log_level is not None: ffmpeg_args['-loglevel'] = str(log_level) ffmpeg_args = [item for pair in ffmpeg_args.items() for item in pair] if overwrite: ffmpeg_args.append('-y') ffmpeg_cmd = ['ffmpeg'] + ffmpeg_args + [str(video_path)] subprocess.run(ffmpeg_cmd, check=True, stdin=subprocess.DEVNULL) if not video_path.exists(): raise OSError(f"Video encoding did not work. File not found: {video_path}. 
Try running the command manually to debug: `{' '.join(ffmpeg_cmd)}`") @dataclass class VideoFrame: pa_type: ClassVar[Any] = pa.struct({'path': pa.string(), 'timestamp': pa.float32()}) _type: str = field(default='VideoFrame', init=False, repr=False) def __call__(self): return self.pa_type with warnings.catch_warnings(): warnings.filterwarnings('ignore', "'register_feature' is experimental and might be subject to breaking changes in the future.", category=UserWarning) register_feature(VideoFrame, 'VideoFrame') # File: lerobot-main/lerobot/common/envs/factory.py import importlib import gymnasium as gym from omegaconf import DictConfig def make_env(cfg: DictConfig, n_envs: int | None=None) -> gym.vector.VectorEnv | None: if n_envs is not None and n_envs < 1: raise ValueError('`n_envs` must be at least 1') if cfg.env.name == 'real_world': return package_name = f'gym_{cfg.env.name}' try: importlib.import_module(package_name) except ModuleNotFoundError as e: print(f"{package_name} is not installed. Please install it with `pip install 'lerobot[{cfg.env.name}]'`") raise e gym_handle = f'{package_name}/{cfg.env.task}' gym_kwgs = dict(cfg.env.get('gym', {})) if cfg.env.get('episode_length'): gym_kwgs['max_episode_steps'] = cfg.env.episode_length env_cls = gym.vector.AsyncVectorEnv if cfg.eval.use_async_envs else gym.vector.SyncVectorEnv env = env_cls([lambda : gym.make(gym_handle, disable_env_checker=True, **gym_kwgs) for _ in range(n_envs if n_envs is not None else cfg.eval.batch_size)]) return env # File: lerobot-main/lerobot/common/envs/utils.py import einops import numpy as np import torch from torch import Tensor def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Tensor]: return_observations = {} if 'pixels' in observations: if isinstance(observations['pixels'], dict): imgs = {f'observation.images.{key}': img for (key, img) in observations['pixels'].items()} else: imgs = {'observation.image': observations['pixels']} for (imgkey, img) in imgs.items(): img = torch.from_numpy(img) (_, h, w, c) = img.shape assert c < h and c < w, f'expect channel last images, but instead got img.shape={img.shape!r}' assert img.dtype == torch.uint8, f'expect torch.uint8, but instead img.dtype={img.dtype!r}' img = einops.rearrange(img, 'b h w c -> b c h w').contiguous() img = img.type(torch.float32) img /= 255 return_observations[imgkey] = img if 'environment_state' in observations: return_observations['observation.environment_state'] = torch.from_numpy(observations['environment_state']).float() return_observations['observation.state'] = torch.from_numpy(observations['agent_pos']).float() return return_observations # File: lerobot-main/lerobot/common/logger.py """""" import logging import os import re from glob import glob from pathlib import Path import torch from huggingface_hub.constants import SAFETENSORS_SINGLE_FILE from omegaconf import DictConfig, OmegaConf from termcolor import colored from torch.optim import Optimizer from torch.optim.lr_scheduler import LRScheduler from lerobot.common.policies.policy_protocol import Policy from lerobot.common.utils.utils import get_global_random_state, set_global_random_state def log_output_dir(out_dir): logging.info(colored('Output dir:', 'yellow', attrs=['bold']) + f' {out_dir}') def cfg_to_group(cfg: DictConfig, return_list: bool=False) -> list[str] | str: lst = [f'policy:{cfg.policy.name}', f'dataset:{cfg.dataset_repo_id}', f'env:{cfg.env.name}', f'seed:{cfg.seed}'] return lst if return_list else '-'.join(lst) def 
get_wandb_run_id_from_filesystem(checkpoint_dir: Path) -> str: paths = glob(str(checkpoint_dir / '../wandb/latest-run/run-*')) if len(paths) != 1: raise RuntimeError("Couldn't get the previous WandB run ID for run resumption.") match = re.search('run-([^\\.]+).wandb', paths[0].split('/')[-1]) if match is None: raise RuntimeError("Couldn't get the previous WandB run ID for run resumption.") wandb_run_id = match.groups(0)[0] return wandb_run_id class Logger: pretrained_model_dir_name = 'pretrained_model' training_state_file_name = 'training_state.pth' def __init__(self, cfg: DictConfig, log_dir: str, wandb_job_name: str | None=None): self._cfg = cfg self.log_dir = Path(log_dir) self.log_dir.mkdir(parents=True, exist_ok=True) self.checkpoints_dir = self.get_checkpoints_dir(log_dir) self.last_checkpoint_dir = self.get_last_checkpoint_dir(log_dir) self.last_pretrained_model_dir = self.get_last_pretrained_model_dir(log_dir) self._group = cfg_to_group(cfg) project = cfg.get('wandb', {}).get('project') entity = cfg.get('wandb', {}).get('entity') enable_wandb = cfg.get('wandb', {}).get('enable', False) run_offline = not enable_wandb or not project if run_offline: logging.info(colored('Logs will be saved locally.', 'yellow', attrs=['bold'])) self._wandb = None else: os.environ['WANDB_SILENT'] = 'true' import wandb wandb_run_id = None if cfg.resume: wandb_run_id = get_wandb_run_id_from_filesystem(self.checkpoints_dir) wandb.init(id=wandb_run_id, project=project, entity=entity, name=wandb_job_name, notes=cfg.get('wandb', {}).get('notes'), tags=cfg_to_group(cfg, return_list=True), dir=log_dir, config=OmegaConf.to_container(cfg, resolve=True), save_code=False, job_type='train_eval', resume='must' if cfg.resume else None) print(colored('Logs will be synced with wandb.', 'blue', attrs=['bold'])) logging.info(f"Track this run --> {colored(wandb.run.get_url(), 'yellow', attrs=['bold'])}") self._wandb = wandb @classmethod def get_checkpoints_dir(cls, log_dir: str | Path) -> Path: return Path(log_dir) / 'checkpoints' @classmethod def get_last_checkpoint_dir(cls, log_dir: str | Path) -> Path: return cls.get_checkpoints_dir(log_dir) / 'last' @classmethod def get_last_pretrained_model_dir(cls, log_dir: str | Path) -> Path: return cls.get_last_checkpoint_dir(log_dir) / cls.pretrained_model_dir_name def save_model(self, save_dir: Path, policy: Policy, wandb_artifact_name: str | None=None): self.checkpoints_dir.mkdir(parents=True, exist_ok=True) policy.save_pretrained(save_dir) OmegaConf.save(self._cfg, save_dir / 'config.yaml') if self._wandb and (not self._cfg.wandb.disable_artifact): artifact = self._wandb.Artifact(wandb_artifact_name, type='model') artifact.add_file(save_dir / SAFETENSORS_SINGLE_FILE) self._wandb.log_artifact(artifact) if self.last_checkpoint_dir.exists(): os.remove(self.last_checkpoint_dir) def save_training_state(self, save_dir: Path, train_step: int, optimizer: Optimizer, scheduler: LRScheduler | None): training_state = {'step': train_step, 'optimizer': optimizer.state_dict(), **get_global_random_state()} if scheduler is not None: training_state['scheduler'] = scheduler.state_dict() torch.save(training_state, save_dir / self.training_state_file_name) def save_checkpont(self, train_step: int, policy: Policy, optimizer: Optimizer, scheduler: LRScheduler | None, identifier: str): checkpoint_dir = self.checkpoints_dir / str(identifier) wandb_artifact_name = None if self._wandb is None else f"{self._group.replace(':', '_').replace('/', '_')}-{self._cfg.seed}-{identifier}" 
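# --- Illustrative aside (not part of the original logger.py) ---
# The artifact name built above is the run group with ':' and '/' replaced to make it
# W&B/filename safe, suffixed with the seed and the checkpoint identifier. A tiny sketch
# with made-up values:
group = 'policy:act-dataset:lerobot/pusht-env:pusht-seed:1000'  # example value of self._group
print(group.replace(':', '_').replace('/', '_') + '-1000-005000')
# -> policy_act-dataset_lerobot_pusht-env_pusht-seed_1000-1000-005000
# The calls that follow then lay the checkpoint out roughly as (assuming identifier='005000'):
#   <log_dir>/checkpoints/005000/pretrained_model/   <- policy weights via save_pretrained + config.yaml
#   <log_dir>/checkpoints/005000/training_state.pth  <- step, optimizer, scheduler (if any), RNG state
#   <log_dir>/checkpoints/last -> <log_dir>/checkpoints/005000   (symlink refreshed at each save)
# --- End of aside ---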
self.save_model(checkpoint_dir / self.pretrained_model_dir_name, policy, wandb_artifact_name=wandb_artifact_name) self.save_training_state(checkpoint_dir, train_step, optimizer, scheduler) os.symlink(checkpoint_dir.absolute(), self.last_checkpoint_dir) def load_last_training_state(self, optimizer: Optimizer, scheduler: LRScheduler | None) -> int: training_state = torch.load(self.last_checkpoint_dir / self.training_state_file_name) optimizer.load_state_dict(training_state['optimizer']) if scheduler is not None: scheduler.load_state_dict(training_state['scheduler']) elif 'scheduler' in training_state: raise ValueError('The checkpoint contains a scheduler state_dict, but no LRScheduler was provided.') set_global_random_state({k: training_state[k] for k in get_global_random_state()}) return training_state['step'] def log_dict(self, d, step, mode='train'): assert mode in {'train', 'eval'} if self._wandb is not None: for (k, v) in d.items(): if not isinstance(v, (int, float, str)): logging.warning(f'WandB logging of key "{k}" was ignored as its type is not handled by this wrapper.') continue self._wandb.log({f'{mode}/{k}': v}, step=step) def log_video(self, video_path: str, step: int, mode: str='train'): assert mode in {'train', 'eval'} assert self._wandb is not None wandb_video = self._wandb.Video(video_path, fps=self._cfg.fps, format='mp4') self._wandb.log({f'{mode}/video': wandb_video}, step=step) # File: lerobot-main/lerobot/common/policies/act/configuration_act.py from dataclasses import dataclass, field @dataclass class ACTConfig: n_obs_steps: int = 1 chunk_size: int = 100 n_action_steps: int = 100 input_shapes: dict[str, list[int]] = field(default_factory=lambda : {'observation.images.top': [3, 480, 640], 'observation.state': [14]}) output_shapes: dict[str, list[int]] = field(default_factory=lambda : {'action': [14]}) input_normalization_modes: dict[str, str] = field(default_factory=lambda : {'observation.images.top': 'mean_std', 'observation.state': 'mean_std'}) output_normalization_modes: dict[str, str] = field(default_factory=lambda : {'action': 'mean_std'}) vision_backbone: str = 'resnet18' pretrained_backbone_weights: str | None = 'ResNet18_Weights.IMAGENET1K_V1' replace_final_stride_with_dilation: int = False pre_norm: bool = False dim_model: int = 512 n_heads: int = 8 dim_feedforward: int = 3200 feedforward_activation: str = 'relu' n_encoder_layers: int = 4 n_decoder_layers: int = 1 use_vae: bool = True latent_dim: int = 32 n_vae_encoder_layers: int = 4 temporal_ensemble_coeff: float | None = None dropout: float = 0.1 kl_weight: float = 10.0 def __post_init__(self): if not self.vision_backbone.startswith('resnet'): raise ValueError(f'`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}.') if self.temporal_ensemble_coeff is not None and self.n_action_steps > 1: raise NotImplementedError('`n_action_steps` must be 1 when using temporal ensembling. This is because the policy needs to be queried every step to compute the ensembled action.') if self.n_action_steps > self.chunk_size: raise ValueError(f'The chunk size is the upper bound for the number of action steps per model invocation. Got {self.n_action_steps} for `n_action_steps` and {self.chunk_size} for `chunk_size`.') if self.n_obs_steps != 1: raise ValueError(f'Multiple observation steps not handled yet. 
Got `nobs_steps={self.n_obs_steps}`') if not any((k.startswith('observation.image') for k in self.input_shapes)) and 'observation.environment_state' not in self.input_shapes: raise ValueError('You must provide at least one image or the environment state among the inputs.') # File: lerobot-main/lerobot/common/policies/act/modeling_act.py """""" import math from collections import deque from itertools import chain from typing import Callable import einops import numpy as np import torch import torch.nn.functional as F import torchvision from huggingface_hub import PyTorchModelHubMixin from torch import Tensor, nn from torchvision.models._utils import IntermediateLayerGetter from torchvision.ops.misc import FrozenBatchNorm2d from lerobot.common.policies.act.configuration_act import ACTConfig from lerobot.common.policies.normalize import Normalize, Unnormalize class ACTPolicy(nn.Module, PyTorchModelHubMixin, library_name='lerobot', repo_url='https://github.com/huggingface/lerobot', tags=['robotics', 'act']): name = 'act' def __init__(self, config: ACTConfig | None=None, dataset_stats: dict[str, dict[str, Tensor]] | None=None): super().__init__() if config is None: config = ACTConfig() self.config: ACTConfig = config self.normalize_inputs = Normalize(config.input_shapes, config.input_normalization_modes, dataset_stats) self.normalize_targets = Normalize(config.output_shapes, config.output_normalization_modes, dataset_stats) self.unnormalize_outputs = Unnormalize(config.output_shapes, config.output_normalization_modes, dataset_stats) self.model = ACT(config) self.expected_image_keys = [k for k in config.input_shapes if k.startswith('observation.image')] if config.temporal_ensemble_coeff is not None: self.temporal_ensembler = ACTTemporalEnsembler(config.temporal_ensemble_coeff, config.chunk_size) self.reset() def reset(self): if self.config.temporal_ensemble_coeff is not None: self.temporal_ensembler.reset() else: self._action_queue = deque([], maxlen=self.config.n_action_steps) @torch.no_grad def select_action(self, batch: dict[str, Tensor]) -> Tensor: self.eval() batch = self.normalize_inputs(batch) if len(self.expected_image_keys) > 0: batch = dict(batch) batch['observation.images'] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4) if self.config.temporal_ensemble_coeff is not None: actions = self.model(batch)[0] actions = self.unnormalize_outputs({'action': actions})['action'] action = self.temporal_ensembler.update(actions) return action if len(self._action_queue) == 0: actions = self.model(batch)[0][:, :self.config.n_action_steps] actions = self.unnormalize_outputs({'action': actions})['action'] self._action_queue.extend(actions.transpose(0, 1)) return self._action_queue.popleft() def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: batch = self.normalize_inputs(batch) if len(self.expected_image_keys) > 0: batch = dict(batch) batch['observation.images'] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4) batch = self.normalize_targets(batch) (actions_hat, (mu_hat, log_sigma_x2_hat)) = self.model(batch) l1_loss = (F.l1_loss(batch['action'], actions_hat, reduction='none') * ~batch['action_is_pad'].unsqueeze(-1)).mean() loss_dict = {'l1_loss': l1_loss.item()} if self.config.use_vae: mean_kld = (-0.5 * (1 + log_sigma_x2_hat - mu_hat.pow(2) - log_sigma_x2_hat.exp())).sum(-1).mean() loss_dict['kld_loss'] = mean_kld.item() loss_dict['loss'] = l1_loss + mean_kld * self.config.kl_weight else: loss_dict['loss'] = l1_loss return loss_dict class 
ACTTemporalEnsembler: def __init__(self, temporal_ensemble_coeff: float, chunk_size: int) -> None: self.chunk_size = chunk_size self.ensemble_weights = torch.exp(-temporal_ensemble_coeff * torch.arange(chunk_size)) self.ensemble_weights_cumsum = torch.cumsum(self.ensemble_weights, dim=0) self.reset() def reset(self): self.ensembled_actions = None self.ensembled_actions_count = None def update(self, actions: Tensor) -> Tensor: self.ensemble_weights = self.ensemble_weights.to(device=actions.device) self.ensemble_weights_cumsum = self.ensemble_weights_cumsum.to(device=actions.device) if self.ensembled_actions is None: self.ensembled_actions = actions.clone() self.ensembled_actions_count = torch.ones((self.chunk_size, 1), dtype=torch.long, device=self.ensembled_actions.device) else: self.ensembled_actions *= self.ensemble_weights_cumsum[self.ensembled_actions_count - 1] self.ensembled_actions += actions[:, :-1] * self.ensemble_weights[self.ensembled_actions_count] self.ensembled_actions /= self.ensemble_weights_cumsum[self.ensembled_actions_count] self.ensembled_actions_count = torch.clamp(self.ensembled_actions_count + 1, max=self.chunk_size) self.ensembled_actions = torch.cat([self.ensembled_actions, actions[:, -1:]], dim=1) self.ensembled_actions_count = torch.cat([self.ensembled_actions_count, torch.ones_like(self.ensembled_actions_count[-1:])]) (action, self.ensembled_actions, self.ensembled_actions_count) = (self.ensembled_actions[:, 0], self.ensembled_actions[:, 1:], self.ensembled_actions_count[1:]) return action class ACT(nn.Module): def __init__(self, config: ACTConfig): super().__init__() self.config = config self.use_robot_state = 'observation.state' in config.input_shapes self.use_images = any((k.startswith('observation.image') for k in config.input_shapes)) self.use_env_state = 'observation.environment_state' in config.input_shapes if self.config.use_vae: self.vae_encoder = ACTEncoder(config, is_vae_encoder=True) self.vae_encoder_cls_embed = nn.Embedding(1, config.dim_model) if self.use_robot_state: self.vae_encoder_robot_state_input_proj = nn.Linear(config.input_shapes['observation.state'][0], config.dim_model) self.vae_encoder_action_input_proj = nn.Linear(config.output_shapes['action'][0], config.dim_model) self.vae_encoder_latent_output_proj = nn.Linear(config.dim_model, config.latent_dim * 2) num_input_token_encoder = 1 + config.chunk_size if self.use_robot_state: num_input_token_encoder += 1 self.register_buffer('vae_encoder_pos_enc', create_sinusoidal_pos_embedding(num_input_token_encoder, config.dim_model).unsqueeze(0)) if self.use_images: backbone_model = getattr(torchvision.models, config.vision_backbone)(replace_stride_with_dilation=[False, False, config.replace_final_stride_with_dilation], weights=config.pretrained_backbone_weights, norm_layer=FrozenBatchNorm2d) self.backbone = IntermediateLayerGetter(backbone_model, return_layers={'layer4': 'feature_map'}) self.encoder = ACTEncoder(config) self.decoder = ACTDecoder(config) if self.use_robot_state: self.encoder_robot_state_input_proj = nn.Linear(config.input_shapes['observation.state'][0], config.dim_model) if self.use_env_state: self.encoder_env_state_input_proj = nn.Linear(config.input_shapes['observation.environment_state'][0], config.dim_model) self.encoder_latent_input_proj = nn.Linear(config.latent_dim, config.dim_model) if self.use_images: self.encoder_img_feat_input_proj = nn.Conv2d(backbone_model.fc.in_features, config.dim_model, kernel_size=1) n_1d_tokens = 1 if self.use_robot_state: n_1d_tokens += 1 if 
self.use_env_state: n_1d_tokens += 1 self.encoder_1d_feature_pos_embed = nn.Embedding(n_1d_tokens, config.dim_model) if self.use_images: self.encoder_cam_feat_pos_embed = ACTSinusoidalPositionEmbedding2d(config.dim_model // 2) self.decoder_pos_embed = nn.Embedding(config.chunk_size, config.dim_model) self.action_head = nn.Linear(config.dim_model, config.output_shapes['action'][0]) self._reset_parameters() def _reset_parameters(self): for p in chain(self.encoder.parameters(), self.decoder.parameters()): if p.dim() > 1: nn.init.xavier_uniform_(p) def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, tuple[Tensor, Tensor] | tuple[None, None]]: if self.config.use_vae and self.training: assert 'action' in batch, 'actions must be provided when using the variational objective in training mode.' batch_size = (batch['observation.images'] if 'observation.images' in batch else batch['observation.environment_state']).shape[0] if self.config.use_vae and 'action' in batch: cls_embed = einops.repeat(self.vae_encoder_cls_embed.weight, '1 d -> b 1 d', b=batch_size) if self.use_robot_state: robot_state_embed = self.vae_encoder_robot_state_input_proj(batch['observation.state']) robot_state_embed = robot_state_embed.unsqueeze(1) action_embed = self.vae_encoder_action_input_proj(batch['action']) if self.use_robot_state: vae_encoder_input = [cls_embed, robot_state_embed, action_embed] else: vae_encoder_input = [cls_embed, action_embed] vae_encoder_input = torch.cat(vae_encoder_input, axis=1) pos_embed = self.vae_encoder_pos_enc.clone().detach() cls_joint_is_pad = torch.full((batch_size, 2 if self.use_robot_state else 1), False, device=batch['observation.state'].device) key_padding_mask = torch.cat([cls_joint_is_pad, batch['action_is_pad']], axis=1) cls_token_out = self.vae_encoder(vae_encoder_input.permute(1, 0, 2), pos_embed=pos_embed.permute(1, 0, 2), key_padding_mask=key_padding_mask)[0] latent_pdf_params = self.vae_encoder_latent_output_proj(cls_token_out) mu = latent_pdf_params[:, :self.config.latent_dim] log_sigma_x2 = latent_pdf_params[:, self.config.latent_dim:] latent_sample = mu + log_sigma_x2.div(2).exp() * torch.randn_like(mu) else: mu = log_sigma_x2 = None latent_sample = torch.zeros([batch_size, self.config.latent_dim], dtype=torch.float32).to(batch['observation.state'].device) encoder_in_tokens = [self.encoder_latent_input_proj(latent_sample)] encoder_in_pos_embed = list(self.encoder_1d_feature_pos_embed.weight.unsqueeze(1)) if self.use_robot_state: encoder_in_tokens.append(self.encoder_robot_state_input_proj(batch['observation.state'])) if self.use_env_state: encoder_in_tokens.append(self.encoder_env_state_input_proj(batch['observation.environment_state'])) if self.use_images: all_cam_features = [] all_cam_pos_embeds = [] for cam_index in range(batch['observation.images'].shape[-4]): cam_features = self.backbone(batch['observation.images'][:, cam_index])['feature_map'] cam_pos_embed = self.encoder_cam_feat_pos_embed(cam_features).to(dtype=cam_features.dtype) cam_features = self.encoder_img_feat_input_proj(cam_features) all_cam_features.append(cam_features) all_cam_pos_embeds.append(cam_pos_embed) all_cam_features = torch.cat(all_cam_features, axis=-1) encoder_in_tokens.extend(einops.rearrange(all_cam_features, 'b c h w -> (h w) b c')) all_cam_pos_embeds = torch.cat(all_cam_pos_embeds, axis=-1) encoder_in_pos_embed.extend(einops.rearrange(all_cam_pos_embeds, 'b c h w -> (h w) b c')) encoder_in_tokens = torch.stack(encoder_in_tokens, axis=0) encoder_in_pos_embed = 
torch.stack(encoder_in_pos_embed, axis=0) encoder_out = self.encoder(encoder_in_tokens, pos_embed=encoder_in_pos_embed) decoder_in = torch.zeros((self.config.chunk_size, batch_size, self.config.dim_model), dtype=encoder_in_pos_embed.dtype, device=encoder_in_pos_embed.device) decoder_out = self.decoder(decoder_in, encoder_out, encoder_pos_embed=encoder_in_pos_embed, decoder_pos_embed=self.decoder_pos_embed.weight.unsqueeze(1)) decoder_out = decoder_out.transpose(0, 1) actions = self.action_head(decoder_out) return (actions, (mu, log_sigma_x2)) class ACTEncoder(nn.Module): def __init__(self, config: ACTConfig, is_vae_encoder: bool=False): super().__init__() self.is_vae_encoder = is_vae_encoder num_layers = config.n_vae_encoder_layers if self.is_vae_encoder else config.n_encoder_layers self.layers = nn.ModuleList([ACTEncoderLayer(config) for _ in range(num_layers)]) self.norm = nn.LayerNorm(config.dim_model) if config.pre_norm else nn.Identity() def forward(self, x: Tensor, pos_embed: Tensor | None=None, key_padding_mask: Tensor | None=None) -> Tensor: for layer in self.layers: x = layer(x, pos_embed=pos_embed, key_padding_mask=key_padding_mask) x = self.norm(x) return x class ACTEncoderLayer(nn.Module): def __init__(self, config: ACTConfig): super().__init__() self.self_attn = nn.MultiheadAttention(config.dim_model, config.n_heads, dropout=config.dropout) self.linear1 = nn.Linear(config.dim_model, config.dim_feedforward) self.dropout = nn.Dropout(config.dropout) self.linear2 = nn.Linear(config.dim_feedforward, config.dim_model) self.norm1 = nn.LayerNorm(config.dim_model) self.norm2 = nn.LayerNorm(config.dim_model) self.dropout1 = nn.Dropout(config.dropout) self.dropout2 = nn.Dropout(config.dropout) self.activation = get_activation_fn(config.feedforward_activation) self.pre_norm = config.pre_norm def forward(self, x, pos_embed: Tensor | None=None, key_padding_mask: Tensor | None=None) -> Tensor: skip = x if self.pre_norm: x = self.norm1(x) q = k = x if pos_embed is None else x + pos_embed x = self.self_attn(q, k, value=x, key_padding_mask=key_padding_mask) x = x[0] x = skip + self.dropout1(x) if self.pre_norm: skip = x x = self.norm2(x) else: x = self.norm1(x) skip = x x = self.linear2(self.dropout(self.activation(self.linear1(x)))) x = skip + self.dropout2(x) if not self.pre_norm: x = self.norm2(x) return x class ACTDecoder(nn.Module): def __init__(self, config: ACTConfig): super().__init__() self.layers = nn.ModuleList([ACTDecoderLayer(config) for _ in range(config.n_decoder_layers)]) self.norm = nn.LayerNorm(config.dim_model) def forward(self, x: Tensor, encoder_out: Tensor, decoder_pos_embed: Tensor | None=None, encoder_pos_embed: Tensor | None=None) -> Tensor: for layer in self.layers: x = layer(x, encoder_out, decoder_pos_embed=decoder_pos_embed, encoder_pos_embed=encoder_pos_embed) if self.norm is not None: x = self.norm(x) return x class ACTDecoderLayer(nn.Module): def __init__(self, config: ACTConfig): super().__init__() self.self_attn = nn.MultiheadAttention(config.dim_model, config.n_heads, dropout=config.dropout) self.multihead_attn = nn.MultiheadAttention(config.dim_model, config.n_heads, dropout=config.dropout) self.linear1 = nn.Linear(config.dim_model, config.dim_feedforward) self.dropout = nn.Dropout(config.dropout) self.linear2 = nn.Linear(config.dim_feedforward, config.dim_model) self.norm1 = nn.LayerNorm(config.dim_model) self.norm2 = nn.LayerNorm(config.dim_model) self.norm3 = nn.LayerNorm(config.dim_model) self.dropout1 = nn.Dropout(config.dropout) self.dropout2 = 
nn.Dropout(config.dropout) self.dropout3 = nn.Dropout(config.dropout) self.activation = get_activation_fn(config.feedforward_activation) self.pre_norm = config.pre_norm def maybe_add_pos_embed(self, tensor: Tensor, pos_embed: Tensor | None) -> Tensor: return tensor if pos_embed is None else tensor + pos_embed def forward(self, x: Tensor, encoder_out: Tensor, decoder_pos_embed: Tensor | None=None, encoder_pos_embed: Tensor | None=None) -> Tensor: skip = x if self.pre_norm: x = self.norm1(x) q = k = self.maybe_add_pos_embed(x, decoder_pos_embed) x = self.self_attn(q, k, value=x)[0] x = skip + self.dropout1(x) if self.pre_norm: skip = x x = self.norm2(x) else: x = self.norm1(x) skip = x x = self.multihead_attn(query=self.maybe_add_pos_embed(x, decoder_pos_embed), key=self.maybe_add_pos_embed(encoder_out, encoder_pos_embed), value=encoder_out)[0] x = skip + self.dropout2(x) if self.pre_norm: skip = x x = self.norm3(x) else: x = self.norm2(x) skip = x x = self.linear2(self.dropout(self.activation(self.linear1(x)))) x = skip + self.dropout3(x) if not self.pre_norm: x = self.norm3(x) return x def create_sinusoidal_pos_embedding(num_positions: int, dimension: int) -> Tensor: def get_position_angle_vec(position): return [position / np.power(10000, 2 * (hid_j // 2) / dimension) for hid_j in range(dimension)] sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(num_positions)]) sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) return torch.from_numpy(sinusoid_table).float() class ACTSinusoidalPositionEmbedding2d(nn.Module): def __init__(self, dimension: int): super().__init__() self.dimension = dimension self._two_pi = 2 * math.pi self._eps = 1e-06 self._temperature = 10000 def forward(self, x: Tensor) -> Tensor: not_mask = torch.ones_like(x[0, :1]) y_range = not_mask.cumsum(1, dtype=torch.float32) x_range = not_mask.cumsum(2, dtype=torch.float32) y_range = y_range / (y_range[:, -1:, :] + self._eps) * self._two_pi x_range = x_range / (x_range[:, :, -1:] + self._eps) * self._two_pi inverse_frequency = self._temperature ** (2 * (torch.arange(self.dimension, dtype=torch.float32, device=x.device) // 2) / self.dimension) x_range = x_range.unsqueeze(-1) / inverse_frequency y_range = y_range.unsqueeze(-1) / inverse_frequency pos_embed_x = torch.stack((x_range[..., 0::2].sin(), x_range[..., 1::2].cos()), dim=-1).flatten(3) pos_embed_y = torch.stack((y_range[..., 0::2].sin(), y_range[..., 1::2].cos()), dim=-1).flatten(3) pos_embed = torch.cat((pos_embed_y, pos_embed_x), dim=3).permute(0, 3, 1, 2) return pos_embed def get_activation_fn(activation: str) -> Callable: if activation == 'relu': return F.relu if activation == 'gelu': return F.gelu if activation == 'glu': return F.glu raise RuntimeError(f'activation should be relu/gelu/glu, not {activation}.') # File: lerobot-main/lerobot/common/policies/diffusion/configuration_diffusion.py from dataclasses import dataclass, field @dataclass class DiffusionConfig: n_obs_steps: int = 2 horizon: int = 16 n_action_steps: int = 8 input_shapes: dict[str, list[int]] = field(default_factory=lambda : {'observation.image': [3, 96, 96], 'observation.state': [2]}) output_shapes: dict[str, list[int]] = field(default_factory=lambda : {'action': [2]}) input_normalization_modes: dict[str, str] = field(default_factory=lambda : {'observation.image': 'mean_std', 'observation.state': 'min_max'}) output_normalization_modes: dict[str, str] = field(default_factory=lambda : {'action': 'min_max'}) 
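# Illustrative aside: a minimal, self-contained sketch of what the two normalization modes
# configured above ('mean_std' and 'min_max') compute. It mirrors the arithmetic of the
# Normalize/Unnormalize modules in lerobot/common/policies/normalize.py further below; the
# function names and example values here are local to this sketch, not lerobot API.
import torch

def normalize_mean_std(x: torch.Tensor, mean: torch.Tensor, std: torch.Tensor) -> torch.Tensor:
    # Standardize with dataset statistics.
    return (x - mean) / (std + 1e-08)

def normalize_min_max(x: torch.Tensor, vmin: torch.Tensor, vmax: torch.Tensor) -> torch.Tensor:
    # Rescale to [0, 1], then shift to [-1, 1].
    x = (x - vmin) / (vmax - vmin + 1e-08)
    return x * 2 - 1

_x = torch.tensor([0.0, 5.0, 10.0])
print(normalize_mean_std(_x, _x.mean(), _x.std()))  # standardized values
print(normalize_min_max(_x, _x.min(), _x.max()))    # approximately [-1., 0., 1.]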
vision_backbone: str = 'resnet18' crop_shape: tuple[int, int] | None = (84, 84) crop_is_random: bool = True pretrained_backbone_weights: str | None = None use_group_norm: bool = True spatial_softmax_num_keypoints: int = 32 down_dims: tuple[int, ...] = (512, 1024, 2048) kernel_size: int = 5 n_groups: int = 8 diffusion_step_embed_dim: int = 128 use_film_scale_modulation: bool = True noise_scheduler_type: str = 'DDPM' num_train_timesteps: int = 100 beta_schedule: str = 'squaredcos_cap_v2' beta_start: float = 0.0001 beta_end: float = 0.02 prediction_type: str = 'epsilon' clip_sample: bool = True clip_sample_range: float = 1.0 num_inference_steps: int | None = None do_mask_loss_for_padding: bool = False def __post_init__(self): if not self.vision_backbone.startswith('resnet'): raise ValueError(f'`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}.') image_keys = {k for k in self.input_shapes if k.startswith('observation.image')} if len(image_keys) == 0 and 'observation.environment_state' not in self.input_shapes: raise ValueError('You must provide at least one image or the environment state among the inputs.') if len(image_keys) > 0: if self.crop_shape is not None: for image_key in image_keys: if self.crop_shape[0] > self.input_shapes[image_key][1] or self.crop_shape[1] > self.input_shapes[image_key][2]: raise ValueError(f'`crop_shape` should fit within `input_shapes[{image_key}]`. Got {self.crop_shape} for `crop_shape` and {self.input_shapes[image_key]} for `input_shapes[{{image_key}}]`.') first_image_key = next(iter(image_keys)) for image_key in image_keys: if self.input_shapes[image_key] != self.input_shapes[first_image_key]: raise ValueError(f'`input_shapes[{image_key}]` does not match `input_shapes[{first_image_key}]`, but we expect all image shapes to match.') supported_prediction_types = ['epsilon', 'sample'] if self.prediction_type not in supported_prediction_types: raise ValueError(f'`prediction_type` must be one of {supported_prediction_types}. Got {self.prediction_type}.') supported_noise_schedulers = ['DDPM', 'DDIM'] if self.noise_scheduler_type not in supported_noise_schedulers: raise ValueError(f'`noise_scheduler_type` must be one of {supported_noise_schedulers}. Got {self.noise_scheduler_type}.') downsampling_factor = 2 ** len(self.down_dims) if self.horizon % downsampling_factor != 0: raise ValueError(f'The horizon should be an integer multiple of the downsampling factor (which is determined by `len(down_dims)`). 
Got self.horizon={self.horizon!r} and self.down_dims={self.down_dims!r}') # File: lerobot-main/lerobot/common/policies/diffusion/modeling_diffusion.py """""" import math from collections import deque from typing import Callable import einops import numpy as np import torch import torch.nn.functional as F import torchvision from diffusers.schedulers.scheduling_ddim import DDIMScheduler from diffusers.schedulers.scheduling_ddpm import DDPMScheduler from huggingface_hub import PyTorchModelHubMixin from torch import Tensor, nn from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig from lerobot.common.policies.normalize import Normalize, Unnormalize from lerobot.common.policies.utils import get_device_from_parameters, get_dtype_from_parameters, populate_queues class DiffusionPolicy(nn.Module, PyTorchModelHubMixin, library_name='lerobot', repo_url='https://github.com/huggingface/lerobot', tags=['robotics', 'diffusion-policy']): name = 'diffusion' def __init__(self, config: DiffusionConfig | None=None, dataset_stats: dict[str, dict[str, Tensor]] | None=None): super().__init__() if config is None: config = DiffusionConfig() self.config = config self.normalize_inputs = Normalize(config.input_shapes, config.input_normalization_modes, dataset_stats) self.normalize_targets = Normalize(config.output_shapes, config.output_normalization_modes, dataset_stats) self.unnormalize_outputs = Unnormalize(config.output_shapes, config.output_normalization_modes, dataset_stats) self._queues = None self.diffusion = DiffusionModel(config) self.expected_image_keys = [k for k in config.input_shapes if k.startswith('observation.image')] self.use_env_state = 'observation.environment_state' in config.input_shapes self.reset() def reset(self): self._queues = {'observation.state': deque(maxlen=self.config.n_obs_steps), 'action': deque(maxlen=self.config.n_action_steps)} if len(self.expected_image_keys) > 0: self._queues['observation.images'] = deque(maxlen=self.config.n_obs_steps) if self.use_env_state: self._queues['observation.environment_state'] = deque(maxlen=self.config.n_obs_steps) @torch.no_grad def select_action(self, batch: dict[str, Tensor]) -> Tensor: batch = self.normalize_inputs(batch) if len(self.expected_image_keys) > 0: batch = dict(batch) batch['observation.images'] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4) self._queues = populate_queues(self._queues, batch) if len(self._queues['action']) == 0: batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues} actions = self.diffusion.generate_actions(batch) actions = self.unnormalize_outputs({'action': actions})['action'] self._queues['action'].extend(actions.transpose(0, 1)) action = self._queues['action'].popleft() return action def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: batch = self.normalize_inputs(batch) if len(self.expected_image_keys) > 0: batch = dict(batch) batch['observation.images'] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4) batch = self.normalize_targets(batch) loss = self.diffusion.compute_loss(batch) return {'loss': loss} def _make_noise_scheduler(name: str, **kwargs: dict) -> DDPMScheduler | DDIMScheduler: if name == 'DDPM': return DDPMScheduler(**kwargs) elif name == 'DDIM': return DDIMScheduler(**kwargs) else: raise ValueError(f'Unsupported noise scheduler type {name}') class DiffusionModel(nn.Module): def __init__(self, config: DiffusionConfig): super().__init__() self.config = config global_cond_dim = 
config.input_shapes['observation.state'][0] num_images = len([k for k in config.input_shapes if k.startswith('observation.image')]) self._use_images = False self._use_env_state = False if num_images > 0: self._use_images = True self.rgb_encoder = DiffusionRgbEncoder(config) global_cond_dim += self.rgb_encoder.feature_dim * num_images if 'observation.environment_state' in config.input_shapes: self._use_env_state = True global_cond_dim += config.input_shapes['observation.environment_state'][0] self.unet = DiffusionConditionalUnet1d(config, global_cond_dim=global_cond_dim * config.n_obs_steps) self.noise_scheduler = _make_noise_scheduler(config.noise_scheduler_type, num_train_timesteps=config.num_train_timesteps, beta_start=config.beta_start, beta_end=config.beta_end, beta_schedule=config.beta_schedule, clip_sample=config.clip_sample, clip_sample_range=config.clip_sample_range, prediction_type=config.prediction_type) if config.num_inference_steps is None: self.num_inference_steps = self.noise_scheduler.config.num_train_timesteps else: self.num_inference_steps = config.num_inference_steps def conditional_sample(self, batch_size: int, global_cond: Tensor | None=None, generator: torch.Generator | None=None) -> Tensor: device = get_device_from_parameters(self) dtype = get_dtype_from_parameters(self) sample = torch.randn(size=(batch_size, self.config.horizon, self.config.output_shapes['action'][0]), dtype=dtype, device=device, generator=generator) self.noise_scheduler.set_timesteps(self.num_inference_steps) for t in self.noise_scheduler.timesteps: model_output = self.unet(sample, torch.full(sample.shape[:1], t, dtype=torch.long, device=sample.device), global_cond=global_cond) sample = self.noise_scheduler.step(model_output, t, sample, generator=generator).prev_sample return sample def _prepare_global_conditioning(self, batch: dict[str, Tensor]) -> Tensor: (batch_size, n_obs_steps) = batch['observation.state'].shape[:2] global_cond_feats = [batch['observation.state']] if self._use_images: img_features = self.rgb_encoder(einops.rearrange(batch['observation.images'], 'b s n ... -> (b s n) ...')) img_features = einops.rearrange(img_features, '(b s n) ... 
-> b s (n ...)', b=batch_size, s=n_obs_steps) global_cond_feats.append(img_features) if self._use_env_state: global_cond_feats.append(batch['observation.environment_state']) return torch.cat(global_cond_feats, dim=-1).flatten(start_dim=1) def generate_actions(self, batch: dict[str, Tensor]) -> Tensor: (batch_size, n_obs_steps) = batch['observation.state'].shape[:2] assert n_obs_steps == self.config.n_obs_steps global_cond = self._prepare_global_conditioning(batch) actions = self.conditional_sample(batch_size, global_cond=global_cond) start = n_obs_steps - 1 end = start + self.config.n_action_steps actions = actions[:, start:end] return actions def compute_loss(self, batch: dict[str, Tensor]) -> Tensor: assert set(batch).issuperset({'observation.state', 'action', 'action_is_pad'}) assert 'observation.images' in batch or 'observation.environment_state' in batch n_obs_steps = batch['observation.state'].shape[1] horizon = batch['action'].shape[1] assert horizon == self.config.horizon assert n_obs_steps == self.config.n_obs_steps global_cond = self._prepare_global_conditioning(batch) trajectory = batch['action'] eps = torch.randn(trajectory.shape, device=trajectory.device) timesteps = torch.randint(low=0, high=self.noise_scheduler.config.num_train_timesteps, size=(trajectory.shape[0],), device=trajectory.device).long() noisy_trajectory = self.noise_scheduler.add_noise(trajectory, eps, timesteps) pred = self.unet(noisy_trajectory, timesteps, global_cond=global_cond) if self.config.prediction_type == 'epsilon': target = eps elif self.config.prediction_type == 'sample': target = batch['action'] else: raise ValueError(f'Unsupported prediction type {self.config.prediction_type}') loss = F.mse_loss(pred, target, reduction='none') if self.config.do_mask_loss_for_padding: if 'action_is_pad' not in batch: raise ValueError(f"You need to provide 'action_is_pad' in the batch when self.config.do_mask_loss_for_padding={self.config.do_mask_loss_for_padding!r}.") in_episode_bound = ~batch['action_is_pad'] loss = loss * in_episode_bound.unsqueeze(-1) return loss.mean() class SpatialSoftmax(nn.Module): def __init__(self, input_shape, num_kp=None): super().__init__() assert len(input_shape) == 3 (self._in_c, self._in_h, self._in_w) = input_shape if num_kp is not None: self.nets = torch.nn.Conv2d(self._in_c, num_kp, kernel_size=1) self._out_c = num_kp else: self.nets = None self._out_c = self._in_c (pos_x, pos_y) = np.meshgrid(np.linspace(-1.0, 1.0, self._in_w), np.linspace(-1.0, 1.0, self._in_h)) pos_x = torch.from_numpy(pos_x.reshape(self._in_h * self._in_w, 1)).float() pos_y = torch.from_numpy(pos_y.reshape(self._in_h * self._in_w, 1)).float() self.register_buffer('pos_grid', torch.cat([pos_x, pos_y], dim=1)) def forward(self, features: Tensor) -> Tensor: if self.nets is not None: features = self.nets(features) features = features.reshape(-1, self._in_h * self._in_w) attention = F.softmax(features, dim=-1) expected_xy = attention @ self.pos_grid feature_keypoints = expected_xy.view(-1, self._out_c, 2) return feature_keypoints class DiffusionRgbEncoder(nn.Module): def __init__(self, config: DiffusionConfig): super().__init__() if config.crop_shape is not None: self.do_crop = True self.center_crop = torchvision.transforms.CenterCrop(config.crop_shape) if config.crop_is_random: self.maybe_random_crop = torchvision.transforms.RandomCrop(config.crop_shape) else: self.maybe_random_crop = self.center_crop else: self.do_crop = False backbone_model = getattr(torchvision.models, 
config.vision_backbone)(weights=config.pretrained_backbone_weights) self.backbone = nn.Sequential(*list(backbone_model.children())[:-2]) if config.use_group_norm: if config.pretrained_backbone_weights: raise ValueError("You can't replace BatchNorm in a pretrained model without ruining the weights!") self.backbone = _replace_submodules(root_module=self.backbone, predicate=lambda x: isinstance(x, nn.BatchNorm2d), func=lambda x: nn.GroupNorm(num_groups=x.num_features // 16, num_channels=x.num_features)) image_keys = [k for k in config.input_shapes if k.startswith('observation.image')] image_key = image_keys[0] dummy_input_h_w = config.crop_shape if config.crop_shape is not None else config.input_shapes[image_key][1:] dummy_input = torch.zeros(size=(1, config.input_shapes[image_key][0], *dummy_input_h_w)) with torch.inference_mode(): dummy_feature_map = self.backbone(dummy_input) feature_map_shape = tuple(dummy_feature_map.shape[1:]) self.pool = SpatialSoftmax(feature_map_shape, num_kp=config.spatial_softmax_num_keypoints) self.feature_dim = config.spatial_softmax_num_keypoints * 2 self.out = nn.Linear(config.spatial_softmax_num_keypoints * 2, self.feature_dim) self.relu = nn.ReLU() def forward(self, x: Tensor) -> Tensor: if self.do_crop: if self.training: x = self.maybe_random_crop(x) else: x = self.center_crop(x) x = torch.flatten(self.pool(self.backbone(x)), start_dim=1) x = self.relu(self.out(x)) return x def _replace_submodules(root_module: nn.Module, predicate: Callable[[nn.Module], bool], func: Callable[[nn.Module], nn.Module]) -> nn.Module: if predicate(root_module): return func(root_module) replace_list = [k.split('.') for (k, m) in root_module.named_modules(remove_duplicate=True) if predicate(m)] for (*parents, k) in replace_list: parent_module = root_module if len(parents) > 0: parent_module = root_module.get_submodule('.'.join(parents)) if isinstance(parent_module, nn.Sequential): src_module = parent_module[int(k)] else: src_module = getattr(parent_module, k) tgt_module = func(src_module) if isinstance(parent_module, nn.Sequential): parent_module[int(k)] = tgt_module else: setattr(parent_module, k, tgt_module) assert not any((predicate(m) for (_, m) in root_module.named_modules(remove_duplicate=True))) return root_module class DiffusionSinusoidalPosEmb(nn.Module): def __init__(self, dim: int): super().__init__() self.dim = dim def forward(self, x: Tensor) -> Tensor: device = x.device half_dim = self.dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, device=device) * -emb) emb = x.unsqueeze(-1) * emb.unsqueeze(0) emb = torch.cat((emb.sin(), emb.cos()), dim=-1) return emb class DiffusionConv1dBlock(nn.Module): def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8): super().__init__() self.block = nn.Sequential(nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2), nn.GroupNorm(n_groups, out_channels), nn.Mish()) def forward(self, x): return self.block(x) class DiffusionConditionalUnet1d(nn.Module): def __init__(self, config: DiffusionConfig, global_cond_dim: int): super().__init__() self.config = config self.diffusion_step_encoder = nn.Sequential(DiffusionSinusoidalPosEmb(config.diffusion_step_embed_dim), nn.Linear(config.diffusion_step_embed_dim, config.diffusion_step_embed_dim * 4), nn.Mish(), nn.Linear(config.diffusion_step_embed_dim * 4, config.diffusion_step_embed_dim)) cond_dim = config.diffusion_step_embed_dim + global_cond_dim in_out = [(config.output_shapes['action'][0], config.down_dims[0])] + 
list(zip(config.down_dims[:-1], config.down_dims[1:], strict=True)) common_res_block_kwargs = {'cond_dim': cond_dim, 'kernel_size': config.kernel_size, 'n_groups': config.n_groups, 'use_film_scale_modulation': config.use_film_scale_modulation} self.down_modules = nn.ModuleList([]) for (ind, (dim_in, dim_out)) in enumerate(in_out): is_last = ind >= len(in_out) - 1 self.down_modules.append(nn.ModuleList([DiffusionConditionalResidualBlock1d(dim_in, dim_out, **common_res_block_kwargs), DiffusionConditionalResidualBlock1d(dim_out, dim_out, **common_res_block_kwargs), nn.Conv1d(dim_out, dim_out, 3, 2, 1) if not is_last else nn.Identity()])) self.mid_modules = nn.ModuleList([DiffusionConditionalResidualBlock1d(config.down_dims[-1], config.down_dims[-1], **common_res_block_kwargs), DiffusionConditionalResidualBlock1d(config.down_dims[-1], config.down_dims[-1], **common_res_block_kwargs)]) self.up_modules = nn.ModuleList([]) for (ind, (dim_out, dim_in)) in enumerate(reversed(in_out[1:])): is_last = ind >= len(in_out) - 1 self.up_modules.append(nn.ModuleList([DiffusionConditionalResidualBlock1d(dim_in * 2, dim_out, **common_res_block_kwargs), DiffusionConditionalResidualBlock1d(dim_out, dim_out, **common_res_block_kwargs), nn.ConvTranspose1d(dim_out, dim_out, 4, 2, 1) if not is_last else nn.Identity()])) self.final_conv = nn.Sequential(DiffusionConv1dBlock(config.down_dims[0], config.down_dims[0], kernel_size=config.kernel_size), nn.Conv1d(config.down_dims[0], config.output_shapes['action'][0], 1)) def forward(self, x: Tensor, timestep: Tensor | int, global_cond=None) -> Tensor: x = einops.rearrange(x, 'b t d -> b d t') timesteps_embed = self.diffusion_step_encoder(timestep) if global_cond is not None: global_feature = torch.cat([timesteps_embed, global_cond], axis=-1) else: global_feature = timesteps_embed encoder_skip_features: list[Tensor] = [] for (resnet, resnet2, downsample) in self.down_modules: x = resnet(x, global_feature) x = resnet2(x, global_feature) encoder_skip_features.append(x) x = downsample(x) for mid_module in self.mid_modules: x = mid_module(x, global_feature) for (resnet, resnet2, upsample) in self.up_modules: x = torch.cat((x, encoder_skip_features.pop()), dim=1) x = resnet(x, global_feature) x = resnet2(x, global_feature) x = upsample(x) x = self.final_conv(x) x = einops.rearrange(x, 'b d t -> b t d') return x class DiffusionConditionalResidualBlock1d(nn.Module): def __init__(self, in_channels: int, out_channels: int, cond_dim: int, kernel_size: int=3, n_groups: int=8, use_film_scale_modulation: bool=False): super().__init__() self.use_film_scale_modulation = use_film_scale_modulation self.out_channels = out_channels self.conv1 = DiffusionConv1dBlock(in_channels, out_channels, kernel_size, n_groups=n_groups) cond_channels = out_channels * 2 if use_film_scale_modulation else out_channels self.cond_encoder = nn.Sequential(nn.Mish(), nn.Linear(cond_dim, cond_channels)) self.conv2 = DiffusionConv1dBlock(out_channels, out_channels, kernel_size, n_groups=n_groups) self.residual_conv = nn.Conv1d(in_channels, out_channels, 1) if in_channels != out_channels else nn.Identity() def forward(self, x: Tensor, cond: Tensor) -> Tensor: out = self.conv1(x) cond_embed = self.cond_encoder(cond).unsqueeze(-1) if self.use_film_scale_modulation: scale = cond_embed[:, :self.out_channels] bias = cond_embed[:, self.out_channels:] out = scale * out + bias else: out = out + cond_embed out = self.conv2(out) out = out + self.residual_conv(x) return out # File: 
lerobot-main/lerobot/common/policies/factory.py import inspect import logging from omegaconf import DictConfig, OmegaConf from lerobot.common.policies.policy_protocol import Policy from lerobot.common.utils.utils import get_safe_torch_device def _policy_cfg_from_hydra_cfg(policy_cfg_class, hydra_cfg): expected_kwargs = set(inspect.signature(policy_cfg_class).parameters) if not set(hydra_cfg.policy).issuperset(expected_kwargs): logging.warning(f'Hydra config is missing arguments: {set(expected_kwargs).difference(hydra_cfg.policy)}') def list_to_tuple(item): return tuple(item) if isinstance(item, list) else item policy_cfg = policy_cfg_class(**{k: list_to_tuple(v) for (k, v) in OmegaConf.to_container(hydra_cfg.policy, resolve=True).items() if k in expected_kwargs}) return policy_cfg def get_policy_and_config_classes(name: str) -> tuple[Policy, object]: if name == 'tdmpc': from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig from lerobot.common.policies.tdmpc.modeling_tdmpc import TDMPCPolicy return (TDMPCPolicy, TDMPCConfig) elif name == 'diffusion': from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy return (DiffusionPolicy, DiffusionConfig) elif name == 'act': from lerobot.common.policies.act.configuration_act import ACTConfig from lerobot.common.policies.act.modeling_act import ACTPolicy return (ACTPolicy, ACTConfig) elif name == 'vqbet': from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig from lerobot.common.policies.vqbet.modeling_vqbet import VQBeTPolicy return (VQBeTPolicy, VQBeTConfig) else: raise NotImplementedError(f'Policy with name {name} is not implemented.') def make_policy(hydra_cfg: DictConfig, pretrained_policy_name_or_path: str | None=None, dataset_stats=None) -> Policy: if not (pretrained_policy_name_or_path is None) ^ (dataset_stats is None): raise ValueError('Exactly one of `pretrained_policy_name_or_path` and `dataset_stats` must be provided.') (policy_cls, policy_cfg_class) = get_policy_and_config_classes(hydra_cfg.policy.name) policy_cfg = _policy_cfg_from_hydra_cfg(policy_cfg_class, hydra_cfg) if pretrained_policy_name_or_path is None: policy = policy_cls(policy_cfg, dataset_stats) else: policy = policy_cls(policy_cfg) policy.load_state_dict(policy_cls.from_pretrained(pretrained_policy_name_or_path).state_dict()) policy.to(get_safe_torch_device(hydra_cfg.device)) return policy # File: lerobot-main/lerobot/common/policies/normalize.py import torch from torch import Tensor, nn def create_stats_buffers(shapes: dict[str, list[int]], modes: dict[str, str], stats: dict[str, dict[str, Tensor]] | None=None) -> dict[str, dict[str, nn.ParameterDict]]: stats_buffers = {} for (key, mode) in modes.items(): assert mode in ['mean_std', 'min_max'] shape = tuple(shapes[key]) if 'image' in key: assert len(shape) == 3, f'number of dimensions of {key} != 3 (shape={shape!r}' (c, h, w) = shape assert c < h and c < w, f'{key} is not channel first (shape={shape!r})' shape = (c, 1, 1) buffer = {} if mode == 'mean_std': mean = torch.ones(shape, dtype=torch.float32) * torch.inf std = torch.ones(shape, dtype=torch.float32) * torch.inf buffer = nn.ParameterDict({'mean': nn.Parameter(mean, requires_grad=False), 'std': nn.Parameter(std, requires_grad=False)}) elif mode == 'min_max': min = torch.ones(shape, dtype=torch.float32) * torch.inf max = torch.ones(shape, dtype=torch.float32) * torch.inf buffer = nn.ParameterDict({'min': 
nn.Parameter(min, requires_grad=False), 'max': nn.Parameter(max, requires_grad=False)}) if stats is not None: if mode == 'mean_std': buffer['mean'].data = stats[key]['mean'].clone() buffer['std'].data = stats[key]['std'].clone() elif mode == 'min_max': buffer['min'].data = stats[key]['min'].clone() buffer['max'].data = stats[key]['max'].clone() stats_buffers[key] = buffer return stats_buffers def _no_stats_error_str(name: str) -> str: return f'`{name}` is infinity. You should either initialize with `stats` as an argument, or use a pretrained model.' class Normalize(nn.Module): def __init__(self, shapes: dict[str, list[int]], modes: dict[str, str], stats: dict[str, dict[str, Tensor]] | None=None): super().__init__() self.shapes = shapes self.modes = modes self.stats = stats stats_buffers = create_stats_buffers(shapes, modes, stats) for (key, buffer) in stats_buffers.items(): setattr(self, 'buffer_' + key.replace('.', '_'), buffer) @torch.no_grad def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: batch = dict(batch) for (key, mode) in self.modes.items(): buffer = getattr(self, 'buffer_' + key.replace('.', '_')) if mode == 'mean_std': mean = buffer['mean'] std = buffer['std'] assert not torch.isinf(mean).any(), _no_stats_error_str('mean') assert not torch.isinf(std).any(), _no_stats_error_str('std') batch[key] = (batch[key] - mean) / (std + 1e-08) elif mode == 'min_max': min = buffer['min'] max = buffer['max'] assert not torch.isinf(min).any(), _no_stats_error_str('min') assert not torch.isinf(max).any(), _no_stats_error_str('max') batch[key] = (batch[key] - min) / (max - min + 1e-08) batch[key] = batch[key] * 2 - 1 else: raise ValueError(mode) return batch class Unnormalize(nn.Module): def __init__(self, shapes: dict[str, list[int]], modes: dict[str, str], stats: dict[str, dict[str, Tensor]] | None=None): super().__init__() self.shapes = shapes self.modes = modes self.stats = stats stats_buffers = create_stats_buffers(shapes, modes, stats) for (key, buffer) in stats_buffers.items(): setattr(self, 'buffer_' + key.replace('.', '_'), buffer) @torch.no_grad def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: batch = dict(batch) for (key, mode) in self.modes.items(): buffer = getattr(self, 'buffer_' + key.replace('.', '_')) if mode == 'mean_std': mean = buffer['mean'] std = buffer['std'] assert not torch.isinf(mean).any(), _no_stats_error_str('mean') assert not torch.isinf(std).any(), _no_stats_error_str('std') batch[key] = batch[key] * std + mean elif mode == 'min_max': min = buffer['min'] max = buffer['max'] assert not torch.isinf(min).any(), _no_stats_error_str('min') assert not torch.isinf(max).any(), _no_stats_error_str('max') batch[key] = (batch[key] + 1) / 2 batch[key] = batch[key] * (max - min) + min else: raise ValueError(mode) return batch # File: lerobot-main/lerobot/common/policies/policy_protocol.py """""" from typing import Protocol, runtime_checkable from torch import Tensor @runtime_checkable class Policy(Protocol): name: str def __init__(self, cfg, dataset_stats: dict[str, dict[str, Tensor]] | None=None): def reset(self): def forward(self, batch: dict[str, Tensor]) -> dict: def select_action(self, batch: dict[str, Tensor]) -> Tensor: @runtime_checkable class PolicyWithUpdate(Policy, Protocol): def update(self): # File: lerobot-main/lerobot/common/policies/tdmpc/configuration_tdmpc.py from dataclasses import dataclass, field @dataclass class TDMPCConfig: n_action_repeats: int = 2 horizon: int = 5 n_action_steps: int = 1 input_shapes: dict[str, 
list[int]] = field(default_factory=lambda : {'observation.image': [3, 84, 84], 'observation.state': [4]}) output_shapes: dict[str, list[int]] = field(default_factory=lambda : {'action': [4]}) input_normalization_modes: dict[str, str] | None = None output_normalization_modes: dict[str, str] = field(default_factory=lambda : {'action': 'min_max'}) image_encoder_hidden_dim: int = 32 state_encoder_hidden_dim: int = 256 latent_dim: int = 50 q_ensemble_size: int = 5 mlp_dim: int = 512 discount: float = 0.9 use_mpc: bool = True cem_iterations: int = 6 max_std: float = 2.0 min_std: float = 0.05 n_gaussian_samples: int = 512 n_pi_samples: int = 51 uncertainty_regularizer_coeff: float = 1.0 n_elites: int = 50 elite_weighting_temperature: float = 0.5 gaussian_mean_momentum: float = 0.1 max_random_shift_ratio: float = 0.0476 reward_coeff: float = 0.5 expectile_weight: float = 0.9 value_coeff: float = 0.1 consistency_coeff: float = 20.0 advantage_scaling: float = 3.0 pi_coeff: float = 0.5 temporal_decay_coeff: float = 0.5 target_model_momentum: float = 0.995 def __post_init__(self): image_keys = {k for k in self.input_shapes if k.startswith('observation.image')} if len(image_keys) > 1: raise ValueError(f'{self.__class__.__name__} handles at most one image for now. Got image keys {image_keys}.') if len(image_keys) > 0: image_key = next(iter(image_keys)) if self.input_shapes[image_key][-2] != self.input_shapes[image_key][-1]: raise ValueError(f'Only square images are handled now. Got image shape {self.input_shapes[image_key]}.') if self.n_gaussian_samples <= 0: raise ValueError(f'The number of gaussian samples for CEM should be positive. Got `self.n_gaussian_samples={self.n_gaussian_samples!r}`') if self.output_normalization_modes != {'action': 'min_max'}: raise ValueError(f'TD-MPC assumes the action space dimensions to all be in [-1, 1]. Therefore it is strongly advised that you stick with the default. 
See {self.__class__.__name__} docstring for more information.') if self.n_action_steps > 1: if self.n_action_repeats != 1: raise ValueError('If `n_action_steps > 1`, `n_action_repeats` must be left to its default value of 1.') if not self.use_mpc: raise ValueError('If `n_action_steps > 1`, `use_mpc` must be set to `True`.') if self.n_action_steps > self.horizon: raise ValueError('`n_action_steps` must be less than or equal to `horizon`.') # File: lerobot-main/lerobot/common/policies/tdmpc/modeling_tdmpc.py """""" from collections import deque from copy import deepcopy from functools import partial from typing import Callable import einops import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from huggingface_hub import PyTorchModelHubMixin from torch import Tensor from lerobot.common.policies.normalize import Normalize, Unnormalize from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig from lerobot.common.policies.utils import get_device_from_parameters, populate_queues class TDMPCPolicy(nn.Module, PyTorchModelHubMixin, library_name='lerobot', repo_url='https://github.com/huggingface/lerobot', tags=['robotics', 'tdmpc']): name = 'tdmpc' def __init__(self, config: TDMPCConfig | None=None, dataset_stats: dict[str, dict[str, Tensor]] | None=None): super().__init__() if config is None: config = TDMPCConfig() self.config = config self.model = TDMPCTOLD(config) self.model_target = deepcopy(self.model) for param in self.model_target.parameters(): param.requires_grad = False if config.input_normalization_modes is not None: self.normalize_inputs = Normalize(config.input_shapes, config.input_normalization_modes, dataset_stats) else: self.normalize_inputs = nn.Identity() self.normalize_targets = Normalize(config.output_shapes, config.output_normalization_modes, dataset_stats) self.unnormalize_outputs = Unnormalize(config.output_shapes, config.output_normalization_modes, dataset_stats) image_keys = [k for k in config.input_shapes if k.startswith('observation.image')] self._use_image = False self._use_env_state = False if len(image_keys) > 0: assert len(image_keys) == 1 self._use_image = True self.input_image_key = image_keys[0] if 'observation.environment_state' in config.input_shapes: self._use_env_state = True self.reset() def reset(self): self._queues = {'observation.state': deque(maxlen=1), 'action': deque(maxlen=max(self.config.n_action_steps, self.config.n_action_repeats))} if self._use_image: self._queues['observation.image'] = deque(maxlen=1) if self._use_env_state: self._queues['observation.environment_state'] = deque(maxlen=1) self._prev_mean: torch.Tensor | None = None @torch.no_grad() def select_action(self, batch: dict[str, Tensor]) -> Tensor: batch = self.normalize_inputs(batch) if self._use_image: batch = dict(batch) batch['observation.image'] = batch[self.input_image_key] self._queues = populate_queues(self._queues, batch) if len(self._queues['action']) == 0: batch = {key: torch.stack(list(self._queues[key]), dim=1) for key in batch} for key in batch: assert batch[key].shape[1] == 1 batch[key] = batch[key][:, 0] encode_keys = [] if self._use_image: encode_keys.append('observation.image') if self._use_env_state: encode_keys.append('observation.environment_state') encode_keys.append('observation.state') z = self.model.encode({k: batch[k] for k in encode_keys}) if self.config.use_mpc: actions = self.plan(z) else: actions = self.model.pi(z).unsqueeze(0) actions = torch.clamp(actions, -1, +1) actions = self.unnormalize_outputs({'action': 
actions})['action'] if self.config.n_action_repeats > 1: for _ in range(self.config.n_action_repeats): self._queues['action'].append(actions[0]) else: self._queues['action'].extend(actions[:self.config.n_action_steps]) action = self._queues['action'].popleft() return action @torch.no_grad() def plan(self, z: Tensor) -> Tensor: device = get_device_from_parameters(self) batch_size = z.shape[0] pi_actions = torch.empty(self.config.horizon, self.config.n_pi_samples, batch_size, self.config.output_shapes['action'][0], device=device) if self.config.n_pi_samples > 0: _z = einops.repeat(z, 'b d -> n b d', n=self.config.n_pi_samples) for t in range(self.config.horizon): pi_actions[t] = self.model.pi(_z, self.config.min_std) _z = self.model.latent_dynamics(_z, pi_actions[t]) z = einops.repeat(z, 'b d -> n b d', n=self.config.n_gaussian_samples + self.config.n_pi_samples) mean = torch.zeros(self.config.horizon, batch_size, self.config.output_shapes['action'][0], device=device) if self._prev_mean is not None: mean[:-1] = self._prev_mean[1:] std = self.config.max_std * torch.ones_like(mean) for _ in range(self.config.cem_iterations): std_normal_noise = torch.randn(self.config.horizon, self.config.n_gaussian_samples, batch_size, self.config.output_shapes['action'][0], device=std.device) gaussian_actions = torch.clamp(mean.unsqueeze(1) + std.unsqueeze(1) * std_normal_noise, -1, 1) actions = torch.cat([gaussian_actions, pi_actions], dim=1) value = self.estimate_value(z, actions).nan_to_num_(0) elite_idxs = torch.topk(value, self.config.n_elites, dim=0).indices elite_value = value.take_along_dim(elite_idxs, dim=0) elite_actions = actions.take_along_dim(einops.rearrange(elite_idxs, 'n b -> 1 n b 1'), dim=1) max_value = elite_value.max(0, keepdim=True)[0] score = torch.exp(self.config.elite_weighting_temperature * (elite_value - max_value)) score /= score.sum(axis=0, keepdim=True) _mean = torch.sum(einops.rearrange(score, 'n b -> n b 1') * elite_actions, dim=1) _std = torch.sqrt(torch.sum(einops.rearrange(score, 'n b -> n b 1') * (elite_actions - einops.rearrange(_mean, 'h b d -> h 1 b d')) ** 2, dim=1)) mean = self.config.gaussian_mean_momentum * mean + (1 - self.config.gaussian_mean_momentum) * _mean std = _std.clamp_(self.config.min_std, self.config.max_std) self._prev_mean = mean actions = elite_actions[:, torch.multinomial(score.T, 1).squeeze(), torch.arange(batch_size)] return actions @torch.no_grad() def estimate_value(self, z: Tensor, actions: Tensor): (G, running_discount) = (0, 1) for t in range(actions.shape[0]): if self.config.uncertainty_regularizer_coeff > 0: regularization = -(self.config.uncertainty_regularizer_coeff * self.model.Qs(z, actions[t]).std(0)) else: regularization = 0 (z, reward) = self.model.latent_dynamics_and_reward(z, actions[t]) G += running_discount * (reward + regularization) running_discount *= self.config.discount next_action = self.model.pi(z, self.config.min_std) terminal_values = self.model.Qs(z, next_action) if self.config.q_ensemble_size > 2: G += running_discount * torch.min(terminal_values[torch.randint(0, self.config.q_ensemble_size, size=(2,))], dim=0)[0] else: G += running_discount * torch.min(terminal_values, dim=0)[0] if self.config.uncertainty_regularizer_coeff > 0: G -= running_discount * self.config.uncertainty_regularizer_coeff * terminal_values.std(0) return G def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor | float]: device = get_device_from_parameters(self) batch = self.normalize_inputs(batch) if self._use_image: batch = dict(batch) 
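# Illustrative aside: the `plan` method above is a cross-entropy-method (CEM) loop: sample
# Gaussian action sequences, score them with `estimate_value`, keep the top `n_elites`, and
# refit the Gaussian with softmax weights exp(temperature * (value - max_value)). Below is a
# minimal standalone sketch of that update rule on a toy 1-D objective; it omits the
# policy-prior samples and the mean momentum used above, and every name in it is local to
# this sketch rather than lerobot API.
import torch

def toy_cem(objective, iterations=6, n_samples=512, n_elites=50, temperature=0.5):
    mean, std = torch.zeros(1), 2.0 * torch.ones(1)
    for _ in range(iterations):
        samples = mean + std * torch.randn(n_samples, 1)       # Gaussian candidates
        values = objective(samples).squeeze(-1)                # score every candidate
        elite_idx = torch.topk(values, n_elites).indices       # keep the best candidates
        elites = samples[elite_idx]
        score = torch.exp(temperature * (values[elite_idx] - values[elite_idx].max()))
        score = score / score.sum()
        mean = (score.unsqueeze(-1) * elites).sum(0)           # refit the Gaussian
        std = ((score.unsqueeze(-1) * (elites - mean) ** 2).sum(0)).sqrt().clamp(0.05, 2.0)
    return mean

# Maximizing -(x - 3)^2 should converge close to x = 3.
print(toy_cem(lambda x: -(x - 3.0) ** 2))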
batch['observation.image'] = batch[self.input_image_key] batch = self.normalize_targets(batch) info = {} for key in batch: if batch[key].ndim > 1: batch[key] = batch[key].transpose(1, 0) action = batch['action'] reward = batch['next.reward'] observations = {k: v for (k, v) in batch.items() if k.startswith('observation.')} if self._use_image and self.config.max_random_shift_ratio > 0: observations['observation.image'] = flatten_forward_unflatten(partial(random_shifts_aug, max_random_shift_ratio=self.config.max_random_shift_ratio), observations['observation.image']) (current_observation, next_observations) = ({}, {}) for k in observations: current_observation[k] = observations[k][0] next_observations[k] = observations[k][1:] (horizon, batch_size) = next_observations['observation.image' if self._use_image else 'observation.environment_state'].shape[:2] batch_size = batch['index'].shape[0] z_preds = torch.empty(horizon + 1, batch_size, self.config.latent_dim, device=device) z_preds[0] = self.model.encode(current_observation) reward_preds = torch.empty_like(reward, device=device) for t in range(horizon): (z_preds[t + 1], reward_preds[t]) = self.model.latent_dynamics_and_reward(z_preds[t], action[t]) q_preds_ensemble = self.model.Qs(z_preds[:-1], action) v_preds = self.model.V(z_preds[:-1]) info.update({'Q': q_preds_ensemble.mean().item(), 'V': v_preds.mean().item()}) with torch.no_grad(): z_targets = self.model_target.encode(next_observations) q_targets = reward + self.config.discount * self.model.V(self.model.encode(next_observations)) v_targets = self.model_target.Qs(z_preds[:-1].detach(), action, return_min=True) temporal_loss_coeffs = torch.pow(self.config.temporal_decay_coeff, torch.arange(horizon, device=device)).unsqueeze(-1) consistency_loss = (temporal_loss_coeffs * F.mse_loss(z_preds[1:], z_targets, reduction='none').mean(dim=-1) * ~batch['observation.state_is_pad'][0] * ~batch['action_is_pad'] * ~batch['observation.state_is_pad'][1:]).sum(0).mean() reward_loss = (temporal_loss_coeffs * F.mse_loss(reward_preds, reward, reduction='none') * ~batch['next.reward_is_pad'] * ~batch['observation.state_is_pad'][0] * ~batch['action_is_pad']).sum(0).mean() q_value_loss = (temporal_loss_coeffs * F.mse_loss(q_preds_ensemble, einops.repeat(q_targets, 't b -> e t b', e=q_preds_ensemble.shape[0]), reduction='none').sum(0) * ~batch['observation.state_is_pad'][0] * ~batch['action_is_pad'] * ~batch['next.reward_is_pad'] * ~batch['observation.state_is_pad'][1:]).sum(0).mean() diff = v_targets - v_preds raw_v_value_loss = torch.where(diff > 0, self.config.expectile_weight, 1 - self.config.expectile_weight) * diff ** 2 v_value_loss = (temporal_loss_coeffs * raw_v_value_loss * ~batch['observation.state_is_pad'][0] * ~batch['action_is_pad']).sum(0).mean() z_preds = z_preds.detach() with torch.no_grad(): advantage = self.model_target.Qs(z_preds[:-1], action, return_min=True) - self.model.V(z_preds[:-1]) info['advantage'] = advantage[0] exp_advantage = torch.clamp(torch.exp(advantage * self.config.advantage_scaling), max=100.0) action_preds = self.model.pi(z_preds[:-1]) mse = F.mse_loss(action_preds, action, reduction='none').sum(-1) pi_loss = (exp_advantage * mse * temporal_loss_coeffs * ~batch['observation.state_is_pad'][0] * ~batch['action_is_pad']).mean() loss = self.config.consistency_coeff * consistency_loss + self.config.reward_coeff * reward_loss + self.config.value_coeff * q_value_loss + self.config.value_coeff * v_value_loss + self.config.pi_coeff * pi_loss info.update({'consistency_loss': 
consistency_loss.item(), 'reward_loss': reward_loss.item(), 'Q_value_loss': q_value_loss.item(), 'V_value_loss': v_value_loss.item(), 'pi_loss': pi_loss.item(), 'loss': loss, 'sum_loss': loss.item() * self.config.horizon}) for key in batch: if batch[key].ndim > 1: batch[key] = batch[key].transpose(1, 0) return info def update(self): update_ema_parameters(self.model_target, self.model, self.config.target_model_momentum) class TDMPCTOLD(nn.Module): def __init__(self, config: TDMPCConfig): super().__init__() self.config = config self._encoder = TDMPCObservationEncoder(config) self._dynamics = nn.Sequential(nn.Linear(config.latent_dim + config.output_shapes['action'][0], config.mlp_dim), nn.LayerNorm(config.mlp_dim), nn.Mish(), nn.Linear(config.mlp_dim, config.mlp_dim), nn.LayerNorm(config.mlp_dim), nn.Mish(), nn.Linear(config.mlp_dim, config.latent_dim), nn.LayerNorm(config.latent_dim), nn.Sigmoid()) self._reward = nn.Sequential(nn.Linear(config.latent_dim + config.output_shapes['action'][0], config.mlp_dim), nn.LayerNorm(config.mlp_dim), nn.Mish(), nn.Linear(config.mlp_dim, config.mlp_dim), nn.LayerNorm(config.mlp_dim), nn.Mish(), nn.Linear(config.mlp_dim, 1)) self._pi = nn.Sequential(nn.Linear(config.latent_dim, config.mlp_dim), nn.LayerNorm(config.mlp_dim), nn.Mish(), nn.Linear(config.mlp_dim, config.mlp_dim), nn.LayerNorm(config.mlp_dim), nn.Mish(), nn.Linear(config.mlp_dim, config.output_shapes['action'][0])) self._Qs = nn.ModuleList([nn.Sequential(nn.Linear(config.latent_dim + config.output_shapes['action'][0], config.mlp_dim), nn.LayerNorm(config.mlp_dim), nn.Tanh(), nn.Linear(config.mlp_dim, config.mlp_dim), nn.ELU(), nn.Linear(config.mlp_dim, 1)) for _ in range(config.q_ensemble_size)]) self._V = nn.Sequential(nn.Linear(config.latent_dim, config.mlp_dim), nn.LayerNorm(config.mlp_dim), nn.Tanh(), nn.Linear(config.mlp_dim, config.mlp_dim), nn.ELU(), nn.Linear(config.mlp_dim, 1)) self._init_weights() def _init_weights(self): def _apply_fn(m): if isinstance(m, nn.Linear): nn.init.orthogonal_(m.weight.data) if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.Conv2d): gain = nn.init.calculate_gain('relu') nn.init.orthogonal_(m.weight.data, gain) if m.bias is not None: nn.init.zeros_(m.bias) self.apply(_apply_fn) for m in [self._reward, *self._Qs]: assert isinstance(m[-1], nn.Linear), 'Sanity check. The last linear layer needs 0 initialization on weights.' 
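# Illustrative aside: the V-function loss computed in `forward` above is an expectile
# regression: squared errors are weighted by `expectile_weight` when the target exceeds the
# prediction (diff > 0) and by `1 - expectile_weight` otherwise, so underestimates are
# penalized more heavily. A minimal standalone sketch of that weighting; the function name
# and example tensors are local to this sketch, not lerobot API.
import torch

def expectile_loss(v_pred: torch.Tensor, v_target: torch.Tensor, tau: float = 0.9) -> torch.Tensor:
    diff = v_target - v_pred
    weight = torch.where(diff > 0, tau, 1.0 - tau)  # asymmetric weighting
    return (weight * diff ** 2).mean()

# With tau = 0.9, an underestimate of 1.0 costs 0.9 while an overestimate of 1.0 costs 0.1.
print(expectile_loss(torch.tensor([0.0]), torch.tensor([1.0])))  # tensor(0.9000)
print(expectile_loss(torch.tensor([1.0]), torch.tensor([0.0])))  # tensor(0.1000)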
nn.init.zeros_(m[-1].weight) nn.init.zeros_(m[-1].bias) def encode(self, obs: dict[str, Tensor]) -> Tensor: return self._encoder(obs) def latent_dynamics_and_reward(self, z: Tensor, a: Tensor) -> tuple[Tensor, Tensor]: x = torch.cat([z, a], dim=-1) return (self._dynamics(x), self._reward(x).squeeze(-1)) def latent_dynamics(self, z: Tensor, a: Tensor) -> Tensor: x = torch.cat([z, a], dim=-1) return self._dynamics(x) def pi(self, z: Tensor, std: float=0.0) -> Tensor: action = torch.tanh(self._pi(z)) if std > 0: std = torch.ones_like(action) * std action += torch.randn_like(action) * std return action def V(self, z: Tensor) -> Tensor: return self._V(z).squeeze(-1) def Qs(self, z: Tensor, a: Tensor, return_min: bool=False) -> Tensor: x = torch.cat([z, a], dim=-1) if not return_min: return torch.stack([q(x).squeeze(-1) for q in self._Qs], dim=0) else: if len(self._Qs) > 2: Qs = [self._Qs[i] for i in np.random.choice(len(self._Qs), size=2)] else: Qs = self._Qs return torch.stack([q(x).squeeze(-1) for q in Qs], dim=0).min(dim=0)[0] class TDMPCObservationEncoder(nn.Module): def __init__(self, config: TDMPCConfig): super().__init__() self.config = config if 'observation.image' in config.input_shapes: self.image_enc_layers = nn.Sequential(nn.Conv2d(config.input_shapes['observation.image'][0], config.image_encoder_hidden_dim, 7, stride=2), nn.ReLU(), nn.Conv2d(config.image_encoder_hidden_dim, config.image_encoder_hidden_dim, 5, stride=2), nn.ReLU(), nn.Conv2d(config.image_encoder_hidden_dim, config.image_encoder_hidden_dim, 3, stride=2), nn.ReLU(), nn.Conv2d(config.image_encoder_hidden_dim, config.image_encoder_hidden_dim, 3, stride=2), nn.ReLU()) dummy_batch = torch.zeros(1, *config.input_shapes['observation.image']) with torch.inference_mode(): out_shape = self.image_enc_layers(dummy_batch).shape[1:] self.image_enc_layers.extend(nn.Sequential(nn.Flatten(), nn.Linear(np.prod(out_shape), config.latent_dim), nn.LayerNorm(config.latent_dim), nn.Sigmoid())) if 'observation.state' in config.input_shapes: self.state_enc_layers = nn.Sequential(nn.Linear(config.input_shapes['observation.state'][0], config.state_encoder_hidden_dim), nn.ELU(), nn.Linear(config.state_encoder_hidden_dim, config.latent_dim), nn.LayerNorm(config.latent_dim), nn.Sigmoid()) if 'observation.environment_state' in config.input_shapes: self.env_state_enc_layers = nn.Sequential(nn.Linear(config.input_shapes['observation.environment_state'][0], config.state_encoder_hidden_dim), nn.ELU(), nn.Linear(config.state_encoder_hidden_dim, config.latent_dim), nn.LayerNorm(config.latent_dim), nn.Sigmoid()) def forward(self, obs_dict: dict[str, Tensor]) -> Tensor: feat = [] if 'observation.image' in self.config.input_shapes: feat.append(flatten_forward_unflatten(self.image_enc_layers, obs_dict['observation.image'])) if 'observation.environment_state' in self.config.input_shapes: feat.append(self.env_state_enc_layers(obs_dict['observation.environment_state'])) if 'observation.state' in self.config.input_shapes: feat.append(self.state_enc_layers(obs_dict['observation.state'])) return torch.stack(feat, dim=0).mean(0) def random_shifts_aug(x: Tensor, max_random_shift_ratio: float) -> Tensor: (b, _, h, w) = x.size() assert h == w, 'non-square images not handled yet' pad = int(round(max_random_shift_ratio * h)) x = F.pad(x, tuple([pad] * 4), 'replicate') eps = 1.0 / (h + 2 * pad) arange = torch.linspace(-1.0 + eps, 1.0 - eps, h + 2 * pad, device=x.device, dtype=torch.float32)[:h] arange = einops.repeat(arange, 'w -> h w 1', h=h) base_grid = 
torch.cat([arange, arange.transpose(1, 0)], dim=2) base_grid = einops.repeat(base_grid, 'h w c -> b h w c', b=b) shift = torch.randint(0, 2 * pad + 1, size=(b, 1, 1, 2), device=x.device, dtype=torch.float32) shift *= 2.0 / (h + 2 * pad) grid = base_grid + shift return F.grid_sample(x, grid, padding_mode='zeros', align_corners=False) def update_ema_parameters(ema_net: nn.Module, net: nn.Module, alpha: float): for (ema_module, module) in zip(ema_net.modules(), net.modules(), strict=True): for ((n_p_ema, p_ema), (n_p, p)) in zip(ema_module.named_parameters(recurse=False), module.named_parameters(recurse=False), strict=True): assert n_p_ema == n_p, "Parameter names don't match for EMA model update" if isinstance(p, dict): raise RuntimeError('Dict parameter not supported') if isinstance(module, nn.modules.batchnorm._BatchNorm) or not p.requires_grad: p_ema.copy_(p.to(dtype=p_ema.dtype).data) with torch.no_grad(): p_ema.mul_(alpha) p_ema.add_(p.to(dtype=p_ema.dtype).data, alpha=1 - alpha) def flatten_forward_unflatten(fn: Callable[[Tensor], Tensor], image_tensor: Tensor) -> Tensor: if image_tensor.ndim == 4: return fn(image_tensor) start_dims = image_tensor.shape[:-3] inp = torch.flatten(image_tensor, end_dim=-4) flat_out = fn(inp) return torch.reshape(flat_out, (*start_dims, *flat_out.shape[1:])) # File: lerobot-main/lerobot/common/policies/utils.py import torch from torch import nn def populate_queues(queues, batch): for key in batch: if key not in queues: continue if len(queues[key]) != queues[key].maxlen: while len(queues[key]) != queues[key].maxlen: queues[key].append(batch[key]) else: queues[key].append(batch[key]) return queues def get_device_from_parameters(module: nn.Module) -> torch.device: return next(iter(module.parameters())).device def get_dtype_from_parameters(module: nn.Module) -> torch.dtype: return next(iter(module.parameters())).dtype # File: lerobot-main/lerobot/common/policies/vqbet/configuration_vqbet.py from dataclasses import dataclass, field @dataclass class VQBeTConfig: n_obs_steps: int = 5 n_action_pred_token: int = 3 action_chunk_size: int = 5 input_shapes: dict[str, list[int]] = field(default_factory=lambda : {'observation.image': [3, 96, 96], 'observation.state': [2]}) output_shapes: dict[str, list[int]] = field(default_factory=lambda : {'action': [2]}) input_normalization_modes: dict[str, str] = field(default_factory=lambda : {'observation.image': 'mean_std', 'observation.state': 'min_max'}) output_normalization_modes: dict[str, str] = field(default_factory=lambda : {'action': 'min_max'}) vision_backbone: str = 'resnet18' crop_shape: tuple[int, int] | None = (84, 84) crop_is_random: bool = True pretrained_backbone_weights: str | None = None use_group_norm: bool = True spatial_softmax_num_keypoints: int = 32 n_vqvae_training_steps: int = 20000 vqvae_n_embed: int = 16 vqvae_embedding_dim: int = 256 vqvae_enc_hidden_dim: int = 128 gpt_block_size: int = 500 gpt_input_dim: int = 512 gpt_output_dim: int = 512 gpt_n_layer: int = 8 gpt_n_head: int = 8 gpt_hidden_dim: int = 512 dropout: float = 0.1 mlp_hidden_dim: int = 1024 offset_loss_weight: float = 10000.0 primary_code_loss_weight: float = 5.0 secondary_code_loss_weight: float = 0.5 bet_softmax_temperature: float = 0.1 sequentially_select: bool = False def __post_init__(self): if not self.vision_backbone.startswith('resnet'): raise ValueError(f'`vision_backbone` must be one of the ResNet variants. 
Got {self.vision_backbone}.') image_keys = {k for k in self.input_shapes if k.startswith('observation.image')} if self.crop_shape is not None: for image_key in image_keys: if self.crop_shape[0] > self.input_shapes[image_key][1] or self.crop_shape[1] > self.input_shapes[image_key][2]: raise ValueError(f'`crop_shape` should fit within `input_shapes[{image_key}]`. Got {self.crop_shape} for `crop_shape` and {self.input_shapes[image_key]} for `input_shapes[{{image_key}}]`.') first_image_key = next(iter(image_keys)) for image_key in image_keys: if self.input_shapes[image_key] != self.input_shapes[first_image_key]: raise ValueError(f'`input_shapes[{image_key}]` does not match `input_shapes[{first_image_key}]`, but we expect all image shapes to match.') # File: lerobot-main/lerobot/common/policies/vqbet/modeling_vqbet.py import math import warnings from collections import deque from typing import Callable, List import einops import numpy as np import torch import torch.nn.functional as F import torchvision from huggingface_hub import PyTorchModelHubMixin from torch import Tensor, nn from torch.optim.lr_scheduler import LambdaLR from lerobot.common.policies.normalize import Normalize, Unnormalize from lerobot.common.policies.utils import get_device_from_parameters, populate_queues from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig from lerobot.common.policies.vqbet.vqbet_utils import GPT, ResidualVQ class VQBeTPolicy(nn.Module, PyTorchModelHubMixin, library_name='lerobot', repo_url='https://github.com/huggingface/lerobot', tags=['robotics', 'vqbet']): name = 'vqbet' def __init__(self, config: VQBeTConfig | None=None, dataset_stats: dict[str, dict[str, Tensor]] | None=None): super().__init__() if config is None: config = VQBeTConfig() self.config = config self.normalize_inputs = Normalize(config.input_shapes, config.input_normalization_modes, dataset_stats) self.normalize_targets = Normalize(config.output_shapes, config.output_normalization_modes, dataset_stats) self.unnormalize_outputs = Unnormalize(config.output_shapes, config.output_normalization_modes, dataset_stats) self.vqbet = VQBeTModel(config) self.expected_image_keys = [k for k in config.input_shapes if k.startswith('observation.image')] self.reset() def reset(self): self._queues = {'observation.images': deque(maxlen=self.config.n_obs_steps), 'observation.state': deque(maxlen=self.config.n_obs_steps), 'action': deque(maxlen=self.config.action_chunk_size)} @torch.no_grad def select_action(self, batch: dict[str, Tensor]) -> Tensor: batch = self.normalize_inputs(batch) batch = dict(batch) batch['observation.images'] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4) self._queues = populate_queues(self._queues, batch) if not self.vqbet.action_head.vqvae_model.discretized.item(): warnings.warn('To evaluate in the environment, your VQ-BeT model should contain a pretrained Residual VQ.', stacklevel=1) if len(self._queues['action']) == 0: batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues} actions = self.vqbet(batch, rollout=True)[:, :self.config.action_chunk_size] actions = self.unnormalize_outputs({'action': actions})['action'] self._queues['action'].extend(actions.transpose(0, 1)) action = self._queues['action'].popleft() return action def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: batch = self.normalize_inputs(batch) batch = dict(batch) batch['observation.images'] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4) batch = 
self.normalize_targets(batch) if not self.vqbet.action_head.vqvae_model.discretized.item(): (loss, n_different_codes, n_different_combinations, recon_l1_error) = self.vqbet.action_head.discretize(self.config.n_vqvae_training_steps, batch['action']) return {'loss': loss, 'n_different_codes': n_different_codes, 'n_different_combinations': n_different_combinations, 'recon_l1_error': recon_l1_error} (_, loss_dict) = self.vqbet(batch, rollout=False) return loss_dict class SpatialSoftmax(nn.Module): def __init__(self, input_shape, num_kp=None): super().__init__() assert len(input_shape) == 3 (self._in_c, self._in_h, self._in_w) = input_shape if num_kp is not None: self.nets = torch.nn.Conv2d(self._in_c, num_kp, kernel_size=1) self._out_c = num_kp else: self.nets = None self._out_c = self._in_c (pos_x, pos_y) = np.meshgrid(np.linspace(-1.0, 1.0, self._in_w), np.linspace(-1.0, 1.0, self._in_h)) pos_x = torch.from_numpy(pos_x.reshape(self._in_h * self._in_w, 1)).float() pos_y = torch.from_numpy(pos_y.reshape(self._in_h * self._in_w, 1)).float() self.register_buffer('pos_grid', torch.cat([pos_x, pos_y], dim=1)) def forward(self, features: Tensor) -> Tensor: if self.nets is not None: features = self.nets(features) features = features.reshape(-1, self._in_h * self._in_w) attention = F.softmax(features, dim=-1) expected_xy = attention @ self.pos_grid feature_keypoints = expected_xy.view(-1, self._out_c, 2) return feature_keypoints class VQBeTModel(nn.Module): def __init__(self, config: VQBeTConfig): super().__init__() self.config = config self.rgb_encoder = VQBeTRgbEncoder(config) self.num_images = len([k for k in config.input_shapes if k.startswith('observation.image')]) self.action_token = nn.Parameter(torch.randn(1, 1, self.config.gpt_input_dim)) self.state_projector = MLP(config.input_shapes['observation.state'][0], hidden_channels=[self.config.gpt_input_dim]) self.rgb_feature_projector = MLP(self.rgb_encoder.feature_dim, hidden_channels=[self.config.gpt_input_dim]) self.policy = GPT(config) self.action_head = VQBeTHead(config) num_tokens = self.config.n_action_pred_token + self.config.n_obs_steps - 1 self.register_buffer('select_target_actions_indices', torch.row_stack([torch.arange(i, i + self.config.action_chunk_size) for i in range(num_tokens)])) def forward(self, batch: dict[str, Tensor], rollout: bool) -> Tensor: assert set(batch).issuperset({'observation.state', 'observation.images'}) (batch_size, n_obs_steps) = batch['observation.state'].shape[:2] assert n_obs_steps == self.config.n_obs_steps img_features = self.rgb_encoder(einops.rearrange(batch['observation.images'], 'b s n ... -> (b s n) ...')) img_features = einops.rearrange(img_features, '(b s n) ... 
-> b s n ...', b=batch_size, s=n_obs_steps, n=self.num_images) rgb_tokens = self.rgb_feature_projector(img_features) input_tokens = [rgb_tokens[:, :, i] for i in range(rgb_tokens.size(2))] input_tokens.append(self.state_projector(batch['observation.state'])) input_tokens.append(einops.repeat(self.action_token, '1 1 d -> b n d', b=batch_size, n=n_obs_steps)) input_tokens = torch.stack(input_tokens, dim=2) input_tokens = einops.rearrange(input_tokens, 'b n t d -> b (n t) d') len_additional_action_token = self.config.n_action_pred_token - 1 future_action_tokens = self.action_token.repeat(batch_size, len_additional_action_token, 1) input_tokens = torch.cat([input_tokens, future_action_tokens], dim=1) features = self.policy(input_tokens) historical_act_pred_index = np.arange(0, n_obs_steps) * (len(self.config.input_shapes) + 1) + len(self.config.input_shapes) if len_additional_action_token > 0: features = torch.cat([features[:, historical_act_pred_index], features[:, -len_additional_action_token:]], dim=1) else: features = features[:, historical_act_pred_index] action_head_output = self.action_head(features) if rollout: return action_head_output['predicted_action'][:, n_obs_steps - 1, :].reshape(batch_size, self.config.action_chunk_size, -1) else: output = batch['action'][:, self.select_target_actions_indices] loss = self.action_head.loss_fn(action_head_output, output, reduction='mean') return (action_head_output, loss) class VQBeTHead(nn.Module): def __init__(self, config: VQBeTConfig): super().__init__() self.config = config self.vqvae_model = VqVae(config) if config.sequentially_select: self.map_to_cbet_preds_primary_bin = MLP(in_channels=config.gpt_output_dim, hidden_channels=[self.config.vqvae_n_embed]) self.map_to_cbet_preds_secondary_bin = MLP(in_channels=config.gpt_output_dim + self.config.vqvae_n_embed, hidden_channels=[self.config.vqvae_n_embed]) else: self.map_to_cbet_preds_bin = MLP(in_channels=config.gpt_output_dim, hidden_channels=[self.vqvae_model.vqvae_num_layers * self.config.vqvae_n_embed]) self.map_to_cbet_preds_offset = MLP(in_channels=config.gpt_output_dim, hidden_channels=[self.vqvae_model.vqvae_num_layers * self.config.vqvae_n_embed * config.action_chunk_size * config.output_shapes['action'][0]]) self._focal_loss_fn = FocalLoss(gamma=2.0) def discretize(self, n_vqvae_training_steps, actions): actions = torch.cat([actions[:, j:j + self.config.action_chunk_size, :] for j in range(actions.shape[1] + 1 - self.config.action_chunk_size)], dim=0) (loss, metric) = self.vqvae_model.vqvae_forward(actions) n_different_codes = sum([len(torch.unique(metric[2][:, i])) for i in range(self.vqvae_model.vqvae_num_layers)]) n_different_combinations = len(torch.unique(metric[2], dim=0)) recon_l1_error = metric[0].detach().cpu().item() self.vqvae_model.optimized_steps += 1 if self.vqvae_model.optimized_steps >= n_vqvae_training_steps: self.vqvae_model.discretized = torch.tensor(True) self.vqvae_model.vq_layer.freeze_codebook = torch.tensor(True) print('Finished discretizing action data!') self.vqvae_model.eval() for param in self.vqvae_model.vq_layer.parameters(): param.requires_grad = False return (loss, n_different_codes, n_different_combinations, recon_l1_error) def forward(self, x, **kwargs): (N, T, _) = x.shape x = einops.rearrange(x, 'N T WA -> (N T) WA') cbet_offsets = self.map_to_cbet_preds_offset(x) cbet_offsets = einops.rearrange(cbet_offsets, '(NT) (G C WA) -> (NT) G C WA', G=self.vqvae_model.vqvae_num_layers, C=self.config.vqvae_n_embed) if self.config.sequentially_select: 
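# With `sequentially_select`, the two residual-VQ code indices are predicted one after the other:
# the primary bin is sampled first, and its one-hot encoding is concatenated to the transformer
# feature before the secondary bin is predicted. A minimal sketch of that pattern, with hypothetical
# `primary_head` / `secondary_head` MLPs standing in for the `map_to_cbet_preds_*_bin` heads and a
# feature tensor `x` of shape (NT, D):
#     primary_logits = primary_head(x)                                           # (NT, vqvae_n_embed)
#     primary = torch.multinomial(primary_logits.softmax(-1), num_samples=1).squeeze(-1)
#     one_hot = F.one_hot(primary, num_classes=primary_logits.shape[-1]).float()
#     secondary_logits = secondary_head(torch.cat([x, one_hot], dim=-1))         # (NT, vqvae_n_embed)
# The branch below does the same thing with temperature-scaled softmax sampling.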
cbet_primary_logits = self.map_to_cbet_preds_primary_bin(x) cbet_primary_probs = torch.softmax(cbet_primary_logits / self.config.bet_softmax_temperature, dim=-1) (NT, choices) = cbet_primary_probs.shape sampled_primary_centers = einops.rearrange(torch.multinomial(cbet_primary_probs.view(-1, choices), num_samples=1), '(NT) 1 -> NT', NT=NT) cbet_secondary_logits = self.map_to_cbet_preds_secondary_bin(torch.cat((x, F.one_hot(sampled_primary_centers, num_classes=self.config.vqvae_n_embed)), axis=1)) cbet_secondary_probs = torch.softmax(cbet_secondary_logits / self.config.bet_softmax_temperature, dim=-1) sampled_secondary_centers = einops.rearrange(torch.multinomial(cbet_secondary_probs.view(-1, choices), num_samples=1), '(NT) 1 -> NT', NT=NT) sampled_centers = torch.stack((sampled_primary_centers, sampled_secondary_centers), axis=1) cbet_logits = torch.stack([cbet_primary_logits, cbet_secondary_logits], dim=1) else: cbet_logits = self.map_to_cbet_preds_bin(x) cbet_logits = einops.rearrange(cbet_logits, '(NT) (G C) -> (NT) G C', G=self.vqvae_model.vqvae_num_layers) cbet_probs = torch.softmax(cbet_logits / self.config.bet_softmax_temperature, dim=-1) (NT, G, choices) = cbet_probs.shape sampled_centers = einops.rearrange(torch.multinomial(cbet_probs.view(-1, choices), num_samples=1), '(NT G) 1 -> NT G', NT=NT) device = get_device_from_parameters(self) indices = (torch.arange(NT, device=device).unsqueeze(1), torch.arange(self.vqvae_model.vqvae_num_layers, device=device).unsqueeze(0), sampled_centers) sampled_offsets = cbet_offsets[indices] sampled_offsets = sampled_offsets.sum(dim=1) with torch.no_grad(): return_decoder_input = self.vqvae_model.get_embeddings_from_code(sampled_centers).clone().detach() decoded_action = self.vqvae_model.get_action_from_latent(return_decoder_input).clone().detach() sampled_offsets = einops.rearrange(sampled_offsets, 'NT (W A) -> NT W A', W=self.config.action_chunk_size) predicted_action = decoded_action + sampled_offsets predicted_action = einops.rearrange(predicted_action, '(N T) W A -> N T (W A)', N=N, T=T, W=self.config.action_chunk_size) return {'cbet_logits': cbet_logits, 'predicted_action': predicted_action, 'sampled_centers': sampled_centers, 'decoded_action': decoded_action} def loss_fn(self, pred, target, **kwargs): action_seq = target predicted_action = pred['predicted_action'] sampled_centers = pred['sampled_centers'] decoded_action = pred['decoded_action'] NT = predicted_action.shape[0] * predicted_action.shape[1] cbet_logits = pred['cbet_logits'] predicted_action = einops.rearrange(predicted_action, 'N T (W A) -> (N T) W A', W=self.config.action_chunk_size) action_seq = einops.rearrange(action_seq, 'N T W A -> (N T) W A') with torch.no_grad(): (state_vq, action_bins) = self.vqvae_model.get_code(action_seq) offset_loss = F.l1_loss(action_seq, predicted_action) cbet_loss1 = self._focal_loss_fn(cbet_logits[:, 0, :], action_bins[:, 0]) cbet_loss2 = self._focal_loss_fn(cbet_logits[:, 1, :], action_bins[:, 1]) cbet_loss = cbet_loss1 * self.config.primary_code_loss_weight + cbet_loss2 * self.config.secondary_code_loss_weight equal_primary_code_rate = torch.sum((action_bins[:, 0] == sampled_centers[:, 0]).int()) / NT equal_secondary_code_rate = torch.sum((action_bins[:, 1] == sampled_centers[:, 1]).int()) / NT action_mse_error = torch.mean((action_seq - predicted_action) ** 2) vq_action_error = torch.mean(torch.abs(action_seq - decoded_action)) offset_action_error = torch.mean(torch.abs(action_seq - predicted_action)) action_error_max = 
torch.max(torch.abs(action_seq - predicted_action)) loss = cbet_loss + self.config.offset_loss_weight * offset_loss loss_dict = {'loss': loss, 'classification_loss': cbet_loss.detach().cpu().item(), 'offset_loss': offset_loss.detach().cpu().item(), 'equal_primary_code_rate': equal_primary_code_rate.detach().cpu().item(), 'equal_secondary_code_rate': equal_secondary_code_rate.detach().cpu().item(), 'vq_action_error': vq_action_error.detach().cpu().item(), 'offset_action_error': offset_action_error.detach().cpu().item(), 'action_error_max': action_error_max.detach().cpu().item(), 'action_mse_error': action_mse_error.detach().cpu().item()} return loss_dict class VQBeTOptimizer(torch.optim.Adam): def __init__(self, policy, cfg): vqvae_params = list(policy.vqbet.action_head.vqvae_model.encoder.parameters()) + list(policy.vqbet.action_head.vqvae_model.decoder.parameters()) + list(policy.vqbet.action_head.vqvae_model.vq_layer.parameters()) (decay_params, no_decay_params) = policy.vqbet.policy.configure_parameters() decay_params = decay_params + list(policy.vqbet.rgb_encoder.parameters()) + list(policy.vqbet.state_projector.parameters()) + list(policy.vqbet.rgb_feature_projector.parameters()) + [policy.vqbet.action_token] + list(policy.vqbet.action_head.map_to_cbet_preds_offset.parameters()) if cfg.policy.sequentially_select: decay_params = decay_params + list(policy.vqbet.action_head.map_to_cbet_preds_primary_bin.parameters()) + list(policy.vqbet.action_head.map_to_cbet_preds_secondary_bin.parameters()) else: decay_params = decay_params + list(policy.vqbet.action_head.map_to_cbet_preds_bin.parameters()) optim_groups = [{'params': decay_params, 'weight_decay': cfg.training.adam_weight_decay, 'lr': cfg.training.lr}, {'params': vqvae_params, 'weight_decay': 0.0001, 'lr': cfg.training.vqvae_lr}, {'params': no_decay_params, 'weight_decay': 0.0, 'lr': cfg.training.lr}] super().__init__(optim_groups, cfg.training.lr, cfg.training.adam_betas, cfg.training.adam_eps) class VQBeTScheduler(nn.Module): def __init__(self, optimizer, cfg): super().__init__() n_vqvae_training_steps = cfg.training.n_vqvae_training_steps num_warmup_steps = cfg.training.lr_warmup_steps num_training_steps = cfg.training.offline_steps num_cycles = 0.5 def lr_lambda(current_step): if current_step < n_vqvae_training_steps: return float(1) else: current_step = current_step - n_vqvae_training_steps if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) self.lr_scheduler = LambdaLR(optimizer, lr_lambda, -1) def step(self): self.lr_scheduler.step() class VQBeTRgbEncoder(nn.Module): def __init__(self, config: VQBeTConfig): super().__init__() if config.crop_shape is not None: self.do_crop = True self.center_crop = torchvision.transforms.CenterCrop(config.crop_shape) if config.crop_is_random: self.maybe_random_crop = torchvision.transforms.RandomCrop(config.crop_shape) else: self.maybe_random_crop = self.center_crop else: self.do_crop = False backbone_model = getattr(torchvision.models, config.vision_backbone)(weights=config.pretrained_backbone_weights) self.backbone = nn.Sequential(*list(backbone_model.children())[:-2]) if config.use_group_norm: if config.pretrained_backbone_weights: raise ValueError("You can't replace BatchNorm in a pretrained model without ruining the weights!") self.backbone = 
_replace_submodules(root_module=self.backbone, predicate=lambda x: isinstance(x, nn.BatchNorm2d), func=lambda x: nn.GroupNorm(num_groups=x.num_features // 16, num_channels=x.num_features)) image_keys = [k for k in config.input_shapes if k.startswith('observation.image')] assert len(image_keys) == 1 image_key = image_keys[0] dummy_input_h_w = config.crop_shape if config.crop_shape is not None else config.input_shapes[image_key][1:] dummy_input = torch.zeros(size=(1, config.input_shapes[image_key][0], *dummy_input_h_w)) with torch.inference_mode(): dummy_feature_map = self.backbone(dummy_input) feature_map_shape = tuple(dummy_feature_map.shape[1:]) self.pool = SpatialSoftmax(feature_map_shape, num_kp=config.spatial_softmax_num_keypoints) self.feature_dim = config.spatial_softmax_num_keypoints * 2 self.out = nn.Linear(config.spatial_softmax_num_keypoints * 2, self.feature_dim) self.relu = nn.ReLU() def forward(self, x: Tensor) -> Tensor: if self.do_crop: if self.training: x = self.maybe_random_crop(x) else: x = self.center_crop(x) x = torch.flatten(self.pool(self.backbone(x)), start_dim=1) x = self.relu(self.out(x)) return x def _replace_submodules(root_module: nn.Module, predicate: Callable[[nn.Module], bool], func: Callable[[nn.Module], nn.Module]) -> nn.Module: if predicate(root_module): return func(root_module) replace_list = [k.split('.') for (k, m) in root_module.named_modules(remove_duplicate=True) if predicate(m)] for (*parents, k) in replace_list: parent_module = root_module if len(parents) > 0: parent_module = root_module.get_submodule('.'.join(parents)) if isinstance(parent_module, nn.Sequential): src_module = parent_module[int(k)] else: src_module = getattr(parent_module, k) tgt_module = func(src_module) if isinstance(parent_module, nn.Sequential): parent_module[int(k)] = tgt_module else: setattr(parent_module, k, tgt_module) assert not any((predicate(m) for (_, m) in root_module.named_modules(remove_duplicate=True))) return root_module class VqVae(nn.Module): def __init__(self, config: VQBeTConfig): super().__init__() self.config = config self.register_buffer('discretized', torch.tensor(False)) self.optimized_steps = 0 self.vqvae_num_layers = 2 self.vq_layer = ResidualVQ(dim=config.vqvae_embedding_dim, num_quantizers=self.vqvae_num_layers, codebook_size=config.vqvae_n_embed) self.encoder = MLP(in_channels=self.config.output_shapes['action'][0] * self.config.action_chunk_size, hidden_channels=[config.vqvae_enc_hidden_dim, config.vqvae_enc_hidden_dim, config.vqvae_embedding_dim]) self.decoder = MLP(in_channels=config.vqvae_embedding_dim, hidden_channels=[config.vqvae_enc_hidden_dim, config.vqvae_enc_hidden_dim, self.config.output_shapes['action'][0] * self.config.action_chunk_size]) def get_embeddings_from_code(self, encoding_indices): with torch.no_grad(): z_embed = self.vq_layer.get_codebook_vector_from_indices(encoding_indices) z_embed = z_embed.sum(dim=0) return z_embed def get_action_from_latent(self, latent): output = self.decoder(latent) if self.config.action_chunk_size == 1: return einops.rearrange(output, 'N (T A) -> N T A', A=self.config.output_shapes['action'][0]) else: return einops.rearrange(output, 'N (T A) -> N T A', A=self.config.output_shapes['action'][0]) def get_code(self, state): state = einops.rearrange(state, 'N T A -> N (T A)') with torch.no_grad(): state_rep = self.encoder(state) state_rep_shape = state_rep.shape[:-1] state_rep_flat = state_rep.view(state_rep.size(0), -1, state_rep.size(1)) (state_rep_flat, vq_code, vq_loss_state) = 
self.vq_layer(state_rep_flat) state_vq = state_rep_flat.view(*state_rep_shape, -1) vq_code = vq_code.view(*state_rep_shape, -1) vq_loss_state = torch.sum(vq_loss_state) return (state_vq, vq_code) def vqvae_forward(self, state): state = einops.rearrange(state, 'N T A -> N (T A)') state_rep = self.encoder(state) state_rep_shape = state_rep.shape[:-1] state_rep_flat = state_rep.view(state_rep.size(0), -1, state_rep.size(1)) (state_rep_flat, vq_code, vq_loss_state) = self.vq_layer(state_rep_flat) state_vq = state_rep_flat.view(*state_rep_shape, -1) vq_code = vq_code.view(*state_rep_shape, -1) vq_loss_state = torch.sum(vq_loss_state) dec_out = self.decoder(state_vq) encoder_loss = (state - dec_out).abs().mean() rep_loss = encoder_loss + vq_loss_state * 5 metric = (encoder_loss.clone().detach(), vq_loss_state.clone().detach(), vq_code, rep_loss.item()) return (rep_loss, metric) class FocalLoss(nn.Module): def __init__(self, gamma: float=0, size_average: bool=True): super().__init__() self.gamma = gamma self.size_average = size_average def forward(self, input, target): if len(input.shape) == 3: (N, T, _) = input.shape logpt = F.log_softmax(input, dim=-1) logpt = logpt.gather(-1, target.view(N, T, 1)).view(N, T) elif len(input.shape) == 2: logpt = F.log_softmax(input, dim=-1) logpt = logpt.gather(-1, target.view(-1, 1)).view(-1) pt = logpt.exp() loss = -1 * (1 - pt) ** self.gamma * logpt if self.size_average: return loss.mean() else: return loss.sum() class MLP(torch.nn.Sequential): def __init__(self, in_channels: int, hidden_channels: List[int]): layers = [] in_dim = in_channels for hidden_dim in hidden_channels[:-1]: layers.append(torch.nn.Linear(in_dim, hidden_dim)) layers.append(torch.nn.ReLU()) in_dim = hidden_dim layers.append(torch.nn.Linear(in_dim, hidden_channels[-1])) super().__init__(*layers) # File: lerobot-main/lerobot/common/policies/vqbet/vqbet_utils.py import math from functools import partial from math import ceil from random import randrange from typing import Callable import torch import torch.distributed as distributed import torch.nn.functional as F from einops import pack, rearrange, reduce, repeat, unpack from torch import einsum, nn from torch.cuda.amp import autocast from torch.optim import Optimizer from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig '' '' class CausalSelfAttention(nn.Module): def __init__(self, config): super().__init__() assert config.gpt_hidden_dim % config.gpt_n_head == 0 self.c_attn = nn.Linear(config.gpt_hidden_dim, 3 * config.gpt_hidden_dim) self.c_proj = nn.Linear(config.gpt_hidden_dim, config.gpt_hidden_dim) self.attn_dropout = nn.Dropout(config.dropout) self.resid_dropout = nn.Dropout(config.dropout) self.register_buffer('bias', torch.tril(torch.ones(config.gpt_block_size, config.gpt_block_size)).view(1, 1, config.gpt_block_size, config.gpt_block_size)) self.gpt_n_head = config.gpt_n_head self.gpt_hidden_dim = config.gpt_hidden_dim def forward(self, x): (B, T, C) = x.size() (q, k, v) = self.c_attn(x).split(self.gpt_hidden_dim, dim=2) k = k.view(B, T, self.gpt_n_head, C // self.gpt_n_head).transpose(1, 2) q = q.view(B, T, self.gpt_n_head, C // self.gpt_n_head).transpose(1, 2) v = v.view(B, T, self.gpt_n_head, C // self.gpt_n_head).transpose(1, 2) att = q @ k.transpose(-2, -1) * (1.0 / math.sqrt(k.size(-1))) att = att.masked_fill(self.bias[:, :, :T, :T] == 0, float('-inf')) att = F.softmax(att, dim=-1) att = self.attn_dropout(att) y = att @ v y = y.transpose(1, 2).contiguous().view(B, T, C) y = 
self.resid_dropout(self.c_proj(y)) return y class Block(nn.Module): def __init__(self, config): super().__init__() self.ln_1 = nn.LayerNorm(config.gpt_hidden_dim) self.attn = CausalSelfAttention(config) self.ln_2 = nn.LayerNorm(config.gpt_hidden_dim) self.mlp = nn.Sequential(nn.Linear(config.gpt_hidden_dim, 4 * config.gpt_hidden_dim), nn.GELU(), nn.Linear(4 * config.gpt_hidden_dim, config.gpt_hidden_dim), nn.Dropout(config.dropout)) def forward(self, x): x = x + self.attn(self.ln_1(x)) x = x + self.mlp(self.ln_2(x)) return x class GPT(nn.Module): def __init__(self, config: VQBeTConfig): super().__init__() assert config.gpt_output_dim is not None assert config.gpt_block_size is not None self.config = config self.transformer = nn.ModuleDict({'wte': nn.Linear(config.gpt_input_dim, config.gpt_hidden_dim), 'wpe': nn.Embedding(config.gpt_block_size, config.gpt_hidden_dim), 'drop': nn.Dropout(config.dropout), 'h': nn.ModuleList([Block(config) for _ in range(config.gpt_n_layer)]), 'ln_f': nn.LayerNorm(config.gpt_hidden_dim)}) self.lm_head = nn.Linear(config.gpt_hidden_dim, config.gpt_output_dim, bias=False) self.apply(self._init_weights) for (pn, p) in self.named_parameters(): if pn.endswith('c_proj.weight'): torch.nn.init.normal_(p, mean=0.0, std=0.02 / math.sqrt(2 * config.gpt_n_layer)) n_params = sum((p.numel() for p in self.parameters())) print('number of parameters: {:.2f}M'.format(n_params / 1000000.0)) def forward(self, input, targets=None): device = input.device (b, t, d) = input.size() assert t <= self.config.gpt_block_size, f'Cannot forward sequence of length {t}, block size is only {self.config.gpt_block_size}' pos = torch.arange(0, t, dtype=torch.long, device=device).unsqueeze(0) tok_emb = self.transformer.wte(input) pos_emb = self.transformer.wpe(pos) x = self.transformer.drop(tok_emb + pos_emb) for block in self.transformer.h: x = block(x) x = self.transformer.ln_f(x) logits = self.lm_head(x) return logits def _init_weights(self, module): if isinstance(module, nn.Linear): torch.nn.init.normal_(module.weight, mean=0.0, std=0.02) if module.bias is not None: torch.nn.init.zeros_(module.bias) elif isinstance(module, nn.Embedding): torch.nn.init.normal_(module.weight, mean=0.0, std=0.02) elif isinstance(module, nn.LayerNorm): torch.nn.init.zeros_(module.bias) torch.nn.init.ones_(module.weight) def crop_block_size(self, gpt_block_size): assert gpt_block_size <= self.config.gpt_block_size self.config.gpt_block_size = gpt_block_size self.transformer.wpe.weight = nn.Parameter(self.transformer.wpe.weight[:gpt_block_size]) for block in self.transformer.h: block.attn.bias = block.attn.bias[:, :, :gpt_block_size, :gpt_block_size] def configure_parameters(self): decay = set() no_decay = set() whitelist_weight_modules = (torch.nn.Linear,) blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding) for (mn, m) in self.named_modules(): for (pn, _p) in m.named_parameters(): fpn = '{}.{}'.format(mn, pn) if mn else pn if pn.endswith('bias'): no_decay.add(fpn) elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules): decay.add(fpn) elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules): no_decay.add(fpn) param_dict = dict(self.named_parameters()) inter_params = decay & no_decay union_params = decay | no_decay assert len(inter_params) == 0, 'parameters {} made it into both decay/no_decay sets!'.format(str(inter_params)) assert len(param_dict.keys() - union_params) == 0, 'parameters {} were not separated into either decay/no_decay set!'.format(str(param_dict.keys() - 
union_params)) decay = [param_dict[pn] for pn in sorted(decay)] no_decay = [param_dict[pn] for pn in sorted(no_decay)] return (decay, no_decay) '' class ResidualVQ(nn.Module): def __init__(self, *, dim, num_quantizers, codebook_dim=None, shared_codebook=False, heads=1, quantize_dropout=False, quantize_dropout_cutoff_index=0, quantize_dropout_multiple_of=1, accept_image_fmap=False, **kwargs): super().__init__() assert heads == 1, 'residual vq is not compatible with multi-headed codes' codebook_dim = codebook_dim if codebook_dim is not None else dim codebook_input_dim = codebook_dim * heads requires_projection = codebook_input_dim != dim self.project_in = nn.Linear(dim, codebook_input_dim) if requires_projection else nn.Identity() self.project_out = nn.Linear(codebook_input_dim, dim) if requires_projection else nn.Identity() self.num_quantizers = num_quantizers self.accept_image_fmap = accept_image_fmap self.layers = nn.ModuleList([VectorQuantize(dim=codebook_dim, codebook_dim=codebook_dim, accept_image_fmap=accept_image_fmap, **kwargs) for _ in range(num_quantizers)]) self.quantize_dropout = quantize_dropout and num_quantizers > 1 assert quantize_dropout_cutoff_index >= 0 self.register_buffer('freeze_codebook', torch.tensor(False)) self.quantize_dropout_cutoff_index = quantize_dropout_cutoff_index self.quantize_dropout_multiple_of = quantize_dropout_multiple_of if not shared_codebook: return (first_vq, *rest_vq) = self.layers codebook = first_vq._codebook for vq in rest_vq: vq._codebook = codebook @property def codebooks(self): codebooks = [layer._codebook.embed for layer in self.layers] codebooks = torch.stack(codebooks, dim=0) codebooks = rearrange(codebooks, 'q 1 c d -> q c d') return codebooks def get_codebook_vector_from_indices(self, indices): (batch, quantize_dim) = (indices.shape[0], indices.shape[-1]) (indices, ps) = pack([indices], 'b * q') if quantize_dim < self.num_quantizers: assert self.quantize_dropout > 0.0, 'quantize dropout must be greater than 0 if you wish to reconstruct from a signal with less fine quantizations' indices = F.pad(indices, (0, self.num_quantizers - quantize_dim), value=-1) codebooks = repeat(self.codebooks, 'q c d -> q b c d', b=batch) gather_indices = repeat(indices, 'b n q -> q b n d', d=codebooks.shape[-1]) mask = gather_indices == -1.0 gather_indices = gather_indices.masked_fill(mask, 0) all_codes = codebooks.gather(2, gather_indices) all_codes = all_codes.masked_fill(mask, 0.0) (all_codes,) = unpack(all_codes, ps, 'q b * d') return all_codes def forward(self, x, indices=None, return_all_codes=False, sample_codebook_temp=None): (num_quant, quant_dropout_multiple_of, return_loss, device) = (self.num_quantizers, self.quantize_dropout_multiple_of, indices is not None, x.device) x = self.project_in(x) assert not (self.accept_image_fmap and indices is not None) quantized_out = 0.0 residual = x all_losses = [] all_indices = [] if return_loss: assert not torch.any(indices == -1), 'some of the residual vq indices were dropped out. 
please use indices derived when the module is in eval mode to derive cross entropy loss' ce_losses = [] should_quantize_dropout = self.training and self.quantize_dropout and (not return_loss) if should_quantize_dropout: rand_quantize_dropout_index = randrange(self.quantize_dropout_cutoff_index, num_quant) if quant_dropout_multiple_of != 1: rand_quantize_dropout_index = ceil((rand_quantize_dropout_index + 1) / quant_dropout_multiple_of) * quant_dropout_multiple_of - 1 null_indices_shape = (x.shape[0], *x.shape[-2:]) if self.accept_image_fmap else tuple(x.shape[:2]) null_indices = torch.full(null_indices_shape, -1.0, device=device, dtype=torch.long) null_loss = torch.full((1,), 0.0, device=device, dtype=x.dtype) for (quantizer_index, layer) in enumerate(self.layers): if should_quantize_dropout and quantizer_index > rand_quantize_dropout_index: all_indices.append(null_indices) all_losses.append(null_loss) continue layer_indices = None if return_loss: layer_indices = indices[..., quantizer_index] (quantized, *rest) = layer(residual, indices=layer_indices, sample_codebook_temp=sample_codebook_temp, freeze_codebook=self.freeze_codebook) residual = residual - quantized.detach() quantized_out = quantized_out + quantized if return_loss: ce_loss = rest[0] ce_losses.append(ce_loss) continue (embed_indices, loss) = rest all_indices.append(embed_indices) all_losses.append(loss) quantized_out = self.project_out(quantized_out) if return_loss: return (quantized_out, sum(ce_losses)) (all_losses, all_indices) = map(partial(torch.stack, dim=-1), (all_losses, all_indices)) ret = (quantized_out, all_indices, all_losses) if return_all_codes: all_codes = self.get_codebook_vector_from_indices(all_indices) ret = (*ret, all_codes) return ret class VectorQuantize(nn.Module): def __init__(self, dim, codebook_size, codebook_dim=None, heads=1, separate_codebook_per_head=False, decay=0.8, eps=1e-05, kmeans_init=False, kmeans_iters=10, sync_kmeans=True, threshold_ema_dead_code=0, channel_last=True, accept_image_fmap=False, commitment_weight=1.0, commitment_use_cross_entropy_loss=False, orthogonal_reg_weight=0.0, orthogonal_reg_active_codes_only=False, orthogonal_reg_max_codes=None, stochastic_sample_codes=False, sample_codebook_temp=1.0, straight_through=False, reinmax=False, sync_codebook=None, sync_affine_param=False, ema_update=True, learnable_codebook=False, in_place_codebook_optimizer: Callable[..., Optimizer]=None, affine_param=False, affine_param_batch_decay=0.99, affine_param_codebook_decay=0.9, sync_update_v=0.0): super().__init__() self.dim = dim self.heads = heads self.separate_codebook_per_head = separate_codebook_per_head codebook_dim = codebook_dim if codebook_dim is not None else dim codebook_input_dim = codebook_dim * heads requires_projection = codebook_input_dim != dim self.project_in = nn.Linear(dim, codebook_input_dim) if requires_projection else nn.Identity() self.project_out = nn.Linear(codebook_input_dim, dim) if requires_projection else nn.Identity() self.eps = eps self.commitment_weight = commitment_weight self.commitment_use_cross_entropy_loss = commitment_use_cross_entropy_loss self.learnable_codebook = learnable_codebook has_codebook_orthogonal_loss = orthogonal_reg_weight > 0 self.has_codebook_orthogonal_loss = has_codebook_orthogonal_loss self.orthogonal_reg_weight = orthogonal_reg_weight self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only self.orthogonal_reg_max_codes = orthogonal_reg_max_codes assert not (ema_update and learnable_codebook), 'learnable codebook not 
compatible with EMA update' assert 0 <= sync_update_v <= 1.0 assert not (sync_update_v > 0.0 and (not learnable_codebook)), 'learnable codebook must be turned on' self.sync_update_v = sync_update_v gumbel_sample_fn = partial(gumbel_sample, stochastic=stochastic_sample_codes, reinmax=reinmax, straight_through=straight_through) if sync_codebook is None: sync_codebook = distributed.is_initialized() and distributed.get_world_size() > 1 codebook_kwargs = {'dim': codebook_dim, 'num_codebooks': heads if separate_codebook_per_head else 1, 'codebook_size': codebook_size, 'kmeans_init': kmeans_init, 'kmeans_iters': kmeans_iters, 'sync_kmeans': sync_kmeans, 'decay': decay, 'eps': eps, 'threshold_ema_dead_code': threshold_ema_dead_code, 'use_ddp': sync_codebook, 'learnable_codebook': has_codebook_orthogonal_loss or learnable_codebook, 'sample_codebook_temp': sample_codebook_temp, 'gumbel_sample': gumbel_sample_fn, 'ema_update': ema_update} if affine_param: codebook_kwargs = dict(**codebook_kwargs, affine_param=True, sync_affine_param=sync_affine_param, affine_param_batch_decay=affine_param_batch_decay, affine_param_codebook_decay=affine_param_codebook_decay) self._codebook = EuclideanCodebook(**codebook_kwargs) self.in_place_codebook_optimizer = in_place_codebook_optimizer(self._codebook.parameters()) if in_place_codebook_optimizer is not None else None self.codebook_size = codebook_size self.accept_image_fmap = accept_image_fmap self.channel_last = channel_last @property def codebook(self): codebook = self._codebook.embed if self.separate_codebook_per_head: return codebook return rearrange(codebook, '1 ... -> ...') @codebook.setter def codebook(self, codes): if not self.separate_codebook_per_head: codes = rearrange(codes, '... -> 1 ...') self._codebook.embed.copy_(codes) def get_codebook_vector_from_indices(self, indices): codebook = self.codebook is_multiheaded = codebook.ndim > 2 if not is_multiheaded: codes = codebook[indices] return rearrange(codes, '... h d -> ... 
(h d)') (indices, ps) = pack_one(indices, 'b * h') indices = rearrange(indices, 'b n h -> b h n') indices = repeat(indices, 'b h n -> b h n d', d=codebook.shape[-1]) codebook = repeat(codebook, 'h n d -> b h n d', b=indices.shape[0]) codes = codebook.gather(2, indices) codes = rearrange(codes, 'b h n d -> b n (h d)') codes = unpack_one(codes, ps, 'b * d') return codes def forward(self, x, indices=None, mask=None, sample_codebook_temp=None, freeze_codebook=False): orig_input = x only_one = x.ndim == 2 if only_one: assert mask is None x = rearrange(x, 'b d -> b 1 d') (shape, device, heads, is_multiheaded, _codebook_size, return_loss) = (x.shape, x.device, self.heads, self.heads > 1, self.codebook_size, indices is not None) need_transpose = not self.channel_last and (not self.accept_image_fmap) should_inplace_optimize = self.in_place_codebook_optimizer is not None if self.accept_image_fmap: (height, width) = x.shape[-2:] x = rearrange(x, 'b c h w -> b (h w) c') if need_transpose: x = rearrange(x, 'b d n -> b n d') x = self.project_in(x) if is_multiheaded: ein_rhs_eq = 'h b n d' if self.separate_codebook_per_head else '1 (b h) n d' x = rearrange(x, f'b n (h d) -> {ein_rhs_eq}', h=heads) x = self._codebook.transform_input(x) codebook_forward_kwargs = {'sample_codebook_temp': sample_codebook_temp, 'mask': mask, 'freeze_codebook': freeze_codebook} (quantize, embed_ind, distances) = self._codebook(x, **codebook_forward_kwargs) if should_inplace_optimize and self.training and (not freeze_codebook): if mask is not None: loss = F.mse_loss(quantize, x.detach(), reduction='none') loss_mask = mask if is_multiheaded: loss_mask = repeat(mask, 'b n -> c (b h) n', c=loss.shape[0], h=loss.shape[1] // mask.shape[0]) loss = loss[loss_mask].mean() else: loss = F.mse_loss(quantize, x.detach()) loss.backward() self.in_place_codebook_optimizer.step() self.in_place_codebook_optimizer.zero_grad() (quantize, embed_ind, distances) = self._codebook(x, **codebook_forward_kwargs) if self.training: maybe_detach = torch.detach if not self.learnable_codebook or freeze_codebook else identity commit_quantize = maybe_detach(quantize) quantize = x + (quantize - x).detach() if self.sync_update_v > 0.0: quantize = quantize + self.sync_update_v * (quantize - quantize.detach()) def calculate_ce_loss(codes): if not is_multiheaded: dist_einops_eq = '1 b n l -> b l n' elif self.separate_codebook_per_head: dist_einops_eq = 'c b n l -> b l n c' else: dist_einops_eq = '1 (b h) n l -> b l n h' ce_loss = F.cross_entropy(rearrange(distances, dist_einops_eq, b=shape[0]), codes, ignore_index=-1) return ce_loss if return_loss: return (quantize, calculate_ce_loss(indices)) if is_multiheaded: if self.separate_codebook_per_head: embed_ind = rearrange(embed_ind, 'h b n -> b n h', h=heads) else: embed_ind = rearrange(embed_ind, '1 (b h) n -> b n h', h=heads) if self.accept_image_fmap: embed_ind = rearrange(embed_ind, 'b (h w) ... 
-> b h w ...', h=height, w=width) if only_one: embed_ind = rearrange(embed_ind, 'b 1 -> b') loss = torch.tensor([0.0], device=device, requires_grad=self.training) if self.training: if self.commitment_weight > 0: if self.commitment_use_cross_entropy_loss: if mask is not None: ce_loss_mask = mask if is_multiheaded: ce_loss_mask = repeat(ce_loss_mask, 'b n -> b n h', h=heads) embed_ind.masked_fill_(~ce_loss_mask, -1) commit_loss = calculate_ce_loss(embed_ind) elif mask is not None: commit_loss = F.mse_loss(commit_quantize, x, reduction='none') loss_mask = mask if is_multiheaded: loss_mask = repeat(loss_mask, 'b n -> c (b h) n', c=commit_loss.shape[0], h=commit_loss.shape[1] // mask.shape[0]) commit_loss = commit_loss[loss_mask].mean() else: commit_loss = F.mse_loss(commit_quantize, x) loss = loss + commit_loss * self.commitment_weight if self.has_codebook_orthogonal_loss: codebook = self._codebook.embed if self.orthogonal_reg_active_codes_only: assert not (is_multiheaded and self.separate_codebook_per_head), 'orthogonal regularization for only active codes not compatible with multi-headed with separate codebooks yet' unique_code_ids = torch.unique(embed_ind) codebook = codebook[:, unique_code_ids] num_codes = codebook.shape[-2] if self.orthogonal_reg_max_codes is not None and num_codes > self.orthogonal_reg_max_codes: rand_ids = torch.randperm(num_codes, device=device)[:self.orthogonal_reg_max_codes] codebook = codebook[:, rand_ids] orthogonal_reg_loss = orthogonal_loss_fn(codebook) loss = loss + orthogonal_reg_loss * self.orthogonal_reg_weight if is_multiheaded: if self.separate_codebook_per_head: quantize = rearrange(quantize, 'h b n d -> b n (h d)', h=heads) else: quantize = rearrange(quantize, '1 (b h) n d -> b n (h d)', h=heads) quantize = self.project_out(quantize) if need_transpose: quantize = rearrange(quantize, 'b n d -> b d n') if self.accept_image_fmap: quantize = rearrange(quantize, 'b (h w) c -> b c h w', h=height, w=width) if only_one: quantize = rearrange(quantize, 'b 1 d -> b d') if mask is not None: quantize = torch.where(rearrange(mask, '... -> ... 
1'), quantize, orig_input) return (quantize, embed_ind, loss) def noop(*args, **kwargs): pass def identity(t): return t def cdist(x, y): x2 = reduce(x ** 2, 'b n d -> b n', 'sum') y2 = reduce(y ** 2, 'b n d -> b n', 'sum') xy = einsum('b i d, b j d -> b i j', x, y) * -2 return (rearrange(x2, 'b i -> b i 1') + rearrange(y2, 'b j -> b 1 j') + xy).sqrt() def log(t, eps=1e-20): return torch.log(t.clamp(min=eps)) def ema_inplace(old, new, decay): is_mps = str(old.device).startswith('mps:') if not is_mps: old.lerp_(new, 1 - decay) else: old.mul_(decay).add_(new * (1 - decay)) def pack_one(t, pattern): return pack([t], pattern) def unpack_one(t, ps, pattern): return unpack(t, ps, pattern)[0] def uniform_init(*shape): t = torch.empty(shape) nn.init.kaiming_uniform_(t) return t def gumbel_noise(t): noise = torch.zeros_like(t).uniform_(0, 1) return -log(-log(noise)) def gumbel_sample(logits, temperature=1.0, stochastic=False, straight_through=False, reinmax=False, dim=-1, training=True): (dtype, size) = (logits.dtype, logits.shape[dim]) if training and stochastic and (temperature > 0): sampling_logits = logits / temperature + gumbel_noise(logits) else: sampling_logits = logits ind = sampling_logits.argmax(dim=dim) one_hot = F.one_hot(ind, size).type(dtype) assert not (reinmax and (not straight_through)), 'reinmax can only be turned on if using straight through gumbel softmax' if not straight_through or temperature <= 0.0 or (not training): return (ind, one_hot) if reinmax: π0 = logits.softmax(dim=dim) π1 = (one_hot + (logits / temperature).softmax(dim=dim)) / 2 π1 = ((log(π1) - logits).detach() + logits).softmax(dim=1) π2 = 2 * π1 - 0.5 * π0 one_hot = π2 - π2.detach() + one_hot else: π1 = (logits / temperature).softmax(dim=dim) one_hot = one_hot + π1 - π1.detach() return (ind, one_hot) def laplace_smoothing(x, n_categories, eps=1e-05, dim=-1): denom = x.sum(dim=dim, keepdim=True) return (x + eps) / (denom + n_categories * eps) def sample_vectors(samples, num): (num_samples, device) = (samples.shape[0], samples.device) if num_samples >= num: indices = torch.randperm(num_samples, device=device)[:num] else: indices = torch.randint(0, num_samples, (num,), device=device) return samples[indices] def batched_sample_vectors(samples, num): return torch.stack([sample_vectors(sample, num) for sample in samples.unbind(dim=0)], dim=0) def pad_shape(shape, size, dim=0): return [size if i == dim else s for (i, s) in enumerate(shape)] def sample_multinomial(total_count, probs): device = probs.device probs = probs.cpu() total_count = probs.new_full((), total_count) remainder = probs.new_ones(()) sample = torch.empty_like(probs, dtype=torch.long) for (i, p) in enumerate(probs): s = torch.binomial(total_count, p / remainder) sample[i] = s total_count -= s remainder -= p return sample.to(device) def all_gather_sizes(x, dim): size = torch.tensor(x.shape[dim], dtype=torch.long, device=x.device) all_sizes = [torch.empty_like(size) for _ in range(distributed.get_world_size())] distributed.all_gather(all_sizes, size) return torch.stack(all_sizes) def all_gather_variably_sized(x, sizes, dim=0): rank = distributed.get_rank() all_x = [] for (i, size) in enumerate(sizes): t = x if i == rank else x.new_empty(pad_shape(x.shape, size, dim)) distributed.broadcast(t, src=i, async_op=True) all_x.append(t) distributed.barrier() return all_x def sample_vectors_distributed(local_samples, num): local_samples = rearrange(local_samples, '1 ... 
-> ...') rank = distributed.get_rank() all_num_samples = all_gather_sizes(local_samples, dim=0) if rank == 0: samples_per_rank = sample_multinomial(num, all_num_samples / all_num_samples.sum()) else: samples_per_rank = torch.empty_like(all_num_samples) distributed.broadcast(samples_per_rank, src=0) samples_per_rank = samples_per_rank.tolist() local_samples = sample_vectors(local_samples, samples_per_rank[rank]) all_samples = all_gather_variably_sized(local_samples, samples_per_rank, dim=0) out = torch.cat(all_samples, dim=0) return rearrange(out, '... -> 1 ...') def batched_bincount(x, *, minlength): (batch, dtype, device) = (x.shape[0], x.dtype, x.device) target = torch.zeros(batch, minlength, dtype=dtype, device=device) values = torch.ones_like(x) target.scatter_add_(-1, x, values) return target def kmeans(samples, num_clusters, num_iters=10, sample_fn=batched_sample_vectors, all_reduce_fn=noop): (num_codebooks, dim, dtype, _device) = (samples.shape[0], samples.shape[-1], samples.dtype, samples.device) means = sample_fn(samples, num_clusters) for _ in range(num_iters): dists = -torch.cdist(samples, means, p=2) buckets = torch.argmax(dists, dim=-1) bins = batched_bincount(buckets, minlength=num_clusters) all_reduce_fn(bins) zero_mask = bins == 0 bins_min_clamped = bins.masked_fill(zero_mask, 1) new_means = buckets.new_zeros(num_codebooks, num_clusters, dim, dtype=dtype) new_means.scatter_add_(1, repeat(buckets, 'h n -> h n d', d=dim), samples) new_means = new_means / rearrange(bins_min_clamped, '... -> ... 1') all_reduce_fn(new_means) means = torch.where(rearrange(zero_mask, '... -> ... 1'), means, new_means) return (means, bins) def batched_embedding(indices, embeds): (batch, dim) = (indices.shape[1], embeds.shape[-1]) indices = repeat(indices, 'h b n -> h b n d', d=dim) embeds = repeat(embeds, 'h c d -> h b c d', b=batch) return embeds.gather(2, indices) def orthogonal_loss_fn(t): (h, n) = t.shape[:2] normed_codes = F.normalize(t, p=2, dim=-1) cosine_sim = einsum('h i d, h j d -> h i j', normed_codes, normed_codes) return (cosine_sim ** 2).sum() / (h * n ** 2) - 1 / n class EuclideanCodebook(nn.Module): def __init__(self, dim, codebook_size, num_codebooks=1, kmeans_init=False, kmeans_iters=10, sync_kmeans=True, decay=0.8, eps=1e-05, threshold_ema_dead_code=2, reset_cluster_size=None, use_ddp=False, learnable_codebook=False, gumbel_sample=gumbel_sample, sample_codebook_temp=1.0, ema_update=True, affine_param=False, sync_affine_param=False, affine_param_batch_decay=0.99, affine_param_codebook_decay=0.9): super().__init__() self.transform_input = identity self.decay = decay self.ema_update = ema_update init_fn = uniform_init if not kmeans_init else torch.zeros embed = init_fn(num_codebooks, codebook_size, dim) self.codebook_size = codebook_size self.num_codebooks = num_codebooks self.kmeans_iters = kmeans_iters self.eps = eps self.threshold_ema_dead_code = threshold_ema_dead_code self.reset_cluster_size = reset_cluster_size if reset_cluster_size is not None else threshold_ema_dead_code assert callable(gumbel_sample) self.gumbel_sample = gumbel_sample self.sample_codebook_temp = sample_codebook_temp assert not (use_ddp and num_codebooks > 1 and kmeans_init), 'kmeans init is not compatible with multiple codebooks in distributed environment for now' self.sample_fn = sample_vectors_distributed if use_ddp and sync_kmeans else batched_sample_vectors self.kmeans_all_reduce_fn = distributed.all_reduce if use_ddp and sync_kmeans else noop self.all_reduce_fn = distributed.all_reduce if use_ddp else 
noop self.register_buffer('initted', torch.Tensor([not kmeans_init])) self.register_buffer('cluster_size', torch.zeros(num_codebooks, codebook_size)) self.register_buffer('embed_avg', embed.clone()) self.learnable_codebook = learnable_codebook if learnable_codebook: self.embed = nn.Parameter(embed) else: self.register_buffer('embed', embed) self.affine_param = affine_param self.sync_affine_param = sync_affine_param if not affine_param: return self.affine_param_batch_decay = affine_param_batch_decay self.affine_param_codebook_decay = affine_param_codebook_decay self.register_buffer('batch_mean', None) self.register_buffer('batch_variance', None) self.register_buffer('codebook_mean_needs_init', torch.Tensor([True])) self.register_buffer('codebook_mean', torch.empty(num_codebooks, 1, dim)) self.register_buffer('codebook_variance_needs_init', torch.Tensor([True])) self.register_buffer('codebook_variance', torch.empty(num_codebooks, 1, dim)) @torch.jit.ignore def init_embed_(self, data, mask=None): if self.initted: return if mask is not None: c = data.shape[0] data = rearrange(data[mask], '(c n) d -> c n d', c=c) (embed, cluster_size) = kmeans(data, self.codebook_size, self.kmeans_iters, sample_fn=self.sample_fn, all_reduce_fn=self.kmeans_all_reduce_fn) embed_sum = embed * rearrange(cluster_size, '... -> ... 1') self.embed.data.copy_(embed) self.embed_avg.data.copy_(embed_sum) self.cluster_size.data.copy_(cluster_size) self.initted.data.copy_(torch.Tensor([True])) @torch.jit.ignore def update_with_decay(self, buffer_name, new_value, decay): old_value = getattr(self, buffer_name) needs_init = getattr(self, buffer_name + '_needs_init', False) if needs_init: self.register_buffer(buffer_name + '_needs_init', torch.Tensor([False])) if not old_value is not None or needs_init: self.register_buffer(buffer_name, new_value.detach()) return value = old_value * decay + new_value.detach() * (1 - decay) self.register_buffer(buffer_name, value) @torch.jit.ignore def update_affine(self, data, embed, mask=None): assert self.affine_param var_fn = partial(torch.var, unbiased=False) embed = rearrange(embed, 'h ... d -> h (...) d') if self.training: self.update_with_decay('codebook_mean', reduce(embed, 'h n d -> h 1 d', 'mean'), self.affine_param_codebook_decay) self.update_with_decay('codebook_variance', reduce(embed, 'h n d -> h 1 d', var_fn), self.affine_param_codebook_decay) data = rearrange(data, 'h ... d -> h (...) 
d') if mask is not None: c = data.shape[0] data = rearrange(data[mask], '(c n) d -> c n d', c=c) if not self.sync_affine_param: self.update_with_decay('batch_mean', reduce(data, 'h n d -> h 1 d', 'mean'), self.affine_param_batch_decay) self.update_with_decay('batch_variance', reduce(data, 'h n d -> h 1 d', var_fn), self.affine_param_batch_decay) return (num_vectors, device, dtype) = (data.shape[-2], data.device, data.dtype) num_vectors = torch.tensor([num_vectors], device=device, dtype=dtype) distributed.all_reduce(num_vectors) batch_sum = reduce(data, 'h n d -> h 1 d', 'sum') distributed.all_reduce(batch_sum) batch_mean = batch_sum / num_vectors self.update_with_decay('batch_mean', batch_mean, self.affine_param_batch_decay) variance_numer = reduce((data - batch_mean) ** 2, 'h n d -> h 1 d', 'sum') distributed.all_reduce(variance_numer) batch_variance = variance_numer / num_vectors self.update_with_decay('batch_variance', batch_variance, self.affine_param_batch_decay) def replace(self, batch_samples, batch_mask): for (ind, (samples, mask)) in enumerate(zip(batch_samples.unbind(dim=0), batch_mask.unbind(dim=0), strict=False)): if not torch.any(mask): continue sampled = self.sample_fn(rearrange(samples, '... -> 1 ...'), mask.sum().item()) sampled = rearrange(sampled, '1 ... -> ...') self.embed.data[ind][mask] = sampled self.cluster_size.data[ind][mask] = self.reset_cluster_size self.embed_avg.data[ind][mask] = sampled * self.reset_cluster_size def expire_codes_(self, batch_samples): if self.threshold_ema_dead_code == 0: return expired_codes = self.cluster_size < self.threshold_ema_dead_code if not torch.any(expired_codes): return batch_samples = rearrange(batch_samples, 'h ... d -> h (...) d') self.replace(batch_samples, batch_mask=expired_codes) @autocast(enabled=False) def forward(self, x, sample_codebook_temp=None, mask=None, freeze_codebook=False): needs_codebook_dim = x.ndim < 4 sample_codebook_temp = sample_codebook_temp if sample_codebook_temp is not None else self.sample_codebook_temp x = x.float() if needs_codebook_dim: x = rearrange(x, '... 
-> 1 ...') (flatten, ps) = pack_one(x, 'h * d') if mask is not None: mask = repeat(mask, 'b n -> c (b h n)', c=flatten.shape[0], h=flatten.shape[-2] // (mask.shape[0] * mask.shape[1])) self.init_embed_(flatten, mask=mask) if self.affine_param: self.update_affine(flatten, self.embed, mask=mask) embed = self.embed if self.learnable_codebook else self.embed.detach() if self.affine_param: codebook_std = self.codebook_variance.clamp(min=1e-05).sqrt() batch_std = self.batch_variance.clamp(min=1e-05).sqrt() embed = (embed - self.codebook_mean) * (batch_std / codebook_std) + self.batch_mean dist = -cdist(flatten, embed) (embed_ind, embed_onehot) = self.gumbel_sample(dist, dim=-1, temperature=sample_codebook_temp, training=self.training) embed_ind = unpack_one(embed_ind, ps, 'h *') if self.training: unpacked_onehot = unpack_one(embed_onehot, ps, 'h * c') quantize = einsum('h b n c, h c d -> h b n d', unpacked_onehot, embed) else: quantize = batched_embedding(embed_ind, embed) if self.training and self.ema_update and (not freeze_codebook): if self.affine_param: flatten = (flatten - self.batch_mean) * (codebook_std / batch_std) + self.codebook_mean if mask is not None: embed_onehot[~mask] = 0.0 cluster_size = embed_onehot.sum(dim=1) self.all_reduce_fn(cluster_size) ema_inplace(self.cluster_size.data, cluster_size, self.decay) embed_sum = einsum('h n d, h n c -> h c d', flatten, embed_onehot) self.all_reduce_fn(embed_sum.contiguous()) ema_inplace(self.embed_avg.data, embed_sum, self.decay) cluster_size = laplace_smoothing(self.cluster_size, self.codebook_size, self.eps) * self.cluster_size.sum(dim=-1, keepdim=True) embed_normalized = self.embed_avg / rearrange(cluster_size, '... -> ... 1') self.embed.data.copy_(embed_normalized) self.expire_codes_(x) if needs_codebook_dim: (quantize, embed_ind) = tuple((rearrange(t, '1 ... -> ...') for t in (quantize, embed_ind))) dist = unpack_one(dist, ps, 'h * d') return (quantize, embed_ind, dist) # File: lerobot-main/lerobot/common/robot_devices/cameras/intelrealsense.py """""" import argparse import concurrent.futures import logging import shutil import threading import time import traceback from dataclasses import dataclass, replace from pathlib import Path from threading import Thread import cv2 import numpy as np import pyrealsense2 as rs from PIL import Image from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError from lerobot.common.utils.utils import capture_timestamp_utc from lerobot.scripts.control_robot import busy_wait SERIAL_NUMBER_INDEX = 1 def find_camera_indices(raise_when_empty=True) -> list[int]: camera_ids = [] for device in rs.context().query_devices(): serial_number = int(device.get_info(rs.camera_info(SERIAL_NUMBER_INDEX))) camera_ids.append(serial_number) if raise_when_empty and len(camera_ids) == 0: raise OSError('Not a single camera was detected. 
Try re-plugging, or re-installing `librealsense` and its python wrapper `pyrealsense2`, or updating the firmware.') return camera_ids def save_image(img_array, camera_idx, frame_index, images_dir): try: img = Image.fromarray(img_array) path = images_dir / f'camera_{camera_idx}_frame_{frame_index:06d}.png' path.parent.mkdir(parents=True, exist_ok=True) img.save(str(path), quality=100) logging.info(f'Saved image: {path}') except Exception as e: logging.error(f'Failed to save image for camera {camera_idx} frame {frame_index}: {e}') def save_images_from_cameras(images_dir: Path, camera_ids: list[int] | None=None, fps=None, width=None, height=None, record_time_s=2): if camera_ids is None: camera_ids = find_camera_indices() print('Connecting cameras') cameras = [] for cam_idx in camera_ids: camera = IntelRealSenseCamera(cam_idx, fps=fps, width=width, height=height) camera.connect() print(f'IntelRealSenseCamera({camera.camera_index}, fps={camera.fps}, width={camera.width}, height={camera.height}, color_mode={camera.color_mode})') cameras.append(camera) images_dir = Path(images_dir) if images_dir.exists(): shutil.rmtree(images_dir) images_dir.mkdir(parents=True, exist_ok=True) print(f'Saving images to {images_dir}') frame_index = 0 start_time = time.perf_counter() try: with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor: while True: now = time.perf_counter() for camera in cameras: image = camera.read() if fps is None else camera.async_read() if image is None: print('No Frame') bgr_converted_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) executor.submit(save_image, bgr_converted_image, camera.camera_index, frame_index, images_dir) if fps is not None: dt_s = time.perf_counter() - now busy_wait(1 / fps - dt_s) if time.perf_counter() - start_time > record_time_s: break print(f'Frame: {frame_index:04d}\tLatency (ms): {(time.perf_counter() - now) * 1000:.2f}') frame_index += 1 finally: print(f'Images have been saved to {images_dir}') for camera in cameras: camera.disconnect() @dataclass class IntelRealSenseCameraConfig: fps: int | None = None width: int | None = None height: int | None = None color_mode: str = 'rgb' use_depth: bool = False force_hardware_reset: bool = True def __post_init__(self): if self.color_mode not in ['rgb', 'bgr']: raise ValueError(f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided.") if (self.fps or self.width or self.height) and (not (self.fps and self.width and self.height)): raise ValueError(f'For `fps`, `width` and `height`, either all of them need to be set, or none of them, but self.fps={self.fps!r}, self.width={self.width!r}, self.height={self.height!r} were provided.') class IntelRealSenseCamera: def __init__(self, camera_index: int, config: IntelRealSenseCameraConfig | None=None, **kwargs): if config is None: config = IntelRealSenseCameraConfig() config = replace(config, **kwargs) self.camera_index = camera_index self.fps = config.fps self.width = config.width self.height = config.height self.color_mode = config.color_mode self.use_depth = config.use_depth self.force_hardware_reset = config.force_hardware_reset self.camera = None self.is_connected = False self.thread = None self.stop_event = None self.color_image = None self.depth_map = None self.logs = {} def connect(self): if self.is_connected: raise RobotDeviceAlreadyConnectedError(f'IntelRealSenseCamera({self.camera_index}) is already connected.') config = rs.config() config.enable_device(str(self.camera_index)) if self.fps and self.width and self.height: 
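# When `fps`, `width` and `height` are all provided, the color stream (and, if `use_depth` is set,
# the depth stream) is configured explicitly below; otherwise the camera's default stream profile is
# used. A minimal usage sketch, assuming a hypothetical serial number in place of a real device:
#     camera = IntelRealSenseCamera(128422271347, fps=30, width=640, height=480)
#     camera.connect()
#     color_image = camera.read()   # (height, width, 3) array in the configured `color_mode`
#     camera.disconnect()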
config.enable_stream(rs.stream.color, self.width, self.height, rs.format.rgb8, self.fps) else: config.enable_stream(rs.stream.color) if self.use_depth: if self.fps and self.width and self.height: config.enable_stream(rs.stream.depth, self.width, self.height, rs.format.z16, self.fps) else: config.enable_stream(rs.stream.depth) self.camera = rs.pipeline() try: self.camera.start(config) is_camera_open = True except RuntimeError: is_camera_open = False traceback.print_exc() if not is_camera_open: available_cam_ids = find_camera_indices() if self.camera_index not in available_cam_ids: raise ValueError(f'`camera_index` is expected to be one of these available cameras {available_cam_ids}, but {self.camera_index} is provided instead. To find the camera index you should use, run `python lerobot/common/robot_devices/cameras/intelrealsense.py`.') raise OSError(f"Can't access IntelRealSenseCamera({self.camera_index}).") self.is_connected = True def read(self, temporary_color: str | None=None) -> np.ndarray | tuple[np.ndarray, np.ndarray]: if not self.is_connected: raise RobotDeviceNotConnectedError(f'IntelRealSenseCamera({self.camera_index}) is not connected. Try running `camera.connect()` first.') start_time = time.perf_counter() frame = self.camera.wait_for_frames(timeout_ms=5000) color_frame = frame.get_color_frame() if not color_frame: raise OSError(f"Can't capture color image from IntelRealSenseCamera({self.camera_index}).") color_image = np.asanyarray(color_frame.get_data()) requested_color_mode = self.color_mode if temporary_color is None else temporary_color if requested_color_mode not in ['rgb', 'bgr']: raise ValueError(f"Expected color values are 'rgb' or 'bgr', but {requested_color_mode} is provided.") if requested_color_mode == 'bgr': color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR) (h, w, _) = color_image.shape if h != self.height or w != self.width: raise OSError(f"Can't capture color image with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead.") self.logs['delta_timestamp_s'] = time.perf_counter() - start_time self.logs['timestamp_utc'] = capture_timestamp_utc() if self.use_depth: depth_frame = frame.get_depth_frame() if not depth_frame: raise OSError(f"Can't capture depth image from IntelRealSenseCamera({self.camera_index}).") depth_map = np.asanyarray(depth_frame.get_data()) (h, w) = depth_map.shape if h != self.height or w != self.width: raise OSError(f"Can't capture depth map with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead.") return (color_image, depth_map) else: return color_image def read_loop(self): while self.stop_event is None or not self.stop_event.is_set(): if self.use_depth: (self.color_image, self.depth_map) = self.read() else: self.color_image = self.read() def async_read(self): if not self.is_connected: raise RobotDeviceNotConnectedError(f'IntelRealSenseCamera({self.camera_index}) is not connected. Try running `camera.connect()` first.') if self.thread is None: self.stop_event = threading.Event() self.thread = Thread(target=self.read_loop, args=()) self.thread.daemon = True self.thread.start() num_tries = 0 while self.color_image is None: num_tries += 1 time.sleep(1 / self.fps) if num_tries > self.fps and (self.thread.ident is None or not self.thread.is_alive()): raise Exception('The thread responsible for `self.async_read()` took too much time to start. There might be an issue. 
Verify that `self.thread.start()` has been called.') if self.use_depth: return (self.color_image, self.depth_map) else: return self.color_image def disconnect(self): if not self.is_connected: raise RobotDeviceNotConnectedError(f'IntelRealSenseCamera({self.camera_index}) is not connected. Try running `camera.connect()` first.') if self.thread is not None and self.thread.is_alive(): self.stop_event.set() self.thread.join() self.thread = None self.stop_event = None self.camera.stop() self.camera = None self.is_connected = False def __del__(self): if getattr(self, 'is_connected', False): self.disconnect() if __name__ == '__main__': parser = argparse.ArgumentParser(description='Save a few frames using `IntelRealSenseCamera` for all cameras connected to the computer, or a selected subset.') parser.add_argument('--camera-ids', type=int, nargs='*', default=None, help='List of camera indices used to instantiate the `IntelRealSenseCamera`. If not provided, find and use all available camera indices.') parser.add_argument('--fps', type=int, default=30, help='Set the number of frames recorded per second for all cameras. If not provided, use the default fps of each camera.') parser.add_argument('--width', type=int, default=640, help='Set the width for all cameras. If not provided, use the default width of each camera.') parser.add_argument('--height', type=int, default=480, help='Set the height for all cameras. If not provided, use the default height of each camera.') parser.add_argument('--images-dir', type=Path, default='outputs/images_from_intelrealsense_cameras', help='Set directory to save a few frames for each camera.') parser.add_argument('--record-time-s', type=float, default=2.0, help='Set the number of seconds used to record the frames. By default, 2 seconds.') args = parser.parse_args() save_images_from_cameras(**vars(args)) # File: lerobot-main/lerobot/common/robot_devices/cameras/opencv.py """""" import argparse import concurrent.futures import math import platform import shutil import threading import time from dataclasses import dataclass, replace from pathlib import Path from threading import Thread import cv2 import numpy as np from PIL import Image from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError, busy_wait from lerobot.common.utils.utils import capture_timestamp_utc cv2.setNumThreads(1) MAX_OPENCV_INDEX = 60 def find_camera_indices(raise_when_empty=False, max_index_search_range=MAX_OPENCV_INDEX): if platform.system() == 'Linux': print("Linux detected. Finding available camera indices through scanning '/dev/video*' ports") possible_camera_ids = [] for port in Path('/dev').glob('video*'): camera_idx = int(str(port).replace('/dev/video', '')) possible_camera_ids.append(camera_idx) else: print(f'Mac or Windows detected. Finding available camera indices through scanning all indices from 0 to {MAX_OPENCV_INDEX}') possible_camera_ids = range(max_index_search_range) camera_ids = [] for camera_idx in possible_camera_ids: camera = cv2.VideoCapture(camera_idx) is_open = camera.isOpened() camera.release() if is_open: print(f'Camera found at index {camera_idx}') camera_ids.append(camera_idx) if raise_when_empty and len(camera_ids) == 0: raise OSError('Not a single camera was detected. 
Try re-plugging, or re-installing `opencv2`, or your camera driver, or make sure your camera is compatible with opencv2.') return camera_ids def save_image(img_array, camera_index, frame_index, images_dir): img = Image.fromarray(img_array) path = images_dir / f'camera_{camera_index:02d}_frame_{frame_index:06d}.png' path.parent.mkdir(parents=True, exist_ok=True) img.save(str(path), quality=100) def save_images_from_cameras(images_dir: Path, camera_ids: list[int] | None=None, fps=None, width=None, height=None, record_time_s=2): if camera_ids is None: camera_ids = find_camera_indices() print('Connecting cameras') cameras = [] for cam_idx in camera_ids: camera = OpenCVCamera(cam_idx, fps=fps, width=width, height=height) camera.connect() print(f'OpenCVCamera({camera.camera_index}, fps={camera.fps}, width={camera.width}, height={camera.height}, color_mode={camera.color_mode})') cameras.append(camera) images_dir = Path(images_dir) if images_dir.exists(): shutil.rmtree(images_dir) images_dir.mkdir(parents=True, exist_ok=True) print(f'Saving images to {images_dir}') frame_index = 0 start_time = time.perf_counter() with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor: while True: now = time.perf_counter() for camera in cameras: image = camera.read() if fps is None else camera.async_read() executor.submit(save_image, image, camera.camera_index, frame_index, images_dir) if fps is not None: dt_s = time.perf_counter() - now busy_wait(1 / fps - dt_s) if time.perf_counter() - start_time > record_time_s: break print(f'Frame: {frame_index:04d}\tLatency (ms): {(time.perf_counter() - now) * 1000:.2f}') frame_index += 1 print(f'Images have been saved to {images_dir}') @dataclass class OpenCVCameraConfig: fps: int | None = None width: int | None = None height: int | None = None color_mode: str = 'rgb' def __post_init__(self): if self.color_mode not in ['rgb', 'bgr']: raise ValueError(f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided.") class OpenCVCamera: def __init__(self, camera_index: int, config: OpenCVCameraConfig | None=None, **kwargs): if config is None: config = OpenCVCameraConfig() config = replace(config, **kwargs) self.camera_index = camera_index self.fps = config.fps self.width = config.width self.height = config.height self.color_mode = config.color_mode self.camera = None self.is_connected = False self.thread = None self.stop_event = None self.color_image = None self.logs = {} def connect(self): if self.is_connected: raise RobotDeviceAlreadyConnectedError(f'OpenCVCamera({self.camera_index}) is already connected.') if platform.system() == 'Linux': tmp_camera = cv2.VideoCapture(f'/dev/video{self.camera_index}') else: tmp_camera = cv2.VideoCapture(self.camera_index) is_camera_open = tmp_camera.isOpened() del tmp_camera if not is_camera_open: available_cam_ids = find_camera_indices() if self.camera_index not in available_cam_ids: raise ValueError(f'`camera_index` is expected to be one of these available cameras {available_cam_ids}, but {self.camera_index} is provided instead. 
To find the camera index you should use, run `python lerobot/common/robot_devices/cameras/opencv.py`.') raise OSError(f"Can't access OpenCVCamera({self.camera_index}).") if platform.system() == 'Linux': self.camera = cv2.VideoCapture(f'/dev/video{self.camera_index}') else: self.camera = cv2.VideoCapture(self.camera_index) if self.fps is not None: self.camera.set(cv2.CAP_PROP_FPS, self.fps) if self.width is not None: self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, self.width) if self.height is not None: self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height) actual_fps = self.camera.get(cv2.CAP_PROP_FPS) actual_width = self.camera.get(cv2.CAP_PROP_FRAME_WIDTH) actual_height = self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT) if self.fps is not None and (not math.isclose(self.fps, actual_fps, rel_tol=0.001)): raise OSError(f"Can't set self.fps={self.fps!r} for OpenCVCamera({self.camera_index}). Actual value is {actual_fps}.") if self.width is not None and self.width != actual_width: raise OSError(f"Can't set self.width={self.width!r} for OpenCVCamera({self.camera_index}). Actual value is {actual_width}.") if self.height is not None and self.height != actual_height: raise OSError(f"Can't set self.height={self.height!r} for OpenCVCamera({self.camera_index}). Actual value is {actual_height}.") self.fps = actual_fps self.width = actual_width self.height = actual_height self.is_connected = True def read(self, temporary_color_mode: str | None=None) -> np.ndarray: if not self.is_connected: raise RobotDeviceNotConnectedError(f'OpenCVCamera({self.camera_index}) is not connected. Try running `camera.connect()` first.') start_time = time.perf_counter() (ret, color_image) = self.camera.read() if not ret: raise OSError(f"Can't capture color image from camera {self.camera_index}.") requested_color_mode = self.color_mode if temporary_color_mode is None else temporary_color_mode if requested_color_mode not in ['rgb', 'bgr']: raise ValueError(f"Expected color values are 'rgb' or 'bgr', but {requested_color_mode} is provided.") if requested_color_mode == 'rgb': color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB) (h, w, _) = color_image.shape if h != self.height or w != self.width: raise OSError(f"Can't capture color image with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead.") self.logs['delta_timestamp_s'] = time.perf_counter() - start_time self.logs['timestamp_utc'] = capture_timestamp_utc() return color_image def read_loop(self): while self.stop_event is None or not self.stop_event.is_set(): self.color_image = self.read() def async_read(self): if not self.is_connected: raise RobotDeviceNotConnectedError(f'OpenCVCamera({self.camera_index}) is not connected. Try running `camera.connect()` first.') if self.thread is None: self.stop_event = threading.Event() self.thread = Thread(target=self.read_loop, args=()) self.thread.daemon = True self.thread.start() num_tries = 0 while self.color_image is None: num_tries += 1 time.sleep(1 / self.fps) if num_tries > self.fps and (self.thread.ident is None or not self.thread.is_alive()): raise Exception('The thread responsible for `self.async_read()` took too much time to start. There might be an issue. Verify that `self.thread.start()` has been called.') return self.color_image def disconnect(self): if not self.is_connected: raise RobotDeviceNotConnectedError(f'OpenCVCamera({self.camera_index}) is not connected. 
Try running `camera.connect()` first.') if self.thread is not None and self.thread.is_alive(): self.stop_event.set() self.thread.join() self.thread = None self.stop_event = None self.camera.release() self.camera = None self.is_connected = False def __del__(self): if getattr(self, 'is_connected', False): self.disconnect() if __name__ == '__main__': parser = argparse.ArgumentParser(description='Save a few frames using `OpenCVCamera` for all cameras connected to the computer, or a selected subset.') parser.add_argument('--camera-ids', type=int, nargs='*', default=None, help='List of camera indices used to instantiate the `OpenCVCamera`. If not provided, find and use all available camera indices.') parser.add_argument('--fps', type=int, default=None, help='Set the number of frames recorded per second for all cameras. If not provided, use the default fps of each camera.') parser.add_argument('--width', type=int, default=None, help='Set the width for all cameras. If not provided, use the default width of each camera.') parser.add_argument('--height', type=int, default=None, help='Set the height for all cameras. If not provided, use the default height of each camera.') parser.add_argument('--images-dir', type=Path, default='outputs/images_from_opencv_cameras', help='Set directory to save a few frames for each camera.') parser.add_argument('--record-time-s', type=float, default=2.0, help='Set the number of seconds used to record the frames. By default, 2 seconds.') args = parser.parse_args() save_images_from_cameras(**vars(args)) # File: lerobot-main/lerobot/common/robot_devices/cameras/utils.py from pathlib import Path from typing import Protocol import cv2 import einops import numpy as np def write_shape_on_image_inplace(image): (height, width) = image.shape[:2] text = f'Width: {width} Height: {height}' font = cv2.FONT_HERSHEY_SIMPLEX font_scale = 1 color = (255, 0, 0) thickness = 2 position = (10, height - 10) cv2.putText(image, text, position, font, font_scale, color, thickness) def save_color_image(image, path, write_shape=False): path = Path(path) path.parent.mkdir(parents=True, exist_ok=True) if write_shape: write_shape_on_image_inplace(image) cv2.imwrite(str(path), image) def save_depth_image(depth, path, write_shape=False): path = Path(path) path.parent.mkdir(parents=True, exist_ok=True) depth_image = cv2.applyColorMap(cv2.convertScaleAbs(depth, alpha=0.03), cv2.COLORMAP_JET) if write_shape: write_shape_on_image_inplace(depth_image) cv2.imwrite(str(path), depth_image) def convert_torch_image_to_cv2(tensor, rgb_to_bgr=True): assert tensor.ndim == 3 (c, h, w) = tensor.shape assert c < h and c < w color_image = einops.rearrange(tensor, 'c h w -> h w c').numpy() if rgb_to_bgr: color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR) return color_image class Camera(Protocol): def connect(self): ... def read(self, temporary_color: str | None=None) -> np.ndarray: ... def async_read(self) -> np.ndarray: ... def disconnect(self): ... 
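# --- Editor's usage sketch (illustration only, not a file from the repository). ---
# The snippet below shows how the camera API defined above is meant to be driven: a
# fixed-rate capture loop built from `OpenCVCamera`, `busy_wait`, and `save_color_image`.
# The camera index (0), fps (30), and resolution (640x480) are assumptions for the sake
# of this example and must match a mode the camera actually supports, otherwise
# `connect()` raises. `save_color_image` relies on `cv2.imwrite`, so it expects a BGR
# image, which is why the saved frame is requested with `temporary_color_mode='bgr'`.
import time

from lerobot.common.robot_devices.cameras.opencv import OpenCVCamera
from lerobot.common.robot_devices.cameras.utils import save_color_image
from lerobot.common.robot_devices.utils import busy_wait

camera = OpenCVCamera(camera_index=0, fps=30, width=640, height=480)
camera.connect()
try:
    # One-off frame captured on the main thread and saved to disk in BGR.
    save_color_image(camera.read(temporary_color_mode='bgr'), 'outputs/example_frame.png')
    for _ in range(90):  # roughly 3 seconds at 30 fps
        start = time.perf_counter()
        frame = camera.async_read()  # latest RGB frame from the background capture thread
        busy_wait(1 / camera.fps - (time.perf_counter() - start))
    print(frame.shape, camera.logs['delta_timestamp_s'])
finally:
    camera.disconnect()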
# File: lerobot-main/lerobot/common/robot_devices/motors/dynamixel.py import enum import logging import math import time import traceback from copy import deepcopy from pathlib import Path import numpy as np import tqdm from dynamixel_sdk import COMM_SUCCESS, DXL_HIBYTE, DXL_HIWORD, DXL_LOBYTE, DXL_LOWORD, GroupSyncRead, GroupSyncWrite, PacketHandler, PortHandler from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError from lerobot.common.utils.utils import capture_timestamp_utc PROTOCOL_VERSION = 2.0 BAUDRATE = 1000000 TIMEOUT_MS = 1000 MAX_ID_RANGE = 252 LOWER_BOUND_DEGREE = -270 UPPER_BOUND_DEGREE = 270 LOWER_BOUND_LINEAR = -10 UPPER_BOUND_LINEAR = 110 HALF_TURN_DEGREE = 180 X_SERIES_CONTROL_TABLE = {'Model_Number': (0, 2), 'Model_Information': (2, 4), 'Firmware_Version': (6, 1), 'ID': (7, 1), 'Baud_Rate': (8, 1), 'Return_Delay_Time': (9, 1), 'Drive_Mode': (10, 1), 'Operating_Mode': (11, 1), 'Secondary_ID': (12, 1), 'Protocol_Type': (13, 1), 'Homing_Offset': (20, 4), 'Moving_Threshold': (24, 4), 'Temperature_Limit': (31, 1), 'Max_Voltage_Limit': (32, 2), 'Min_Voltage_Limit': (34, 2), 'PWM_Limit': (36, 2), 'Current_Limit': (38, 2), 'Acceleration_Limit': (40, 4), 'Velocity_Limit': (44, 4), 'Max_Position_Limit': (48, 4), 'Min_Position_Limit': (52, 4), 'Shutdown': (63, 1), 'Torque_Enable': (64, 1), 'LED': (65, 1), 'Status_Return_Level': (68, 1), 'Registered_Instruction': (69, 1), 'Hardware_Error_Status': (70, 1), 'Velocity_I_Gain': (76, 2), 'Velocity_P_Gain': (78, 2), 'Position_D_Gain': (80, 2), 'Position_I_Gain': (82, 2), 'Position_P_Gain': (84, 2), 'Feedforward_2nd_Gain': (88, 2), 'Feedforward_1st_Gain': (90, 2), 'Bus_Watchdog': (98, 1), 'Goal_PWM': (100, 2), 'Goal_Current': (102, 2), 'Goal_Velocity': (104, 4), 'Profile_Acceleration': (108, 4), 'Profile_Velocity': (112, 4), 'Goal_Position': (116, 4), 'Realtime_Tick': (120, 2), 'Moving': (122, 1), 'Moving_Status': (123, 1), 'Present_PWM': (124, 2), 'Present_Current': (126, 2), 'Present_Velocity': (128, 4), 'Present_Position': (132, 4), 'Velocity_Trajectory': (136, 4), 'Position_Trajectory': (140, 4), 'Present_Input_Voltage': (144, 2), 'Present_Temperature': (146, 1)} X_SERIES_BAUDRATE_TABLE = {0: 9600, 1: 57600, 2: 115200, 3: 1000000, 4: 2000000, 5: 3000000, 6: 4000000} CALIBRATION_REQUIRED = ['Goal_Position', 'Present_Position'] CONVERT_UINT32_TO_INT32_REQUIRED = ['Goal_Position', 'Present_Position'] MODEL_CONTROL_TABLE = {'x_series': X_SERIES_CONTROL_TABLE, 'xl330-m077': X_SERIES_CONTROL_TABLE, 'xl330-m288': X_SERIES_CONTROL_TABLE, 'xl430-w250': X_SERIES_CONTROL_TABLE, 'xm430-w350': X_SERIES_CONTROL_TABLE, 'xm540-w270': X_SERIES_CONTROL_TABLE, 'xc430-w150': X_SERIES_CONTROL_TABLE} MODEL_RESOLUTION = {'x_series': 4096, 'xl330-m077': 4096, 'xl330-m288': 4096, 'xl430-w250': 4096, 'xm430-w350': 4096, 'xm540-w270': 4096, 'xc430-w150': 4096} MODEL_BAUDRATE_TABLE = {'x_series': X_SERIES_BAUDRATE_TABLE, 'xl330-m077': X_SERIES_BAUDRATE_TABLE, 'xl330-m288': X_SERIES_BAUDRATE_TABLE, 'xl430-w250': X_SERIES_BAUDRATE_TABLE, 'xm430-w350': X_SERIES_BAUDRATE_TABLE, 'xm540-w270': X_SERIES_BAUDRATE_TABLE, 'xc430-w150': X_SERIES_BAUDRATE_TABLE} NUM_READ_RETRY = 10 NUM_WRITE_RETRY = 10 def convert_degrees_to_steps(degrees: float | np.ndarray, models: str | list[str]) -> np.ndarray: resolutions = [MODEL_RESOLUTION[model] for model in models] steps = degrees / 180 * np.array(resolutions) / 2 steps = steps.astype(int) return steps def convert_to_bytes(value, bytes): if bytes == 1: data = 
[DXL_LOBYTE(DXL_LOWORD(value))] elif bytes == 2: data = [DXL_LOBYTE(DXL_LOWORD(value)), DXL_HIBYTE(DXL_LOWORD(value))] elif bytes == 4: data = [DXL_LOBYTE(DXL_LOWORD(value)), DXL_HIBYTE(DXL_LOWORD(value)), DXL_LOBYTE(DXL_HIWORD(value)), DXL_HIBYTE(DXL_HIWORD(value))] else: raise NotImplementedError(f'Value of the number of bytes to be sent is expected to be in [1, 2, 4], but {bytes} is provided instead.') return data def get_group_sync_key(data_name, motor_names): group_key = f'{data_name}_' + '_'.join(motor_names) return group_key def get_result_name(fn_name, data_name, motor_names): group_key = get_group_sync_key(data_name, motor_names) rslt_name = f'{fn_name}_{group_key}' return rslt_name def get_queue_name(fn_name, data_name, motor_names): group_key = get_group_sync_key(data_name, motor_names) queue_name = f'{fn_name}_{group_key}' return queue_name def get_log_name(var_name, fn_name, data_name, motor_names): group_key = get_group_sync_key(data_name, motor_names) log_name = f'{var_name}_{fn_name}_{group_key}' return log_name def assert_same_address(model_ctrl_table, motor_models, data_name): all_addr = [] all_bytes = [] for model in motor_models: (addr, bytes) = model_ctrl_table[model][data_name] all_addr.append(addr) all_bytes.append(bytes) if len(set(all_addr)) != 1: raise NotImplementedError(f"At least two motor models use a different address for `data_name`='{data_name}' ({list(zip(motor_models, all_addr, strict=False))}). Contact a LeRobot maintainer.") if len(set(all_bytes)) != 1: raise NotImplementedError(f"At least two motor models use a different bytes representation for `data_name`='{data_name}' ({list(zip(motor_models, all_bytes, strict=False))}). Contact a LeRobot maintainer.") def find_available_ports(): ports = [] for path in Path('/dev').glob('tty*'): ports.append(str(path)) return ports def find_port(): print('Finding all available ports for the DynamixelMotorsBus.') ports_before = find_available_ports() print(ports_before) print('Remove the usb cable from your DynamixelMotorsBus and press Enter when done.') input() time.sleep(0.5) ports_after = find_available_ports() ports_diff = list(set(ports_before) - set(ports_after)) if len(ports_diff) == 1: port = ports_diff[0] print(f"The port of this DynamixelMotorsBus is '{port}'") print('Reconnect the usb cable.') elif len(ports_diff) == 0: raise OSError(f'Could not detect the port. No difference was found ({ports_diff}).') else: raise OSError(f'Could not detect the port. 
More than one port was found ({ports_diff}).') class TorqueMode(enum.Enum): ENABLED = 1 DISABLED = 0 class DriveMode(enum.Enum): NON_INVERTED = 0 INVERTED = 1 class CalibrationMode(enum.Enum): DEGREE = 0 LINEAR = 1 class JointOutOfRangeError(Exception): def __init__(self, message='Joint is out of range'): self.message = message super().__init__(self.message) class DynamixelMotorsBus: def __init__(self, port: str, motors: dict[str, tuple[int, str]], extra_model_control_table: dict[str, list[tuple]] | None=None, extra_model_resolution: dict[str, int] | None=None): self.port = port self.motors = motors self.model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE) if extra_model_control_table: self.model_ctrl_table.update(extra_model_control_table) self.model_resolution = deepcopy(MODEL_RESOLUTION) if extra_model_resolution: self.model_resolution.update(extra_model_resolution) self.port_handler = None self.packet_handler = None self.calibration = None self.is_connected = False self.group_readers = {} self.group_writers = {} self.logs = {} def connect(self): if self.is_connected: raise RobotDeviceAlreadyConnectedError(f'DynamixelMotorsBus({self.port}) is already connected. Do not call `motors_bus.connect()` twice.') self.port_handler = PortHandler(self.port) self.packet_handler = PacketHandler(PROTOCOL_VERSION) try: if not self.port_handler.openPort(): raise OSError(f"Failed to open port '{self.port}'.") except Exception: traceback.print_exc() print('\nTry running `python lerobot/common/robot_devices/motors/dynamixel.py` to make sure you are using the correct port.\n') raise self.is_connected = True self.port_handler.setPacketTimeoutMillis(TIMEOUT_MS) self.set_bus_baudrate(BAUDRATE) if not self.are_motors_configured(): input("\n/!\\ A configuration issue has been detected with your motors: \nIf it's the first time that you use these motors, press enter to configure your motors... but before verify that all the cables are connected the proper way. If you find an issue, before making a modification, kill the python process, unplug the power cord to not damage the motors, rewire correctly, then plug the power again and relaunch the script.\n") print() self.configure_motors() def reconnect(self): self.port_handler = PortHandler(self.port) self.packet_handler = PacketHandler(PROTOCOL_VERSION) if not self.port_handler.openPort(): raise OSError(f"Failed to open port '{self.port}'.") self.is_connected = True def are_motors_configured(self): try: return (self.motor_indices == self.read('ID')).all() except ConnectionError as e: print(e) return False def configure_motors(self): print('Scanning all baudrates and motor indices') all_baudrates = set(X_SERIES_BAUDRATE_TABLE.values()) ids_per_baudrate = {} for baudrate in all_baudrates: self.set_bus_baudrate(baudrate) present_ids = self.find_motor_indices() if len(present_ids) > 0: ids_per_baudrate[baudrate] = present_ids print(f'Motor indices detected: {ids_per_baudrate}') print() possible_baudrates = list(ids_per_baudrate.keys()) possible_ids = list({idx for sublist in ids_per_baudrate.values() for idx in sublist}) untaken_ids = list(set(range(MAX_ID_RANGE)) - set(possible_ids) - set(self.motor_indices)) for i in range(len(self.motors)): self.disconnect() input(f'1. Unplug the power cord\n2. Plug/unplug minimal number of cables to only have the first {i + 1} motor(s) ({self.motor_names[:i + 1]}) connected.\n3. 
Re-plug the power cord\nPress Enter to continue...') print() self.reconnect() if i > 0: try: self._read_with_motor_ids(self.motor_models, untaken_ids[:i], 'ID') except ConnectionError: print(f'Failed to read from {untaken_ids[:i + 1]}. Make sure the power cord is plugged in.') input('Press Enter to continue...') print() self.reconnect() print('Scanning possible baudrates and motor indices') motor_found = False for baudrate in possible_baudrates: self.set_bus_baudrate(baudrate) present_ids = self.find_motor_indices(possible_ids) if len(present_ids) == 1: present_idx = present_ids[0] print(f'Detected motor with index {present_idx}') if baudrate != BAUDRATE: print(f'Setting its baudrate to {BAUDRATE}') baudrate_idx = list(X_SERIES_BAUDRATE_TABLE.values()).index(BAUDRATE) for _ in range(NUM_WRITE_RETRY): self._write_with_motor_ids(self.motor_models, present_idx, 'Baud_Rate', baudrate_idx) time.sleep(0.5) self.set_bus_baudrate(BAUDRATE) try: present_baudrate_idx = self._read_with_motor_ids(self.motor_models, present_idx, 'Baud_Rate') except ConnectionError: print('Failed to write baudrate. Retrying.') self.set_bus_baudrate(baudrate) continue break else: raise if present_baudrate_idx != baudrate_idx: raise OSError('Failed to write baudrate.') print(f'Setting its index to a temporary untaken index ({untaken_ids[i]})') self._write_with_motor_ids(self.motor_models, present_idx, 'ID', untaken_ids[i]) present_idx = self._read_with_motor_ids(self.motor_models, untaken_ids[i], 'ID') if present_idx != untaken_ids[i]: raise OSError('Failed to write index.') motor_found = True break elif len(present_ids) > 1: raise OSError(f'More than one motor detected ({present_ids}), but only one was expected.') if not motor_found: raise OSError('No motor found, but one new motor expected. Verify power cord is plugged in and retry.') print() print(f'Setting expected motor indices: {self.motor_indices}') self.set_bus_baudrate(BAUDRATE) self._write_with_motor_ids(self.motor_models, untaken_ids[:len(self.motors)], 'ID', self.motor_indices) print() if (self.read('ID') != self.motor_indices).any(): raise OSError('Failed to write motors indices.') print('Configuration is done!') def find_motor_indices(self, possible_ids=None): if possible_ids is None: possible_ids = range(MAX_ID_RANGE) indices = [] for idx in tqdm.tqdm(possible_ids): try: present_idx = self._read_with_motor_ids(self.motor_models, [idx], 'ID')[0] except ConnectionError: continue if idx != present_idx: raise OSError('Motor index used to communicate through the bus is not the same as the one present in the motor memory. The motor memory might be damaged.') indices.append(idx) return indices def set_bus_baudrate(self, baudrate): present_bus_baudrate = self.port_handler.getBaudRate() if present_bus_baudrate != baudrate: print(f'Setting bus baud rate to {baudrate}. 
Previously {present_bus_baudrate}.') self.port_handler.setBaudRate(baudrate) if self.port_handler.getBaudRate() != baudrate: raise OSError('Failed to write bus baud rate.') @property def motor_names(self) -> list[str]: return list(self.motors.keys()) @property def motor_models(self) -> list[str]: return [model for (_, model) in self.motors.values()] @property def motor_indices(self) -> list[int]: return [idx for (idx, _) in self.motors.values()] def set_calibration(self, calibration: dict[str, list]): self.calibration = calibration def apply_calibration_autocorrect(self, values: np.ndarray | list, motor_names: list[str] | None): try: values = self.apply_calibration(values, motor_names) except JointOutOfRangeError as e: print(e) self.autocorrect_calibration(values, motor_names) values = self.apply_calibration(values, motor_names) return values def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None): if motor_names is None: motor_names = self.motor_names values = values.astype(np.float32) for (i, name) in enumerate(motor_names): calib_idx = self.calibration['motor_names'].index(name) calib_mode = self.calibration['calib_mode'][calib_idx] if CalibrationMode[calib_mode] == CalibrationMode.DEGREE: drive_mode = self.calibration['drive_mode'][calib_idx] homing_offset = self.calibration['homing_offset'][calib_idx] (_, model) = self.motors[name] resolution = self.model_resolution[model] if drive_mode: values[i] *= -1 values[i] += homing_offset values[i] = values[i] / (resolution // 2) * HALF_TURN_DEGREE if values[i] < LOWER_BOUND_DEGREE or values[i] > UPPER_BOUND_DEGREE: raise JointOutOfRangeError(f'Wrong motor position range detected for {name}. Expected to be in nominal range of [-{HALF_TURN_DEGREE}, {HALF_TURN_DEGREE}] degrees (a full rotation), with a maximum range of [{LOWER_BOUND_DEGREE}, {UPPER_BOUND_DEGREE}] degrees to account for joints that can rotate a bit more, but present value is {values[i]} degree. This might be due to a cable connection issue creating an artificial 360 degrees jump in motor values. You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`') elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR: start_pos = self.calibration['start_pos'][calib_idx] end_pos = self.calibration['end_pos'][calib_idx] values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100 if values[i] < LOWER_BOUND_LINEAR or values[i] > UPPER_BOUND_LINEAR: raise JointOutOfRangeError(f'Wrong motor position range detected for {name}. Expected to be in nominal range of [0, 100] % (a full linear translation), with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, but present value is {values[i]} %. This might be due to a cable connection issue creating an artificial jump in motor values. 
You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`') return values def autocorrect_calibration(self, values: np.ndarray | list, motor_names: list[str] | None): if motor_names is None: motor_names = self.motor_names values = values.astype(np.float32) for (i, name) in enumerate(motor_names): calib_idx = self.calibration['motor_names'].index(name) calib_mode = self.calibration['calib_mode'][calib_idx] if CalibrationMode[calib_mode] == CalibrationMode.DEGREE: drive_mode = self.calibration['drive_mode'][calib_idx] homing_offset = self.calibration['homing_offset'][calib_idx] (_, model) = self.motors[name] resolution = self.model_resolution[model] if drive_mode: values[i] *= -1 calib_val = (values[i] + homing_offset) / (resolution // 2) * HALF_TURN_DEGREE in_range = calib_val > LOWER_BOUND_DEGREE and calib_val < UPPER_BOUND_DEGREE low_factor = (-(resolution // 2) - values[i] - homing_offset) / resolution upp_factor = (resolution // 2 - values[i] - homing_offset) / resolution elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR: start_pos = self.calibration['start_pos'][calib_idx] end_pos = self.calibration['end_pos'][calib_idx] calib_val = (values[i] - start_pos) / (end_pos - start_pos) * 100 in_range = calib_val > LOWER_BOUND_LINEAR and calib_val < UPPER_BOUND_LINEAR low_factor = (start_pos - values[i]) / resolution upp_factor = (end_pos - values[i]) / resolution if not in_range: if low_factor < upp_factor: factor = math.ceil(low_factor) if factor > upp_factor: raise ValueError(f'No integer found between bounds [low_factor={low_factor!r}, upp_factor={upp_factor!r}]') else: factor = math.ceil(upp_factor) if factor > low_factor: raise ValueError(f'No integer found between bounds [low_factor={low_factor!r}, upp_factor={upp_factor!r}]') if CalibrationMode[calib_mode] == CalibrationMode.DEGREE: out_of_range_str = f'{LOWER_BOUND_DEGREE} < {calib_val} < {UPPER_BOUND_DEGREE} degrees' in_range_str = f'{LOWER_BOUND_DEGREE} < {calib_val} < {UPPER_BOUND_DEGREE} degrees' elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR: out_of_range_str = f'{LOWER_BOUND_LINEAR} < {calib_val} < {UPPER_BOUND_LINEAR} %' in_range_str = f'{LOWER_BOUND_LINEAR} < {calib_val} < {UPPER_BOUND_LINEAR} %' logging.warning(f"Auto-correct calibration of motor '{name}' by shifting value by {abs(factor)} full turns, from '{out_of_range_str}' to '{in_range_str}'.") self.calibration['homing_offset'][calib_idx] += resolution * factor def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None): if motor_names is None: motor_names = self.motor_names for (i, name) in enumerate(motor_names): calib_idx = self.calibration['motor_names'].index(name) calib_mode = self.calibration['calib_mode'][calib_idx] if CalibrationMode[calib_mode] == CalibrationMode.DEGREE: drive_mode = self.calibration['drive_mode'][calib_idx] homing_offset = self.calibration['homing_offset'][calib_idx] (_, model) = self.motors[name] resolution = self.model_resolution[model] values[i] = values[i] / HALF_TURN_DEGREE * (resolution // 2) values[i] -= homing_offset if drive_mode: values[i] *= -1 elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR: start_pos = self.calibration['start_pos'][calib_idx] end_pos = self.calibration['end_pos'][calib_idx] values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos values = np.round(values).astype(np.int32) return values def _read_with_motor_ids(self, motor_models, motor_ids, data_name): return_list = True if not isinstance(motor_ids, list): return_list 
= False motor_ids = [motor_ids] assert_same_address(self.model_ctrl_table, self.motor_models, data_name) (addr, bytes) = self.model_ctrl_table[motor_models[0]][data_name] group = GroupSyncRead(self.port_handler, self.packet_handler, addr, bytes) for idx in motor_ids: group.addParam(idx) comm = group.txRxPacket() if comm != COMM_SUCCESS: raise ConnectionError(f'Read failed due to communication error on port {self.port_handler.port_name} for indices {motor_ids}: {self.packet_handler.getTxRxResult(comm)}') values = [] for idx in motor_ids: value = group.getData(idx, addr, bytes) values.append(value) if return_list: return values else: return values[0] def read(self, data_name, motor_names: str | list[str] | None=None): if not self.is_connected: raise RobotDeviceNotConnectedError(f'DynamixelMotorsBus({self.port}) is not connected. You need to run `motors_bus.connect()`.') start_time = time.perf_counter() if motor_names is None: motor_names = self.motor_names if isinstance(motor_names, str): motor_names = [motor_names] motor_ids = [] models = [] for name in motor_names: (motor_idx, model) = self.motors[name] motor_ids.append(motor_idx) models.append(model) assert_same_address(self.model_ctrl_table, models, data_name) (addr, bytes) = self.model_ctrl_table[model][data_name] group_key = get_group_sync_key(data_name, motor_names) if data_name not in self.group_readers: self.group_readers[group_key] = GroupSyncRead(self.port_handler, self.packet_handler, addr, bytes) for idx in motor_ids: self.group_readers[group_key].addParam(idx) for _ in range(NUM_READ_RETRY): comm = self.group_readers[group_key].txRxPacket() if comm == COMM_SUCCESS: break if comm != COMM_SUCCESS: raise ConnectionError(f'Read failed due to communication error on port {self.port} for group_key {group_key}: {self.packet_handler.getTxRxResult(comm)}') values = [] for idx in motor_ids: value = self.group_readers[group_key].getData(idx, addr, bytes) values.append(value) values = np.array(values) if data_name in CONVERT_UINT32_TO_INT32_REQUIRED: values = values.astype(np.int32) if data_name in CALIBRATION_REQUIRED and self.calibration is not None: values = self.apply_calibration_autocorrect(values, motor_names) delta_ts_name = get_log_name('delta_timestamp_s', 'read', data_name, motor_names) self.logs[delta_ts_name] = time.perf_counter() - start_time ts_utc_name = get_log_name('timestamp_utc', 'read', data_name, motor_names) self.logs[ts_utc_name] = capture_timestamp_utc() return values def _write_with_motor_ids(self, motor_models, motor_ids, data_name, values): if not isinstance(motor_ids, list): motor_ids = [motor_ids] if not isinstance(values, list): values = [values] assert_same_address(self.model_ctrl_table, motor_models, data_name) (addr, bytes) = self.model_ctrl_table[motor_models[0]][data_name] group = GroupSyncWrite(self.port_handler, self.packet_handler, addr, bytes) for (idx, value) in zip(motor_ids, values, strict=True): data = convert_to_bytes(value, bytes) group.addParam(idx, data) comm = group.txPacket() if comm != COMM_SUCCESS: raise ConnectionError(f'Write failed due to communication error on port {self.port_handler.port_name} for indices {motor_ids}: {self.packet_handler.getTxRxResult(comm)}') def write(self, data_name, values: int | float | np.ndarray, motor_names: str | list[str] | None=None): if not self.is_connected: raise RobotDeviceNotConnectedError(f'DynamixelMotorsBus({self.port}) is not connected. 
You need to run `motors_bus.connect()`.') start_time = time.perf_counter() if motor_names is None: motor_names = self.motor_names if isinstance(motor_names, str): motor_names = [motor_names] if isinstance(values, (int, float, np.integer)): values = [int(values)] * len(motor_names) values = np.array(values) motor_ids = [] models = [] for name in motor_names: (motor_idx, model) = self.motors[name] motor_ids.append(motor_idx) models.append(model) if data_name in CALIBRATION_REQUIRED and self.calibration is not None: values = self.revert_calibration(values, motor_names) values = values.tolist() assert_same_address(self.model_ctrl_table, models, data_name) (addr, bytes) = self.model_ctrl_table[model][data_name] group_key = get_group_sync_key(data_name, motor_names) init_group = data_name not in self.group_readers if init_group: self.group_writers[group_key] = GroupSyncWrite(self.port_handler, self.packet_handler, addr, bytes) for (idx, value) in zip(motor_ids, values, strict=True): data = convert_to_bytes(value, bytes) if init_group: self.group_writers[group_key].addParam(idx, data) else: self.group_writers[group_key].changeParam(idx, data) comm = self.group_writers[group_key].txPacket() if comm != COMM_SUCCESS: raise ConnectionError(f'Write failed due to communication error on port {self.port} for group_key {group_key}: {self.packet_handler.getTxRxResult(comm)}') delta_ts_name = get_log_name('delta_timestamp_s', 'write', data_name, motor_names) self.logs[delta_ts_name] = time.perf_counter() - start_time ts_utc_name = get_log_name('timestamp_utc', 'write', data_name, motor_names) self.logs[ts_utc_name] = capture_timestamp_utc() def disconnect(self): if not self.is_connected: raise RobotDeviceNotConnectedError(f'DynamixelMotorsBus({self.port}) is not connected. 
Try running `motors_bus.connect()` first.') if self.port_handler is not None: self.port_handler.closePort() self.port_handler = None self.packet_handler = None self.group_readers = {} self.group_writers = {} self.is_connected = False def __del__(self): if getattr(self, 'is_connected', False): self.disconnect() if __name__ == '__main__': find_port() # File: lerobot-main/lerobot/common/robot_devices/robots/manipulator.py import json import logging import time import warnings from dataclasses import dataclass, field, replace from pathlib import Path from typing import Sequence import numpy as np import torch from lerobot.common.robot_devices.cameras.utils import Camera from lerobot.common.robot_devices.motors.dynamixel import CalibrationMode, TorqueMode, convert_degrees_to_steps from lerobot.common.robot_devices.motors.utils import MotorsBus from lerobot.common.robot_devices.robots.utils import get_arm_id from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError URL_TEMPLATE = 'https://raw.githubusercontent.com/huggingface/lerobot/main/media/{robot}/{arm}_{position}.webp' ZERO_POSITION_DEGREE = 0 ROTATED_POSITION_DEGREE = 90 def assert_drive_mode(drive_mode): if not np.all(np.isin(drive_mode, [0, 1])): raise ValueError(f'`drive_mode` contains values other than 0 or 1: ({drive_mode})') def apply_drive_mode(position, drive_mode): assert_drive_mode(drive_mode) signed_drive_mode = -(drive_mode * 2 - 1) position *= signed_drive_mode return position def compute_nearest_rounded_position(position, models): delta_turn = convert_degrees_to_steps(ROTATED_POSITION_DEGREE, models) nearest_pos = np.round(position.astype(float) / delta_turn) * delta_turn return nearest_pos.astype(position.dtype) def run_arm_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str): if (arm.read('Torque_Enable') != TorqueMode.DISABLED.value).any(): raise ValueError('To run calibration, the torque must be disabled on all motors.') print(f'\nRunning calibration of {robot_type} {arm_name} {arm_type}...') print('\nMove arm to zero position') print('See: ' + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position='zero')) input('Press Enter to continue...') zero_target_pos = convert_degrees_to_steps(ZERO_POSITION_DEGREE, arm.motor_models) zero_pos = arm.read('Present_Position') zero_nearest_pos = compute_nearest_rounded_position(zero_pos, arm.motor_models) homing_offset = zero_target_pos - zero_nearest_pos print('\nMove arm to rotated target position') print('See: ' + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position='rotated')) input('Press Enter to continue...') rotated_target_pos = convert_degrees_to_steps(ROTATED_POSITION_DEGREE, arm.motor_models) rotated_pos = arm.read('Present_Position') drive_mode = (rotated_pos < zero_pos).astype(np.int32) rotated_drived_pos = apply_drive_mode(rotated_pos, drive_mode) rotated_nearest_pos = compute_nearest_rounded_position(rotated_drived_pos, arm.motor_models) homing_offset = rotated_target_pos - rotated_nearest_pos print('\nMove arm to rest position') print('See: ' + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position='rest')) input('Press Enter to continue...') print() calib_mode = [CalibrationMode.DEGREE.name] * len(arm.motor_names) if robot_type == 'aloha' and 'gripper' in arm.motor_names: calib_idx = arm.motor_names.index('gripper') calib_mode[calib_idx] = CalibrationMode.LINEAR.name calib_data = {'homing_offset': homing_offset.tolist(), 'drive_mode': drive_mode.tolist(), 'start_pos': 
zero_pos.tolist(), 'end_pos': rotated_pos.tolist(), 'calib_mode': calib_mode, 'motor_names': arm.motor_names} return calib_data def ensure_safe_goal_position(goal_pos: torch.Tensor, present_pos: torch.Tensor, max_relative_target: float | list[float]): diff = goal_pos - present_pos max_relative_target = torch.tensor(max_relative_target) safe_diff = torch.minimum(diff, max_relative_target) safe_diff = torch.maximum(safe_diff, -max_relative_target) safe_goal_pos = present_pos + safe_diff if not torch.allclose(goal_pos, safe_goal_pos): logging.warning(f'Relative goal position magnitude had to be clamped to be safe.\n requested relative goal position target: {diff}\n clamped relative goal position target: {safe_diff}') return safe_goal_pos @dataclass class ManipulatorRobotConfig: robot_type: str | None = None leader_arms: dict[str, MotorsBus] = field(default_factory=lambda : {}) follower_arms: dict[str, MotorsBus] = field(default_factory=lambda : {}) cameras: dict[str, Camera] = field(default_factory=lambda : {}) max_relative_target: list[float] | float | None = None gripper_open_degree: float | None = None def __setattr__(self, prop: str, val): if prop == 'max_relative_target' and val is not None and isinstance(val, Sequence): for name in self.follower_arms: if len(self.follower_arms[name].motors) != len(val): raise ValueError(f'len(max_relative_target)={len(val)} but the follower arm with name {name} has {len(self.follower_arms[name].motors)} motors. Please make sure that the `max_relative_target` list has as many parameters as there are motors per arm. Note: This feature does not yet work with robots where different follower arms have different numbers of motors.') super().__setattr__(prop, val) class ManipulatorRobot: def __init__(self, config: ManipulatorRobotConfig | None=None, calibration_dir: Path='.cache/calibration/koch', **kwargs): if config is None: config = ManipulatorRobotConfig() self.config = replace(config, **kwargs) self.calibration_dir = Path(calibration_dir) self.robot_type = self.config.robot_type self.leader_arms = self.config.leader_arms self.follower_arms = self.config.follower_arms self.cameras = self.config.cameras self.is_connected = False self.logs = {} def connect(self): if self.is_connected: raise RobotDeviceAlreadyConnectedError('ManipulatorRobot is already connected. Do not run `robot.connect()` twice.') if not self.leader_arms and (not self.follower_arms) and (not self.cameras): raise ValueError("ManipulatorRobot doesn't have any device to connect. 
See example of usage in docstring of the class.") for name in self.follower_arms: print(f'Connecting {name} follower arm.') self.follower_arms[name].connect() print(f'Connecting {name} leader arm.') self.leader_arms[name].connect() for name in self.follower_arms: self.follower_arms[name].write('Torque_Enable', TorqueMode.DISABLED.value) for name in self.leader_arms: self.leader_arms[name].write('Torque_Enable', TorqueMode.DISABLED.value) self.activate_calibration() if self.robot_type == 'koch': self.set_koch_robot_preset() elif self.robot_type == 'aloha': self.set_aloha_robot_preset() else: warnings.warn(f'No preset found for robot type: {self.robot_type}', stacklevel=1) for name in self.follower_arms: print(f'Activating torque on {name} follower arm.') self.follower_arms[name].write('Torque_Enable', 1) if self.config.gripper_open_degree is not None: for name in self.leader_arms: self.leader_arms[name].write('Torque_Enable', 1, 'gripper') self.leader_arms[name].write('Goal_Position', self.config.gripper_open_degree, 'gripper') for name in self.cameras: self.cameras[name].connect() self.is_connected = True def activate_calibration(self): def load_or_run_calibration_(name, arm, arm_type): arm_id = get_arm_id(name, arm_type) arm_calib_path = self.calibration_dir / f'{arm_id}.json' if arm_calib_path.exists(): with open(arm_calib_path) as f: calibration = json.load(f) else: print(f"Missing calibration file '{arm_calib_path}'") calibration = run_arm_calibration(arm, self.robot_type, name, arm_type) print(f"Calibration is done! Saving calibration file '{arm_calib_path}'") arm_calib_path.parent.mkdir(parents=True, exist_ok=True) with open(arm_calib_path, 'w') as f: json.dump(calibration, f) return calibration for (name, arm) in self.follower_arms.items(): calibration = load_or_run_calibration_(name, arm, 'follower') arm.set_calibration(calibration) for (name, arm) in self.leader_arms.items(): calibration = load_or_run_calibration_(name, arm, 'leader') arm.set_calibration(calibration) def set_koch_robot_preset(self): def set_operating_mode_(arm): if (arm.read('Torque_Enable') != TorqueMode.DISABLED.value).any(): raise ValueError('To run set robot preset, the torque must be disabled on all motors.') all_motors_except_gripper = [name for name in arm.motor_names if name != 'gripper'] if len(all_motors_except_gripper) > 0: arm.write('Operating_Mode', 4, all_motors_except_gripper) arm.write('Operating_Mode', 5, 'gripper') for name in self.follower_arms: set_operating_mode_(self.follower_arms[name]) self.follower_arms[name].write('Position_P_Gain', 1500, 'elbow_flex') self.follower_arms[name].write('Position_I_Gain', 0, 'elbow_flex') self.follower_arms[name].write('Position_D_Gain', 600, 'elbow_flex') if self.config.gripper_open_degree is not None: for name in self.leader_arms: set_operating_mode_(self.leader_arms[name]) self.leader_arms[name].write('Torque_Enable', 1, 'gripper') self.leader_arms[name].write('Goal_Position', self.config.gripper_open_degree, 'gripper') def set_aloha_robot_preset(self): def set_shadow_(arm): if 'shoulder_shadow' in arm.motor_names: shoulder_idx = arm.read('ID', 'shoulder') arm.write('Secondary_ID', shoulder_idx, 'shoulder_shadow') if 'elbow_shadow' in arm.motor_names: elbow_idx = arm.read('ID', 'elbow') arm.write('Secondary_ID', elbow_idx, 'elbow_shadow') for name in self.follower_arms: set_shadow_(self.follower_arms[name]) for name in self.leader_arms: set_shadow_(self.leader_arms[name]) for name in self.follower_arms: self.follower_arms[name].write('Velocity_Limit', 131) 
all_motors_except_gripper = [name for name in self.follower_arms[name].motor_names if name != 'gripper'] if len(all_motors_except_gripper) > 0: self.follower_arms[name].write('Operating_Mode', 4, all_motors_except_gripper) self.follower_arms[name].write('Operating_Mode', 5, 'gripper') if self.config.gripper_open_degree is not None: warnings.warn(f'`gripper_open_degree` is set to {self.config.gripper_open_degree}, but None is expected for Aloha instead', stacklevel=1) def teleop_step(self, record_data=False) -> None | tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]: if not self.is_connected: raise RobotDeviceNotConnectedError('ManipulatorRobot is not connected. You need to run `robot.connect()`.') leader_pos = {} for name in self.leader_arms: before_lread_t = time.perf_counter() leader_pos[name] = self.leader_arms[name].read('Present_Position') leader_pos[name] = torch.from_numpy(leader_pos[name]) self.logs[f'read_leader_{name}_pos_dt_s'] = time.perf_counter() - before_lread_t follower_goal_pos = {} for name in self.follower_arms: before_fwrite_t = time.perf_counter() goal_pos = leader_pos[name] if self.config.max_relative_target is not None: present_pos = self.follower_arms[name].read('Present_Position') present_pos = torch.from_numpy(present_pos) goal_pos = ensure_safe_goal_position(goal_pos, present_pos, self.config.max_relative_target) follower_goal_pos[name] = goal_pos goal_pos = goal_pos.numpy().astype(np.int32) self.follower_arms[name].write('Goal_Position', goal_pos) self.logs[f'write_follower_{name}_goal_pos_dt_s'] = time.perf_counter() - before_fwrite_t if not record_data: return follower_pos = {} for name in self.follower_arms: before_fread_t = time.perf_counter() follower_pos[name] = self.follower_arms[name].read('Present_Position') follower_pos[name] = torch.from_numpy(follower_pos[name]) self.logs[f'read_follower_{name}_pos_dt_s'] = time.perf_counter() - before_fread_t state = [] for name in self.follower_arms: if name in follower_pos: state.append(follower_pos[name]) state = torch.cat(state) action = [] for name in self.follower_arms: if name in follower_goal_pos: action.append(follower_goal_pos[name]) action = torch.cat(action) images = {} for name in self.cameras: before_camread_t = time.perf_counter() images[name] = self.cameras[name].async_read() images[name] = torch.from_numpy(images[name]) self.logs[f'read_camera_{name}_dt_s'] = self.cameras[name].logs['delta_timestamp_s'] self.logs[f'async_read_camera_{name}_dt_s'] = time.perf_counter() - before_camread_t (obs_dict, action_dict) = ({}, {}) obs_dict['observation.state'] = state action_dict['action'] = action for name in self.cameras: obs_dict[f'observation.images.{name}'] = images[name] return (obs_dict, action_dict) def capture_observation(self): if not self.is_connected: raise RobotDeviceNotConnectedError('ManipulatorRobot is not connected. 
You need to run `robot.connect()`.') follower_pos = {} for name in self.follower_arms: before_fread_t = time.perf_counter() follower_pos[name] = self.follower_arms[name].read('Present_Position') follower_pos[name] = torch.from_numpy(follower_pos[name]) self.logs[f'read_follower_{name}_pos_dt_s'] = time.perf_counter() - before_fread_t state = [] for name in self.follower_arms: if name in follower_pos: state.append(follower_pos[name]) state = torch.cat(state) images = {} for name in self.cameras: before_camread_t = time.perf_counter() images[name] = self.cameras[name].async_read() images[name] = torch.from_numpy(images[name]) self.logs[f'read_camera_{name}_dt_s'] = self.cameras[name].logs['delta_timestamp_s'] self.logs[f'async_read_camera_{name}_dt_s'] = time.perf_counter() - before_camread_t obs_dict = {} obs_dict['observation.state'] = state for name in self.cameras: obs_dict[f'observation.images.{name}'] = images[name] return obs_dict def send_action(self, action: torch.Tensor) -> torch.Tensor: if not self.is_connected: raise RobotDeviceNotConnectedError('ManipulatorRobot is not connected. You need to run `robot.connect()`.') from_idx = 0 to_idx = 0 action_sent = [] for name in self.follower_arms: to_idx += len(self.follower_arms[name].motor_names) goal_pos = action[from_idx:to_idx] from_idx = to_idx if self.config.max_relative_target is not None: present_pos = self.follower_arms[name].read('Present_Position') present_pos = torch.from_numpy(present_pos) goal_pos = ensure_safe_goal_position(goal_pos, present_pos, self.config.max_relative_target) action_sent.append(goal_pos) goal_pos = goal_pos.numpy().astype(np.int32) self.follower_arms[name].write('Goal_Position', goal_pos) return torch.cat(action_sent) def disconnect(self): if not self.is_connected: raise RobotDeviceNotConnectedError('ManipulatorRobot is not connected. You need to run `robot.connect()` before disconnecting.') for name in self.follower_arms: self.follower_arms[name].disconnect() for name in self.leader_arms: self.leader_arms[name].disconnect() for name in self.cameras: self.cameras[name].disconnect() self.is_connected = False def __del__(self): if getattr(self, 'is_connected', False): self.disconnect() # File: lerobot-main/lerobot/common/robot_devices/robots/utils.py from typing import Protocol def get_arm_id(name, arm_type): return f'{name}_{arm_type}' class Robot(Protocol): def init_teleop(self): ... def run_calibration(self): ... def teleop_step(self, record_data=False): ... def capture_observation(self): ... def send_action(self, action): ... # File: lerobot-main/lerobot/common/robot_devices/utils.py import platform import time def busy_wait(seconds): if platform.system() == 'Darwin': end_time = time.perf_counter() + seconds while time.perf_counter() < end_time: pass elif seconds > 0: time.sleep(seconds) class RobotDeviceNotConnectedError(Exception): def __init__(self, message='This robot device is not connected. Try calling `robot_device.connect()` first.'): self.message = message super().__init__(self.message) class RobotDeviceAlreadyConnectedError(Exception): def __init__(self, message='This robot device is already connected. Try not calling `robot_device.connect()` twice.'): self.message = message super().__init__(self.message)
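# --- Editor's usage sketch (illustration only, not a file from the repository). ---
# A minimal teleoperation loop wiring the pieces above together: two `DynamixelMotorsBus`
# instances (leader and follower) feeding a `ManipulatorRobot`, stepped at a fixed rate
# with `busy_wait`. The serial ports and the motor id/model mappings below are
# placeholders for a Koch-style arm (the 'koch' robot type above) and are assumptions;
# adapt them to your hardware (`find_port()` in dynamixel.py helps identify the ports).
# On the first `connect()`, the robot prompts you through the manual calibration flow
# and caches the result under `calibration_dir`.
import time

from lerobot.common.robot_devices.motors.dynamixel import DynamixelMotorsBus
from lerobot.common.robot_devices.robots.manipulator import ManipulatorRobot, ManipulatorRobotConfig
from lerobot.common.robot_devices.utils import busy_wait

leader_motors = {'shoulder_pan': (1, 'xl330-m077'), 'shoulder_lift': (2, 'xl330-m077'), 'elbow_flex': (3, 'xl330-m077'), 'wrist_flex': (4, 'xl330-m077'), 'wrist_roll': (5, 'xl330-m077'), 'gripper': (6, 'xl330-m077')}
follower_motors = {'shoulder_pan': (1, 'xl430-w250'), 'shoulder_lift': (2, 'xl430-w250'), 'elbow_flex': (3, 'xl330-m288'), 'wrist_flex': (4, 'xl330-m288'), 'wrist_roll': (5, 'xl330-m288'), 'gripper': (6, 'xl330-m288')}

config = ManipulatorRobotConfig(robot_type='koch', leader_arms={'main': DynamixelMotorsBus(port='/dev/ttyACM0', motors=leader_motors)}, follower_arms={'main': DynamixelMotorsBus(port='/dev/ttyACM1', motors=follower_motors)})
robot = ManipulatorRobot(config, calibration_dir='.cache/calibration/koch')
robot.connect()  # connects the buses, loads or runs calibration, applies the 'koch' preset, enables torque

fps = 30
try:
    for _ in range(fps * 10):  # roughly 10 seconds of teleoperation
        start = time.perf_counter()
        robot.teleop_step()  # mirrors leader positions onto the follower arm
        busy_wait(1 / fps - (time.perf_counter() - start))
finally:
    robot.disconnect()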