"""This script demonstrates how to train Diffusion Policy on the PushT environment. |
|
|
|
Once you have trained a model with this script, you can try to evaluate it on |
|
examples/2_evaluate_pretrained_policy.py |
|
""" |

from pathlib import Path

import torch

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
from lerobot.common.datasets.utils import dataset_to_policy_features
from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig
from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
from lerobot.configs.types import FeatureType


def main():
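    # Create a directory to store the training checkpoint.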
    output_directory = Path("outputs/train/example_pusht_diffusion")
    output_directory.mkdir(parents=True, exist_ok=True)
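
    # Select your device. A CUDA GPU is assumed here; switch to "cpu" (or "mps") if that fits your setup.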
    device = torch.device("cuda")
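
    # Number of offline training steps and how often to log the loss. Adjust to your budget;
    # more steps generally give a policy that is more worth evaluating.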
    training_steps = 5000
    log_freq = 1
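
    # When starting from scratch (i.e. not from a pretrained policy), we need two things before
    # creating the policy:
    #   - input/output features: to properly size the policy
    #   - dataset stats: for normalization and denormalization of inputs/outputs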
    dataset_metadata = LeRobotDatasetMetadata("lerobot/pusht")
    features = dataset_to_policy_features(dataset_metadata.features)
    output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION}
    input_features = {key: ft for key, ft in features.items() if key not in output_features}
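
    # Policies are initialized with a configuration class, in this case `DiffusionConfig`. Here we keep
    # the defaults, so only the input/output features need to be passed in.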
    cfg = DiffusionConfig(input_features=input_features, output_features=output_features)
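
    # Instantiate the policy with the config and the dataset stats (used to normalize/denormalize
    # inputs and outputs), then put it in train mode and move it to the chosen device.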
    policy = DiffusionPolicy(cfg, dataset_stats=dataset_metadata.stats)
    policy.train()
    policy.to(device)
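
    # Another policy-dataset interaction is `delta_timestamps`: each policy expects a given number of
    # frames, which can differ between inputs and outputs. Here we derive them from the policy config
    # and the dataset's fps.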
    delta_timestamps = {
        "observation.image": [i / dataset_metadata.fps for i in cfg.observation_delta_indices],
        "observation.state": [i / dataset_metadata.fps for i in cfg.observation_delta_indices],
        "action": [i / dataset_metadata.fps for i in cfg.action_delta_indices],
    }
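
    # With the default Diffusion Policy configuration and the PushT dataset, this should work out to the
    # explicit timestamps below, which overwrite the dict above purely for illustration: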
    delta_timestamps = {
        # Load the previous image and state at -0.1 seconds before the current frame,
        # plus the current image and state (0.0 seconds).
        "observation.image": [-0.1, 0.0],
        "observation.state": [-0.1, 0.0],
        # Load the previous action (-0.1), the next action to be executed (0.0), and 14 future actions
        # with a 0.1 second spacing. All of these actions are used to supervise the policy.
        "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4],
    }
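
    # We can then instantiate the dataset with this delta_timestamps configuration.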
    dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps)
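
    # Then we create our optimizer and dataloader for offline training.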
    optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        num_workers=4,
        batch_size=64,
        shuffle=True,
        pin_memory=device.type != "cpu",
        drop_last=True,
    )
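
    # Run the training loop, cycling over the dataloader until `training_steps` is reached.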
    step = 0
    done = False
    while not done:
        for batch in dataloader:
            batch = {k: (v.to(device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()}
            loss, _ = policy.forward(batch)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            if step % log_freq == 0:
                print(f"step: {step} loss: {loss.item():.3f}")
            step += 1
            if step >= training_steps:
                done = True
                break
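
    # Save the trained policy checkpoint to the output directory.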
    policy.save_pretrained(output_directory)


if __name__ == "__main__":
    main()