---
# LeRobot training configuration: diffusion policy on hellorobotinc/kitchen_cabinet_diagonal
# (Stretch robot, real-world kitchen cabinet task).
# Run-level settings.
resume: false  # set true to continue training from an existing checkpoint
device: cuda
use_amp: false  # automatic mixed precision disabled
seed: 100000
dataset_repo_id: hellorobotinc/kitchen_cabinet_diagonal
video_backend: pyav  # decoder used to read dataset videos
# Training schedule and data pipeline.
# NOTE(review): indentation was lost in this file; nesting reconstructed from the
# standard LeRobot training-config schema — confirm against the originating repo.
training:
  offline_steps: 200000
  online_steps: 0  # pure offline training; online rollout settings below are inert
  online_steps_between_rollouts: 1
  online_sampling_ratio: 0.5
  online_env_seed: ???  # OmegaConf mandatory-missing marker; must be set before online training
  eval_freq: -1  # -1 disables periodic evaluation during training
  log_freq: 250
  save_checkpoint: true
  save_freq: 10000
  num_workers: 16
  batch_size: 64

  # Random photometric augmentations: at most `max_num_transforms` of the
  # weighted transforms below are applied per image.
  image_transforms:
    enable: true
    max_num_transforms: 3
    random_order: false
    brightness:
      weight: 1
      min_max: [0.8, 1.2]
    contrast:
      weight: 1
      min_max: [0.8, 1.2]
    saturation:
      weight: 1
      min_max: [0.5, 1.5]
    hue:
      weight: 1
      min_max: [-0.05, 0.05]
    sharpness:
      weight: 1
      min_max: [0.8, 1.2]

  # Optimizer / schedule.
  grad_clip_norm: 10
  lr: 0.0001
  lr_scheduler: cosine
  lr_warmup_steps: 500
  adam_betas: [0.95, 0.999]
  adam_eps: 1.0e-08
  adam_weight_decay: 1.0e-06

  # Relative frame timestamps (seconds) sampled per training example.
  # Spacing is 1/6 s, consistent with the top-level fps of 6:
  # observations cover 2 steps (t-1, t); actions cover a 16-step horizon.
  delta_timestamps:
    observation.images.gripper:
      - -0.16666666666666666
      - 0.0
    observation.images.head:
      - -0.16666666666666666
      - 0.0
    observation.state:
      - -0.16666666666666666
      - 0.0
    action:
      - -0.16666666666666666
      - 0.0
      - 0.16666666666666666
      - 0.3333333333333333
      - 0.5
      - 0.6666666666666666
      - 0.8333333333333334
      - 1.0
      - 1.1666666666666667
      - 1.3333333333333333
      - 1.5
      - 1.6666666666666667
      - 1.8333333333333333
      - 2.0
      - 2.1666666666666665
      - 2.3333333333333335
  drop_n_last_frames: 7  # horizon - n_action_steps - n_obs_steps + 1 in LeRobot convention — confirm
# Evaluation rollout settings (unused while training.eval_freq is -1).
eval:
  n_episodes: 50
  batch_size: 50
  use_async_envs: false
# Weights & Biases logging.
wandb:
  enable: true
  disable_artifact: true  # log metrics but skip checkpoint artifact uploads
  project: diffusion-kitchen-diagonal
  notes: no depth
fps: 6  # control/data rate in Hz; referenced via ${fps} interpolation below
# Real-robot environment description.
env:
  name: stretch_real
  task: stretch_baseX
  state_dim: 9
  action_dim: 9
  fps: ${fps}  # OmegaConf interpolation of the top-level fps
  episode_length: 400
  gym:
    fps: ${fps}
# NOTE(review): nesting was reconstructed; Hydra dump ordering suggests this is a
# top-level key (dataset code version), but confirm it does not belong under env/gym.
repo_version: main
# Replace dataset-computed image stats with ImageNet normalization constants,
# matching the pretrained-backbone convention. Shape of each entry is (c, 1, 1).
override_dataset_stats:
  observation.images.gripper:
    mean: [[[0.485]], [[0.456]], [[0.406]]]  # (c,1,1)
    std: [[[0.229]], [[0.224]], [[0.225]]]  # (c,1,1)
  observation.images.head:
    mean: [[[0.485]], [[0.456]], [[0.406]]]  # (c,1,1)
    std: [[[0.229]], [[0.224]], [[0.225]]]  # (c,1,1)
# Diffusion policy hyperparameters (LeRobot diffusion policy schema).
policy:
  name: diffusion

  # Input / output structure.
  n_obs_steps: 2
  horizon: 16
  n_action_steps: 8

  input_shapes:
    observation.images.gripper: [3, 320, 320]
    observation.images.head: [3, 320, 320]
    observation.state: ["${env.state_dim}"]
  output_shapes:
    action: ["${env.action_dim}"]

  # Normalization / unnormalization modes per key.
  input_normalization_modes:
    observation.images.gripper: mean_std
    observation.images.head: mean_std
    observation.state: min_max
  output_normalization_modes:
    action: min_max

  # Vision backbone.
  vision_backbone: resnet18
  crop_shape: [320, 320]  # equals input size, so the crop is effectively a no-op — confirm intent
  crop_is_random: false
  pretrained_backbone_weights: null
  use_group_norm: true
  spatial_softmax_num_keypoints: 32

  # U-Net denoiser.
  down_dims: [512, 1024, 2048]
  kernel_size: 5
  n_groups: 8
  diffusion_step_embed_dim: 128
  use_film_scale_modulation: true

  # Noise scheduler.
  noise_scheduler_type: DDPM
  num_train_timesteps: 100
  beta_schedule: squaredcos_cap_v2
  beta_start: 0.0001
  beta_end: 0.02
  prediction_type: epsilon
  clip_sample: true
  clip_sample_range: 1.0

  # Inference.
  num_inference_steps: 100

  # Loss computation.
  do_mask_loss_for_padding: false
|