Subtasks in episodes

#2
by 229nagibator229 - opened

Thank you for open-sourcing the dataset!
I have a question about subtasks in episodes. Can the low-level English language instruction change during an episode?

Galaxea org

Thank you for open-sourcing the dataset!
I have a question about subtasks in episodes. Can low level english language instruction change during the episode?

In our dataset, each subtask was manually annotated with a low-level instruction in Chinese, along with its start and end time within the episode. Then during the dataset processing pipeline, we used DeepSeek-V3 to translate the instructions into English and assign them to each observation–action step.

Thank you for the reply!
So, as I understand it, the low-level instruction can change during an episode? How can I obtain the start and end times for a specific low-level instruction in an episode?
I think there is currently only one low-level instruction per episode. I checked all 5 parts of the dataset you provided using the code below, and it seems the low-level instruction does not change within an episode.

import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.python.lib.io import file_io
import tensorflow_io as tfio
from tqdm import tqdm
import os

def parse_subtasks(episode):
    """Collect the set of distinct low-level subtask strings in one episode.

    Each instruction is formatted as
    "high level"@"low level chinese"@"low level english", so the last
    '@'-separated field is the English low-level instruction.

    Args:
        episode: dict whose 'language_instruction' entry is a tensor of
            UTF-8 byte strings, one per observation-action step.

    Returns:
        Set of unique low-level English instruction strings.
    """
    raw_instructions = episode['language_instruction'].numpy().tolist()
    # Set comprehension replaces the manual loop + update([...]) pattern;
    # split('@')[-1] keeps only the trailing English field.
    return {raw.decode('utf-8').split('@')[-1] for raw in raw_instructions}

def _broadcast_metadata_rlds(i: tf.Tensor, traj: dict) -> dict:
    """
    Flatten one RLDS trajectory: lift the "steps" entry to the top level and
    repeat every piece of trajectory-level metadata once per frame so all
    tensors share the same leading (frame) dimension. Also attaches the
    bookkeeping fields `_len`, `_traj_index`, and `_frame_index`.

    NOTE: adapted from DLimp library https://github.com/kvablack/dlimp/
    """
    step_data = traj.pop("steps")

    # Frame count = leading dimension of any per-step tensor.
    num_frames = tf.shape(tf.nest.flatten(step_data)[0])[0]

    # Tile each remaining metadata leaf once per frame.
    tiled_meta = tf.nest.map_structure(
        lambda leaf: tf.repeat(leaf, num_frames), traj
    )

    # Merge step tensors with the broadcast metadata, guarding the key.
    assert "traj_metadata" not in step_data
    flat = dict(step_data)
    flat["traj_metadata"] = tiled_meta

    # Bookkeeping fields, one value per frame; names must be fresh.
    for key in ("_len", "_traj_index", "_frame_index"):
        assert key not in flat
    flat["_len"] = tf.repeat(num_frames, num_frames)
    flat["_traj_index"] = tf.repeat(i, num_frames)
    flat["_frame_index"] = tf.range(num_frames)

    return flat

# Load the RLDS builder; skip decoding of per-step tensors so iteration
# stays cheap (we only inspect language instructions).
ds_builder = tfds.builder_from_directory("Galaxea-Open-World-Dataset/rlds/part5_r1_lite/1.0.0/")
dataset = ds_builder.as_dataset(
    split="all",
    decoders={"steps": tfds.decode.SkipDecoding()},
)
dataset_info = ds_builder.info
ds_length = len(dataset)

# "Flatten" the dataset so we can apply a trajectory-level map() easily;
# afterwards each [obs][key] has a shape of (frame_size, ...).
# (The previous dataset.take(ds_length) was a no-op and has been dropped.)
dataset = dataset.enumerate().map(_broadcast_metadata_rlds)

# Count distinct low-level subtasks per episode. The previous message only
# mentioned "more than one subtask", which was misleading for episodes
# yielding zero subtasks; report the actual count instead.
for ep_idx, episode in enumerate(tqdm(dataset, total=ds_length)):
    subtasks = parse_subtasks(episode)
    if len(subtasks) != 1:
        print(f"Episode {ep_idx} has {len(subtasks)} subtasks (expected 1)")
Galaxea org

Thank you for the reply!
So, as I understand it, the low-level instruction can change during an episode? How can I obtain the start and end times for a specific low-level instruction in an episode?
I think there is currently only one low-level instruction per episode. I checked all 5 parts of the dataset you provided using the code below, and it seems the low-level instruction does not change within an episode.

import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.python.lib.io import file_io
import tensorflow_io as tfio
from tqdm import tqdm
import os

def parse_subtasks(episode):
    """Collect the set of distinct low-level subtask strings in one episode.

    Each instruction is formatted as
    "high level"@"low level chinese"@"low level english", so the last
    '@'-separated field is the English low-level instruction.

    Args:
        episode: dict whose 'language_instruction' entry is a tensor of
            UTF-8 byte strings, one per observation-action step.

    Returns:
        Set of unique low-level English instruction strings.
    """
    raw_instructions = episode['language_instruction'].numpy().tolist()
    # Set comprehension replaces the manual loop + update([...]) pattern;
    # split('@')[-1] keeps only the trailing English field.
    return {raw.decode('utf-8').split('@')[-1] for raw in raw_instructions}

def _broadcast_metadata_rlds(i: tf.Tensor, traj: dict) -> dict:
    """
    In the RLDS format, each trajectory has some top-level metadata that is explicitly separated out, and a "steps"
    entry. This function moves the "steps" entry to the top level, broadcasting any metadata to the length of the
    trajectory. This function also adds the extra metadata fields `_len`, `_traj_index`, and `_frame_index`.

    Args:
        i: scalar trajectory index, as supplied by `Dataset.enumerate()`.
        traj: RLDS episode dict containing a "steps" entry plus trajectory-level metadata.

    Returns:
        dict of per-frame tensors: the original step tensors, the broadcast
        metadata under "traj_metadata", and the `_len`/`_traj_index`/`_frame_index` fields.

    NOTE: adapted from DLimp library https://github.com/kvablack/dlimp/
    """
    steps = traj.pop("steps")

    # Trajectory length = leading dimension of any step tensor (all step
    # tensors share the same frame dimension).
    traj_len = tf.shape(tf.nest.flatten(steps)[0])[0]

    # broadcast metadata to the length of the trajectory
    metadata = tf.nest.map_structure(lambda x: tf.repeat(x, traj_len), traj)

    # put steps back in (guard against a key collision first)
    assert "traj_metadata" not in steps
    traj = {**steps, "traj_metadata": metadata}

    # Bookkeeping fields, each repeated/ranged to one value per frame;
    # names must not already exist in the episode.
    assert "_len" not in traj
    assert "_traj_index" not in traj
    assert "_frame_index" not in traj
    traj["_len"] = tf.repeat(traj_len, traj_len)
    traj["_traj_index"] = tf.repeat(i, traj_len)
    traj["_frame_index"] = tf.range(traj_len)

    return traj

# Load the RLDS builder; skip decoding of per-step tensors so iteration
# stays cheap (we only inspect language instructions).
ds_builder = tfds.builder_from_directory("Galaxea-Open-World-Dataset/rlds/part5_r1_lite/1.0.0/")
dataset = ds_builder.as_dataset(
    split="all",
    decoders={"steps": tfds.decode.SkipDecoding()},
)
dataset_info = ds_builder.info
ds_length = len(dataset)

# "Flatten" the dataset so we can apply a trajectory-level map() easily;
# afterwards each [obs][key] has a shape of (frame_size, ...).
# (The previous dataset.take(ds_length) was a no-op and has been dropped.)
dataset = dataset.enumerate().map(_broadcast_metadata_rlds)

# Count distinct low-level subtasks per episode. The previous message only
# mentioned "more than one subtask", which was misleading for episodes
# yielding zero subtasks; report the actual count instead.
for ep_idx, episode in enumerate(tqdm(dataset, total=ds_length)):
    subtasks = parse_subtasks(episode)
    if len(subtasks) != 1:
        print(f"Episode {ep_idx} has {len(subtasks)} subtasks (expected 1)")

Thanks for your careful consideration.

In our dataset, each episode you get in RLDS is already a single subtask in the original data.
As shown in our dataset schema, the format of the language instruction is "high level"@"low level chinese"@"low level english".
You can also use our example code to visualize some episodes to confirm this.

I hope this reply helps resolve your confusion.

lllliuxiao23 changed discussion status to closed

Sign up or log in to comment