# Pretrain-Dataset/scripts/split_dataset.py
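"""Split a Megatron-style IndexedDataset (.bin/.idx pair) into smaller shards.

Splits are written sequence-by-sequence (default) or along document boundaries
(--split-by-documents). Each shard is capped at --max-split-size-gb (40 GB by
default); --num-splits can additionally target an even split count, with extra
shards created if the size limit would otherwise be exceeded.

Example invocation (paths are illustrative only, not taken from the script):

    python scripts/split_dataset.py \
        --input-prefix /data/corpus_text_document \
        --output-dir /data/splits \
        --output-prefix corpus_split \
        --max-split-size-gb 40
"""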
import os
import sys
import argparse
import math

import torch
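# Make the repository root importable so that megatron.core can be resolved
# when this script is run directly from the scripts/ directory.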
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
)
from megatron.core.datasets.indexed_dataset import (
IndexedDataset,
IndexedDatasetBuilder,
get_bin_path,
get_idx_path,
)
def get_args():
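    """Parse command-line arguments and validate the input and output paths."""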
parser = argparse.ArgumentParser()
group = parser.add_argument_group(title="input data")
group.add_argument(
"--input-prefix",
type=str,
required=True,
help="Path to binary input file without suffix",
)
group = parser.add_argument_group(title="output data")
group.add_argument(
"--output-dir",
type=str,
required=True,
help="Directory to output split files",
)
group.add_argument(
"--output-prefix",
type=str,
default="split",
help="Prefix for output files (default: split)",
)
group = parser.add_argument_group(title="split options")
group.add_argument(
"--num-splits",
type=int,
default=None,
help="Number of splits to create. If not provided, will be determined by max-split-size-gb",
)
group.add_argument(
"--max-split-size-gb",
type=float,
default=40.0,
help="Maximum size of each split in GB (default: 40.0)",
)
group.add_argument(
"--split-by-documents",
action="store_true",
help="Split by documents instead of sequences (default: split by sequences)",
)
group = parser.add_argument_group(title="miscellaneous")
group.add_argument(
"--multimodal",
action="store_true",
help="Whether the dataset is assumed to be multimodal"
)
args = parser.parse_args()
# Check input file exists
bin_path = get_bin_path(args.input_prefix)
idx_path = get_idx_path(args.input_prefix)
assert os.path.isfile(bin_path), f"ERROR: {bin_path} does not exist"
assert os.path.isfile(idx_path), f"ERROR: {idx_path} does not exist"
# Check output directory exists
assert os.path.isdir(args.output_dir), f"ERROR: {args.output_dir} is not a directory or does not exist"
return args
def split_by_sequences(dataset, output_dir, output_prefix, multimodal, max_split_size_bytes, num_splits=None):
"""Split dataset by sequences, respecting max_split_size_bytes."""
total_sequences = len(dataset)
if total_sequences == 0:
print("Warning: No sequences found in dataset")
return
print(f"Total sequences: {total_sequences}")
split_idx = 0
start_seq_idx = 0
while start_seq_idx < total_sequences:
print(f"Creating split {split_idx + 1}...")
# Create output paths
split_prefix = os.path.join(output_dir, f"{output_prefix}_{split_idx:03d}")
bin_path = get_bin_path(split_prefix)
idx_path = get_idx_path(split_prefix)
# Create builder
builder = IndexedDatasetBuilder(bin_path, dtype=dataset.index.dtype, multimodal=multimodal)
current_split_size = 0
sequences_in_split = 0
# Determine target number of sequences for this split if num_splits is provided
if num_splits is not None:
sequences_per_split = math.ceil(total_sequences / num_splits)
end_seq_idx_target = min(start_seq_idx + sequences_per_split, total_sequences)
else:
end_seq_idx_target = total_sequences
for seq_idx in range(start_seq_idx, end_seq_idx_target):
sequence_pointer, sequence_length, sequence_mode = dataset.index[seq_idx]
sequence_size = sequence_length * dataset.index.dtype_size
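            # Close out the current split once adding this sequence would exceed the
            # size budget; the first sequence of a split is always accepted, so a
            # single oversized sequence is still written rather than dropped.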
if sequences_in_split > 0 and current_split_size + sequence_size > max_split_size_bytes:
break
sequence = dataset.bin_reader.read(
dtype=dataset.index.dtype, count=sequence_length, offset=sequence_pointer
)
            tensor = torch.from_numpy(sequence.copy())
            mode = sequence_mode if multimodal else 0
            builder.add_item(tensor, mode)
            # Record each sequence as its own document so the finalized index carries
            # valid document boundaries; sequence mode does not preserve the original
            # document grouping.
            builder.end_document()
current_split_size += sequence_size
sequences_in_split += 1
# Finalize the split
builder.finalize(idx_path)
end_seq_idx = start_seq_idx + sequences_in_split
print(f"Split {split_idx + 1} completed: sequences {start_seq_idx} to {end_seq_idx - 1} ({sequences_in_split} sequences), size: {current_split_size / (1024**3):.2f} GB")
start_seq_idx = end_seq_idx
split_idx += 1
def split_by_documents(dataset, output_dir, output_prefix, multimodal, max_split_size_bytes, num_splits=None):
"""Split dataset by documents, respecting max_split_size_bytes."""
document_indices = dataset.document_indices
total_documents = len(document_indices) - 1
if total_documents == 0:
print("Warning: No documents found in dataset")
return
print(f"Total documents: {total_documents}")
split_idx = 0
start_doc_idx = 0
while start_doc_idx < total_documents:
print(f"Creating split {split_idx + 1}...")
split_prefix = os.path.join(output_dir, f"{output_prefix}_{split_idx:03d}")
bin_path = get_bin_path(split_prefix)
idx_path = get_idx_path(split_prefix)
builder = IndexedDatasetBuilder(bin_path, dtype=dataset.index.dtype, multimodal=multimodal)
current_split_size = 0
documents_in_split = 0
if num_splits is not None:
docs_per_split = math.ceil(total_documents / num_splits)
end_doc_idx_target = min(start_doc_idx + docs_per_split, total_documents)
else:
end_doc_idx_target = total_documents
for doc_idx in range(start_doc_idx, end_doc_idx_target):
doc_start_seq = document_indices[doc_idx]
doc_end_seq = document_indices[doc_idx + 1]
doc_size = 0
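            # Sum the document's size in bytes first, so the whole document can be
            # deferred to the next split if it would not fit in this one.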
for seq_idx in range(doc_start_seq, doc_end_seq):
_, sequence_length, _ = dataset.index[seq_idx]
doc_size += sequence_length * dataset.index.dtype_size
if documents_in_split > 0 and current_split_size + doc_size > max_split_size_bytes:
break
for seq_idx in range(doc_start_seq, doc_end_seq):
sequence_pointer, sequence_length, sequence_mode = dataset.index[seq_idx]
sequence = dataset.bin_reader.read(
dtype=dataset.index.dtype, count=sequence_length, offset=sequence_pointer
)
                tensor = torch.from_numpy(sequence.copy())
mode = sequence_mode if multimodal else 0
builder.add_item(tensor, mode)
builder.end_document()
current_split_size += doc_size
documents_in_split += 1
builder.finalize(idx_path)
end_doc_idx = start_doc_idx + documents_in_split
print(f"Split {split_idx + 1} completed: documents {start_doc_idx} to {end_doc_idx - 1} ({documents_in_split} documents), size: {current_split_size / (1024**3):.2f} GB")
start_doc_idx = end_doc_idx
split_idx += 1
def main():
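    """Load the indexed dataset and write it out as size-bounded splits."""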
args = get_args()
print(f"Loading dataset from {args.input_prefix}")
dataset = IndexedDataset(args.input_prefix, multimodal=args.multimodal)
print(f"Dataset loaded: {len(dataset)} sequences")
    dataset_kind = "Multimodal" if args.multimodal else "Standard"
    print(f"{dataset_kind} dataset with {len(dataset.document_indices) - 1} documents")
max_split_size_bytes = args.max_split_size_gb * 1024 * 1024 * 1024
# If num_splits is provided, check if it respects the max size.
if args.num_splits is not None:
input_bin_path = get_bin_path(args.input_prefix)
total_size_bytes = os.path.getsize(input_bin_path)
size_per_split = total_size_bytes / args.num_splits
if size_per_split > max_split_size_bytes:
print(f"Warning: With {args.num_splits} splits, the average split size would be {size_per_split / (1024**3):.2f} GB, which is larger than the specified max of {args.max_split_size_gb} GB.")
print("The script will create more splits if necessary to respect the size limit.")
if args.split_by_documents:
split_by_documents(dataset, args.output_dir, args.output_prefix, args.multimodal, max_split_size_bytes, args.num_splits)
else:
split_by_sequences(dataset, args.output_dir, args.output_prefix, args.multimodal, max_split_size_bytes, args.num_splits)
print("Dataset splitting completed!")
if __name__ == '__main__':
main()