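"""Split a Megatron-LM IndexedDataset (a .bin/.idx pair) into several smaller
datasets, either sequence by sequence or document by document, keeping each
split below a configurable size limit.

Example invocation (illustrative only; the script name and data paths are
placeholders, adjust them to your setup):

    python split_indexed_dataset.py \
        --input-prefix /data/corpus_text_document \
        --output-dir /data/splits \
        --output-prefix corpus_split \
        --max-split-size-gb 40

Each output split is a self-contained dataset and can be loaded on its own,
for example:

    from megatron.core.datasets.indexed_dataset import IndexedDataset
    ds = IndexedDataset("/data/splits/corpus_split_000")
    print(len(ds))
"""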
import argparse
import math
import os
import sys

import torch

sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
)

from megatron.core.datasets.indexed_dataset import (
    IndexedDataset,
    IndexedDatasetBuilder,
    get_bin_path,
    get_idx_path,
)


def get_args():
    """Parse command-line arguments and validate the input files and output directory."""
    parser = argparse.ArgumentParser()

    group = parser.add_argument_group(title="input data")
    group.add_argument(
        "--input-prefix",
        type=str,
        required=True,
        help="Path to binary input file without suffix",
    )

    group = parser.add_argument_group(title="output data")
    group.add_argument(
        "--output-dir",
        type=str,
        required=True,
        help="Directory to output split files",
    )
    group.add_argument(
        "--output-prefix",
        type=str,
        default="split",
        help="Prefix for output files (default: split)",
    )

    group = parser.add_argument_group(title="split options")
    group.add_argument(
        "--num-splits",
        type=int,
        default=None,
        help="Number of splits to create. If not provided, will be determined by max-split-size-gb",
    )
    group.add_argument(
        "--max-split-size-gb",
        type=float,
        default=40.0,
        help="Maximum size of each split in GB (default: 40.0)",
    )
    group.add_argument(
        "--split-by-documents",
        action="store_true",
        help="Split by documents instead of sequences (default: split by sequences)",
    )

    group = parser.add_argument_group(title="miscellaneous")
    group.add_argument(
        "--multimodal",
        action="store_true",
        help="Whether the dataset is assumed to be multimodal"
    )

    args = parser.parse_args()

    # Check input file exists
    bin_path = get_bin_path(args.input_prefix)
    idx_path = get_idx_path(args.input_prefix)
    assert os.path.isfile(bin_path), f"ERROR: {bin_path} does not exist"
    assert os.path.isfile(idx_path), f"ERROR: {idx_path} does not exist"

    # Check output directory exists
    assert os.path.isdir(args.output_dir), f"ERROR: {args.output_dir} is not a directory or does not exist"

    return args


def split_by_sequences(dataset, output_dir, output_prefix, multimodal, max_split_size_bytes, num_splits=None):
    """Split dataset by sequences, respecting max_split_size_bytes."""
    total_sequences = len(dataset)
    if total_sequences == 0:
        print("Warning: No sequences found in dataset")
        return

    print(f"Total sequences: {total_sequences}")
    
    split_idx = 0
    start_seq_idx = 0

    while start_seq_idx < total_sequences:
        print(f"Creating split {split_idx + 1}...")
        
        # Create output paths
        split_prefix = os.path.join(output_dir, f"{output_prefix}_{split_idx:03d}")
        bin_path = get_bin_path(split_prefix)
        idx_path = get_idx_path(split_prefix)
        
        # Create builder
        builder = IndexedDatasetBuilder(bin_path, dtype=dataset.index.dtype, multimodal=multimodal)
        
        current_split_size = 0
        sequences_in_split = 0
        
        # Determine target number of sequences for this split if num_splits is provided
        if num_splits is not None:
            sequences_per_split = math.ceil(total_sequences / num_splits)
            end_seq_idx_target = min(start_seq_idx + sequences_per_split, total_sequences)
        else:
            end_seq_idx_target = total_sequences

        for seq_idx in range(start_seq_idx, end_seq_idx_target):
            sequence_pointer, sequence_length, sequence_mode = dataset.index[seq_idx]
            sequence_size = sequence_length * dataset.index.dtype_size
            
            if sequences_in_split > 0 and current_split_size + sequence_size > max_split_size_bytes:
                break
                
            sequence = dataset.bin_reader.read(
                dtype=dataset.index.dtype, count=sequence_length, offset=sequence_pointer
            )

            tensor = torch.from_numpy(sequence.copy())
            mode = sequence_mode if multimodal else 0
            builder.add_item(tensor, mode)
            # Treat every sequence as its own document so the output index has valid
            # document boundaries; the original grouping of sequences into documents
            # is not preserved when splitting by sequences.
            builder.end_document()

            current_split_size += sequence_size
            sequences_in_split += 1
        
        # Finalize the split
        builder.finalize(idx_path)
        end_seq_idx = start_seq_idx + sequences_in_split
        print(f"Split {split_idx + 1} completed: sequences {start_seq_idx} to {end_seq_idx - 1} ({sequences_in_split} sequences), size: {current_split_size / (1024**3):.2f} GB")
        
        start_seq_idx = end_seq_idx
        split_idx += 1


def split_by_documents(dataset, output_dir, output_prefix, multimodal, max_split_size_bytes, num_splits=None):
    """Split dataset by documents, respecting max_split_size_bytes."""
    document_indices = dataset.document_indices
    total_documents = len(document_indices) - 1
    
    if total_documents == 0:
        print("Warning: No documents found in dataset")
        return
        
    print(f"Total documents: {total_documents}")
    
    split_idx = 0
    start_doc_idx = 0

    while start_doc_idx < total_documents:
        print(f"Creating split {split_idx + 1}...")
        
        split_prefix = os.path.join(output_dir, f"{output_prefix}_{split_idx:03d}")
        bin_path = get_bin_path(split_prefix)
        idx_path = get_idx_path(split_prefix)
        
        builder = IndexedDatasetBuilder(bin_path, dtype=dataset.index.dtype, multimodal=multimodal)
        
        current_split_size = 0
        documents_in_split = 0
        
        if num_splits is not None:
            docs_per_split = math.ceil(total_documents / num_splits)
            end_doc_idx_target = min(start_doc_idx + docs_per_split, total_documents)
        else:
            end_doc_idx_target = total_documents
            
        for doc_idx in range(start_doc_idx, end_doc_idx_target):
            doc_start_seq = document_indices[doc_idx]
            doc_end_seq = document_indices[doc_idx + 1]
            
            doc_size = 0
            for seq_idx in range(doc_start_seq, doc_end_seq):
                _, sequence_length, _ = dataset.index[seq_idx]
                doc_size += sequence_length * dataset.index.dtype_size
            
            if documents_in_split > 0 and current_split_size + doc_size > max_split_size_bytes:
                break

            for seq_idx in range(doc_start_seq, doc_end_seq):
                sequence_pointer, sequence_length, sequence_mode = dataset.index[seq_idx]
                sequence = dataset.bin_reader.read(
                    dtype=dataset.index.dtype, count=sequence_length, offset=sequence_pointer
                )
                
                tensor = torch.from_numpy(sequence.copy())
                mode = sequence_mode if multimodal else 0
                builder.add_item(tensor, mode)
            
            builder.end_document()
            current_split_size += doc_size
            documents_in_split += 1
            
        builder.finalize(idx_path)
        end_doc_idx = start_doc_idx + documents_in_split
        print(f"Split {split_idx + 1} completed: documents {start_doc_idx} to {end_doc_idx - 1} ({documents_in_split} documents), size: {current_split_size / (1024**3):.2f} GB")
        
        start_doc_idx = end_doc_idx
        split_idx += 1


def main():
    """Load the input dataset and write the requested splits."""
    args = get_args()
    
    print(f"Loading dataset from {args.input_prefix}")
    dataset = IndexedDataset(args.input_prefix, multimodal=args.multimodal)
    
    print(f"Dataset loaded: {len(dataset)} sequences")
    if args.multimodal:
        print(f"Multimodal dataset with {len(dataset.document_indices) - 1} documents")
    else:
        print(f"Standard dataset with {len(dataset.document_indices) - 1} documents")

    max_split_size_bytes = args.max_split_size_gb * 1024**3
    
    # If num_splits is provided, check if it respects the max size.
    if args.num_splits is not None:
        input_bin_path = get_bin_path(args.input_prefix)
        total_size_bytes = os.path.getsize(input_bin_path)
        size_per_split = total_size_bytes / args.num_splits
        if size_per_split > max_split_size_bytes:
            print(f"Warning: With {args.num_splits} splits, the average split size would be {size_per_split / (1024**3):.2f} GB, which is larger than the specified max of {args.max_split_size_gb} GB.")
            print("The script will create more splits if necessary to respect the size limit.")
            
    if args.split_by_documents:
        split_by_documents(dataset, args.output_dir, args.output_prefix, args.multimodal, max_split_size_bytes, args.num_splits)
    else:
        split_by_sequences(dataset, args.output_dir, args.output_prefix, args.multimodal, max_split_size_bytes, args.num_splits)
    
    print("Dataset splitting completed!")


if __name__ == "__main__":
    main()