Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
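The shards listed below were pushed with the upload-large-folder tool. As a minimal sketch (assuming the `huggingface_hub` Python client and a hypothetical repository id and local folder), an equivalent upload looks like this:

```python
from huggingface_hub import HfApi

api = HfApi()

# Resumable, parallel upload of a folder containing many large files;
# the .mds shards end up tracked as Git LFS objects on the Hub.
api.upload_large_folder(
    repo_id="your-org/math-sampled-tokenized",  # hypothetical repo id
    repo_type="dataset",
    folder_path="train/math-sampled",           # hypothetical local folder
)
```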
- train/math-sampled/split_117-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
- train/math-sampled/split_122-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_122-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_122-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_135-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_135-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_135-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_151-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_151-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_151-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_152-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_152-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_152-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_155-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_158-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_158-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_158-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_16-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_16-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_16-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_174-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_18-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
- train/math-sampled/split_186-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_186-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_186-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_197-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_197-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_197-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_21-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
- train/math-sampled/split_266-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_31-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_31-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_31-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_312-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_315-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_315-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_315-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_316-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_316-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_316-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_321-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_321-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_321-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_339-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_339-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_339-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_347-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_380-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_380-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_380-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
train/math-sampled/split_117-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cbfeb6625a967edeecec93f157a5551532668b01305b2f497b6f615d93afd905
+size 30151902
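The `.mds` shard files above are committed as Git LFS pointers: the repository stores only the object ID and byte size, while the payload lives in LFS storage. As a minimal sketch (assuming the shard has already been materialized locally, e.g. with `git lfs pull`; the local filename is hypothetical), the pointer's fields can be checked against the downloaded file:

```python
import hashlib

# Values copied from the LFS pointer for split_117/shard.00001.mds above.
expected_oid = "cbfeb6625a967edeecec93f157a5551532668b01305b2f497b6f615d93afd905"
expected_size = 30151902

# Hypothetical local path to the materialized shard.
with open("shard.00001.mds", "rb") as f:
    payload = f.read()

assert len(payload) == expected_size, "size mismatch vs. LFS pointer"
assert hashlib.sha256(payload).hexdigest() == expected_oid, "sha256 mismatch vs. LFS pointer"
```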
train/math-sampled/split_122-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 30777752, "hashes": {}}, "samples": 16000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 6635171, "hashes": {}}}], "version": 2}
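Each `index.json` follows the MosaicML Streaming (MDS) layout: it lists the split's shards with their column names (`id`, `input_ids`), encodings, sample counts, and raw/zstd-compressed sizes. As a minimal sketch (assuming the `mosaicml-streaming` package and hypothetical local/remote paths), a split indexed like this could be read as follows:

```python
from streaming import StreamingDataset  # pip install mosaicml-streaming

# Hypothetical paths: point `remote` at one split directory of this dataset
# and `local` at a scratch cache where fetched shards are kept.
dataset = StreamingDataset(
    remote="train/math-sampled/split_122-tokenized-chunked-8192-512-32-backfill-nodups",
    local="/tmp/mds-cache/split_122",
    shuffle=False,
)

# Columns declared in index.json: "id" (str) and "input_ids" (uint32 array).
sample = dataset[0]
print(sample["id"], len(sample["input_ids"]))
```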
train/math-sampled/split_122-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 7542629, "total_tokens_skipped": 0, "percentiles": {"0th": 118, "10th": 279, "20th": 324, "30th": 368, "40th": 409, "50th": 447, "60th": 487, "70th": 529, "80th": 589, "90th": 680, "95th": 792, "99th": 1075, "100th": 1188}}
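Each `stats.json` summarizes the split's tokenization: total tokens written, skipped, and duplicated, plus per-sample token-length percentiles. The tool that produced these files is not shown in this commit; as a hedged illustration only, an equivalent summary could be computed from per-sample token counts like this (the counts below are placeholders):

```python
import numpy as np

# Placeholder per-sample token counts; in practice one value per tokenized
# sample, e.g. len(sample["input_ids"]) for every sample in the split.
token_counts = np.array([118, 279, 324, 409, 447, 529, 680, 792, 1075, 1188])

points = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 100]
stats = {
    "total_duplicated_tokens": 0,
    "total_tokens_written": int(token_counts.sum()),
    "total_tokens_skipped": 0,
    "percentiles": {f"{p}th": int(np.percentile(token_counts, p)) for p in points},
}
print(stats)
```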
train/math-sampled/split_122-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_135-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54352168, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11634614, "hashes": {}}}], "version": 2}

train/math-sampled/split_135-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13303717, "total_tokens_skipped": 0, "percentiles": {"0th": 129, "10th": 257, "20th": 301, "30th": 341, "40th": 381, "50th": 420, "60th": 457, "70th": 501, "80th": 552, "90th": 644, "95th": 751, "99th": 1076, "100th": 1219}}

train/math-sampled/split_135-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_151-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 55919520, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12001469, "hashes": {}}}], "version": 2}

train/math-sampled/split_151-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13695432, "total_tokens_skipped": 0, "percentiles": {"0th": 123, "10th": 265, "20th": 311, "30th": 354, "40th": 393, "50th": 432, "60th": 471, "70th": 514, "80th": 569, "90th": 665, "95th": 771, "99th": 1078, "100th": 1230}}

train/math-sampled/split_151-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_152-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 56004341, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12026248, "hashes": {}}}], "version": 2}

train/math-sampled/split_152-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13716629, "total_tokens_skipped": 0, "percentiles": {"0th": 126, "10th": 266, "20th": 312, "30th": 356, "40th": 395, "50th": 434, "60th": 474, "70th": 516, "80th": 569, "90th": 660, "95th": 766, "99th": 1079, "100th": 1209}}

train/math-sampled/split_152-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_155-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27c3fadbbd8fe3ee435c44bcd4055f2853029bd01090be14c2af953f639fca12
+size 54259689
train/math-sampled/split_158-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 55732117, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11973992, "hashes": {}}}], "version": 2}

train/math-sampled/split_158-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13648581, "total_tokens_skipped": 0, "percentiles": {"0th": 130, "10th": 265, "20th": 310, "30th": 352, "40th": 392, "50th": 430, "60th": 470, "70th": 514, "80th": 569, "90th": 661, "95th": 766, "99th": 1079, "100th": 1221}}

train/math-sampled/split_158-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_16-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67105606, "hashes": {}}, "samples": 26094, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 19823572, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17431204, "hashes": {}}, "samples": 3906, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5320186, "hashes": {}}}], "version": 2}

train/math-sampled/split_16-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 20858615, "total_tokens_skipped": 0, "percentiles": {"0th": 24, "10th": 29, "20th": 29, "30th": 29, "40th": 579, "50th": 838, "60th": 939, "70th": 1038, "80th": 1162, "90th": 1358, "95th": 1553, "99th": 2003, "100th": 2924}}

train/math-sampled/split_16-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_174-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96bf2e8e6b9dfc3ad76b9f524def78c72c312de4f2ff1eb56d01725a271db06f
+size 56450994
train/math-sampled/split_18-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48c8754cab7289a6bccc898429140e6026f8a5878556af1e5f41bf2cc20b538b
+size 16273194
train/math-sampled/split_186-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54299323, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11619061, "hashes": {}}}], "version": 2}

train/math-sampled/split_186-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13290504, "total_tokens_skipped": 0, "percentiles": {"0th": 108, "10th": 257, "20th": 301, "30th": 341, "40th": 382, "50th": 419, "60th": 456, "70th": 499, "80th": 553, "90th": 640, "95th": 750, "99th": 1077, "100th": 1231}}

train/math-sampled/split_186-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_197-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54186241, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11594148, "hashes": {}}}], "version": 2}

train/math-sampled/split_197-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13262244, "total_tokens_skipped": 0, "percentiles": {"0th": 97, "10th": 256, "20th": 300, "30th": 340, "40th": 380, "50th": 418, "60th": 456, "70th": 499, "80th": 551, "90th": 643, "95th": 746, "99th": 1078, "100th": 1230}}

train/math-sampled/split_197-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_21-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e11399b3d5b4e8dd26be04958be1b44e6b68afcc58d029807ad01c4a2599e090
+size 14465365
train/math-sampled/split_266-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46f3e0cae4029ff77ade901a1ee977e888be30f093e564de5e83c35c344119d3
+size 22085159
train/math-sampled/split_31-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107353, "hashes": {}}, "samples": 15123, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 20419278, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 66160975, "hashes": {}}, "samples": 14877, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 20176445, "hashes": {}}}], "version": 2}

train/math-sampled/split_31-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 33009475, "total_tokens_skipped": 0, "percentiles": {"0th": 331, "10th": 783, "20th": 860, "30th": 920, "40th": 978, "50th": 1039, "60th": 1107, "70th": 1190, "80th": 1303, "90th": 1492, "95th": 1692, "99th": 2167, "100th": 2912}}

train/math-sampled/split_31-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_312-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab8f812a0a62cb08310e793240336f8a9f4deac3b712be34f9bcfc82603a1610
+size 54365996
train/math-sampled/split_315-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 50318076, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11142838, "hashes": {}}}], "version": 2}

train/math-sampled/split_315-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 12294924, "total_tokens_skipped": 0, "percentiles": {"0th": 76, "10th": 272, "20th": 304, "30th": 331, "40th": 356, "50th": 380, "60th": 408, "70th": 442, "80th": 489, "90th": 578, "95th": 672, "99th": 1025, "100th": 1338}}

train/math-sampled/split_315-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_316-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54162986, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11563354, "hashes": {}}}], "version": 2}

train/math-sampled/split_316-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13256422, "total_tokens_skipped": 0, "percentiles": {"0th": 133, "10th": 257, "20th": 300, "30th": 340, "40th": 379, "50th": 417, "60th": 455, "70th": 498, "80th": 550, "90th": 641, "95th": 745, "99th": 1078, "100th": 1214}}

train/math-sampled/split_316-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_321-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54359551, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11632545, "hashes": {}}}], "version": 2}

train/math-sampled/split_321-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13305536, "total_tokens_skipped": 0, "percentiles": {"0th": 101, "10th": 258, "20th": 302, "30th": 343, "40th": 382, "50th": 419, "60th": 457, "70th": 500, "80th": 554, "90th": 645, "95th": 747, "99th": 1072, "100th": 1219}}

train/math-sampled/split_321-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_339-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54363238, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11624067, "hashes": {}}}], "version": 2}

train/math-sampled/split_339-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13306488, "total_tokens_skipped": 0, "percentiles": {"0th": 115, "10th": 257, "20th": 301, "30th": 342, "40th": 382, "50th": 419, "60th": 458, "70th": 501, "80th": 555, "90th": 644, "95th": 747, "99th": 1076, "100th": 1230}}

train/math-sampled/split_339-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_347-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:009c3eb39c30f0bea80f366bc0cc93088a28933a010d01495d88356c6e0f2b7c
+size 54328372
train/math-sampled/split_380-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 50899597, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11226546, "hashes": {}}}], "version": 2}

train/math-sampled/split_380-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 12440343, "total_tokens_skipped": 0, "percentiles": {"0th": 119, "10th": 270, "20th": 304, "30th": 332, "40th": 359, "50th": 384, "60th": 413, "70th": 449, "80th": 500, "90th": 589, "95th": 683, "99th": 1037, "100th": 1341}}

train/math-sampled/split_380-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.