Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- train/math-sampled/split_110-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_110-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_110-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_112-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_112-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_112-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_117-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_117-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_117-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_131-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_131-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_131-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_132-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_147-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_171-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_173-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_183-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_189-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_192-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_256-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_266-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_266-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_266-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +1 -0
- train/math-sampled/split_282-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_29-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_308-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_327-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_327-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_327-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_343-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
- train/math-sampled/split_345-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_345-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_345-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_351-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_351-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_351-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_359-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_359-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_359-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_381-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_381-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_381-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_387-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_387-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_387-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_394-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
- train/math-sampled/split_394-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
- train/math-sampled/split_394-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
- train/math-sampled/split_4-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
- train/math-sampled/split_403-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
train/math-sampled/split_110-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 57549405, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12410384, "hashes": {}}}], "version": 2}
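
For orientation: this index.json follows the MosaicML Streaming (MDS) index layout, with one entry per shard recording the column names and encodings ("id" as str, "input_ids" as ndarray:uint32), the raw and zstd-compressed shard sizes, and the sample count. A minimal reading sketch, assuming the mosaicml-streaming package and that the split directory has already been downloaded locally:

# Sketch: iterate one tokenized split with mosaicml-streaming (assumed tooling).
from streaming import StreamingDataset

local_dir = "train/math-sampled/split_110-tokenized-chunked-8192-512-32-backfill-nodups"
ds = StreamingDataset(local=local_dir, shuffle=False)

print(len(ds))                   # 30000 samples, per the index above
sample = ds[0]
print(sample["id"])              # the "str" column
print(sample["input_ids"][:10])  # the "ndarray:uint32" token ids
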
train/math-sampled/split_110-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 14102767, "total_tokens_skipped": 0, "percentiles": {"0th": 126, "10th": 279, "20th": 324, "30th": 366, "40th": 407, "50th": 445, "60th": 485, "70th": 530, "80th": 585, "90th": 679, "95th": 790, "99th": 1076, "100th": 1240}}
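
Each stats.json pairs split-level totals with per-sample token-length percentiles (here a median of 445 tokens and a maximum of 1240). A sketch of how the same figures could be recomputed from the shards; numpy's default percentile interpolation is an assumption, so small rounding differences from the original pipeline are possible:

# Sketch: recompute stats.json-style figures from a downloaded split.
import numpy as np
from streaming import StreamingDataset

ds = StreamingDataset(
    local="train/math-sampled/split_110-tokenized-chunked-8192-512-32-backfill-nodups",
    shuffle=False,
)
lengths = np.array([len(s["input_ids"]) for s in ds])

print("total_tokens_written:", int(lengths.sum()))  # 14102767 expected
for p in (0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 100):
    print(f"{p}th: {int(np.percentile(lengths, p))}")
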
train/math-sampled/split_110-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_112-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 57877774, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12452532, "hashes": {}}}], "version": 2}
train/math-sampled/split_112-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 14184836, "total_tokens_skipped": 0, "percentiles": {"0th": 123, "10th": 279, "20th": 326, "30th": 371, "40th": 411, "50th": 448, "60th": 487, "70th": 531, "80th": 587, "90th": 682, "95th": 794, "99th": 1082, "100th": 1203}}
train/math-sampled/split_112-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_117-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108302, "hashes": {}}, "samples": 20743, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 23122075, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 30151902, "hashes": {}}, "samples": 9257, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10372483, "hashes": {}}}], "version": 2}
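
Unlike the 30000-sample splits above, split_117 spills into a second shard: shard.00000.mds stops at 67108302 bytes, just under the 67108864-byte (64 MiB) size_limit, and the remaining 9257 samples back-fill shard.00001.mds. A small sketch for tallying a multi-shard index.json:

# Sketch: summarize the shard list declared in an MDS index.json.
import json

with open("index.json") as f:  # run inside the split directory
    index = json.load(f)

shards = index["shards"]
total_samples = sum(s["samples"] for s in shards)
total_raw_bytes = sum(s["raw_data"]["bytes"] for s in shards)
print(len(shards), total_samples, total_raw_bytes)
# For split_117: 2 shards, 30000 samples, 97260204 raw bytes
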
train/math-sampled/split_117-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 23955063, "total_tokens_skipped": 0, "percentiles": {"0th": 125, "10th": 495, "20th": 572, "30th": 631, "40th": 685, "50th": 743, "60th": 808, "70th": 887, "80th": 996, "90th": 1186, "95th": 1375, "99th": 1807, "100th": 2440}}
train/math-sampled/split_117-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_131-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 57571901, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12398806, "hashes": {}}}], "version": 2}
train/math-sampled/split_131-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 14108394, "total_tokens_skipped": 0, "percentiles": {"0th": 126, "10th": 277, "20th": 324, "30th": 367, "40th": 406, "50th": 446, "60th": 486, "70th": 530, "80th": 587, "90th": 680, "95th": 786, "99th": 1078, "100th": 1208}}
train/math-sampled/split_131-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_132-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f5db4acf3e070fefe49d602f31654e6746f684cbe1a2aab657fabac28c47274
+size 57812910
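
The shard.*.mds payloads are tracked with Git LFS, so the diff records only the three-line pointer file: the pointer spec version, the SHA-256 of the blob, and its size in bytes. A minimal parser for that standard pointer format (the helper name is ours):

# Sketch: parse a Git LFS pointer file into its key/value fields.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            if not line.strip():
                continue
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    fields["size"] = int(fields["size"])  # blob byte count
    return fields

ptr = parse_lfs_pointer("shard.00000.mds")
print(ptr["oid"], ptr["size"])  # sha256:9f5db4ac... 57812910 for split_132
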
train/math-sampled/split_147-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eaf010b18d0153e3f2046b96a76bec41fe372f3fcce3284ae65a4f1787c2341d
+size 67106393
train/math-sampled/split_171-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d14c90b330080efe809fd911f52b7977d1dfae95740dbe638f22af3b89bd798
+size 54772644
train/math-sampled/split_173-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5bd4a898783bea8107353361aca0aceb345bdcd75c00e866839c3693fa1ac5ef
+size 55933655
train/math-sampled/split_183-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ca7625fcc685c8591ae190c8d2d34bacadf6c6cbf3c289929d17a036e4848eb
+size 54338610
train/math-sampled/split_189-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb46ab9cc7ce1da72f52e43e05ec555bc8d765dc23bdadad18636ef517608363
+size 54249169
train/math-sampled/split_192-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e573d22895e2222f21ffa56b157d2158fa7320d47bff3572f41f4b4e2a02681
+size 54377674
train/math-sampled/split_256-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b766d465969d8d57552d67c0b1321ec6631c7d4ce22bc998c06cb7cbbfa779e
+size 53287424
train/math-sampled/split_266-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 22085159, "hashes": {}}, "samples": 1305, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 6934717, "hashes": {}}}], "version": 2}
train/math-sampled/split_266-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 5506933, "total_tokens_skipped": 0, "percentiles": {"0th": 68, "10th": 190, "20th": 393, "30th": 1860, "40th": 3007, "50th": 4106, "60th": 5272, "70th": 6793, "80th": 8190, "90th": 8190, "95th": 8191, "99th": 8191, "100th": 8191}}
train/math-sampled/split_266-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
@@ -0,0 +1 @@
{"0k": [2, 8, 9, 10, 12, 18, 20, 21, 29, 30, 33, 35, 40, 41, 47, 59, 61, 73, 78, 79, 81, 82, 83, 85, 89, 90, 92, 97, 98, 100, 106, 110, 116, 117, 120, 125, 128, 132, 134, 137, 138, 143, 145, 147, 153, 154, 155, 156, 157, 158, 164, 165, 166, 186, 190, 196, 197, 202, 209, 214, 216, 220, 221, 224, 230, 232, 236, 237, 243, 245, 252, 256, 258, 261, 263, 267, 268, 271, 272, 276, 282, 284, 289, 296, 300, 307, 308, 312, 313, 315, 316, 318, 324, 329, 336, 337, 341, 345, 347, 351, 356, 359, 364, 367, 374, 387, 389, 391, 392, 399, 400, 402, 405, 406, 412, 421, 429, 434, 436, 439, 441, 445, 449, 454, 455, 464, 466, 467, 477, 486, 487, 491, 495, 500, 505, 506, 515, 520, 528, 529, 533, 538, 562, 566, 568, 570, 572, 575, 576, 577, 591, 601, 604, 605, 613, 614, 615, 620, 627, 650, 654, 664, 665, 674, 676, 681, 692, 695, 705, 707, 710, 713, 715, 718, 721, 722, 726, 735, 737, 741, 743, 744, 751, 765, 768, 770, 771, 776, 780, 781, 784, 787, 792, 803, 804, 817, 818, 819, 822, 823, 824, 832, 839, 849, 850, 851, 852, 862, 868, 870, 871, 872, 875, 876, 877, 882, 886, 894, 896, 897, 899, 901, 902, 906, 911, 912, 917, 923, 926, 928, 936, 942, 949, 958, 959, 961, 964, 967, 968, 972, 974, 983, 988, 989, 991, 995, 1008, 1032, 1035, 1036, 1039, 1041, 1048, 1056, 1058, 1059, 1062, 1067, 1069, 1070, 1075, 1081, 1082, 1083, 1113, 1114, 1123, 1124, 1127, 1129, 1130, 1131, 1132, 1135, 1139, 1140, 1150, 1156, 1162, 1164, 1166, 1168, 1169, 1182, 1184, 1186, 1188, 1189, 1194, 1203, 1207, 1211, 1214, 1217, 1221, 1233, 1237, 1243, 1252, 1256, 1268, 1269, 1279, 1280, 1281, 1282, 1287, 1288, 1289, 1291, 1292, 1304], "1k": [6, 11, 24, 38, 42, 44, 63, 70, 71, 88, 95, 124, 130, 144, 170, 172, 201, 208, 235, 253, 262, 291, 310, 317, 320, 326, 346, 349, 358, 373, 376, 383, 408, 411, 427, 432, 438, 442, 446, 456, 470, 484, 507, 508, 514, 523, 527, 536, 554, 574, 580, 585, 587, 592, 594, 596, 603, 621, 675, 679, 709, 719, 725, 738, 752, 760, 791, 838, 873, 898, 931, 938, 987, 994, 999, 1002, 1014, 1029, 1031, 1037, 1055, 1106, 1110, 1116, 1120, 1122, 1138, 1143, 1152, 1165, 1187, 1192, 1210, 1227, 1232, 1244, 1246, 1273, 1285, 1296], "2k": [22, 37, 45, 51, 54, 67, 75, 84, 91, 105, 111, 139, 141, 148, 151, 180, 189, 206, 219, 260, 273, 287, 298, 309, 332, 333, 334, 339, 371, 382, 390, 393, 413, 447, 452, 461, 463, 475, 476, 482, 509, 551, 556, 567, 581, 590, 648, 662, 667, 670, 677, 689, 696, 702, 714, 717, 730, 734, 745, 749, 758, 773, 789, 793, 797, 807, 825, 855, 867, 904, 914, 951, 965, 966, 971, 984, 1000, 1009, 1021, 1043, 1045, 1052, 1066, 1073, 1078, 1094, 1099, 1105, 1111, 1128, 1163, 1173, 1175, 1178, 1185, 1199, 1204, 1212, 1224, 1229, 1240, 1249, 1254, 1257, 1260, 1267, 1274, 1295, 1302], "3k": [0, 1, 7, 16, 58, 86, 96, 102, 115, 152, 168, 183, 184, 185, 195, 204, 233, 241, 242, 249, 257, 294, 299, 303, 311, 319, 335, 340, 355, 361, 368, 384, 386, 396, 401, 407, 428, 435, 472, 483, 498, 499, 501, 522, 526, 532, 541, 544, 563, 569, 578, 595, 600, 607, 608, 619, 623, 629, 633, 644, 661, 663, 680, 683, 690, 711, 712, 720, 736, 756, 764, 782, 794, 799, 820, 830, 836, 856, 864, 865, 869, 889, 893, 905, 927, 929, 930, 944, 953, 955, 957, 969, 973, 986, 1005, 1007, 1010, 1015, 1044, 1050, 1053, 1060, 1064, 1071, 1089, 1091, 1102, 1108, 1125, 1149, 1153, 1195, 1198, 1216, 1218, 1242, 1255, 1258, 1270, 1276, 1294], "4k": [3, 5, 15, 17, 25, 32, 39, 53, 55, 60, 69, 76, 87, 113, 122, 127, 163, 173, 179, 205, 207, 217, 225, 231, 239, 251, 255, 266, 270, 277, 278, 304, 344, 348, 350, 353, 360, 363, 365, 366, 388, 404, 415, 420, 425, 444, 
478, 485, 518, 543, 546, 549, 552, 597, 612, 616, 625, 632, 637, 658, 659, 706, 724, 732, 739, 795, 802, 805, 827, 843, 854, 858, 884, 918, 924, 935, 939, 940, 962, 975, 977, 990, 1001, 1011, 1016, 1019, 1038, 1040, 1068, 1088, 1090, 1092, 1103, 1107, 1118, 1126, 1133, 1134, 1147, 1160, 1171, 1177, 1196, 1197, 1200, 1208, 1271, 1286, 1293, 1300], "5k": [4, 14, 36, 99, 108, 119, 123, 140, 159, 174, 203, 215, 248, 250, 275, 281, 314, 321, 323, 328, 354, 370, 377, 380, 398, 422, 448, 453, 459, 465, 471, 489, 494, 496, 504, 519, 534, 537, 548, 555, 582, 618, 624, 626, 636, 646, 649, 653, 655, 656, 660, 666, 668, 669, 684, 694, 697, 699, 740, 769, 783, 788, 798, 829, 861, 880, 908, 919, 921, 932, 933, 941, 954, 981, 982, 985, 996, 1025, 1027, 1034, 1042, 1046, 1047, 1054, 1095, 1136, 1144, 1159, 1206, 1213, 1220, 1235, 1238, 1250, 1251, 1275, 1297, 1299, 1301], "6k": [28, 49, 80, 93, 107, 136, 142, 146, 162, 198, 212, 222, 246, 254, 265, 288, 295, 305, 322, 325, 330, 338, 342, 403, 414, 450, 451, 458, 473, 479, 488, 493, 503, 598, 610, 657, 673, 700, 703, 731, 742, 746, 763, 767, 775, 778, 790, 821, 874, 881, 907, 922, 960, 976, 992, 1012, 1013, 1018, 1022, 1026, 1057, 1076, 1080, 1093, 1096, 1097, 1121, 1154, 1161, 1170, 1179, 1183, 1193, 1202, 1222, 1228, 1236, 1247, 1262, 1278, 1298, 1303], "7k": [19, 34, 48, 57, 68, 72, 74, 103, 135, 175, 178, 193, 194, 238, 280, 293, 306, 327, 352, 362, 375, 379, 397, 443, 457, 474, 492, 502, 517, 525, 547, 609, 634, 652, 682, 716, 727, 729, 755, 761, 774, 779, 786, 796, 815, 831, 848, 887, 895, 963, 997, 1023, 1061, 1084, 1086, 1115, 1141, 1155, 1167, 1172, 1181, 1190, 1215, 1231, 1241, 1245, 1284, 1290], "8k": [13, 23, 26, 27, 31, 43, 46, 50, 52, 56, 62, 64, 65, 66, 77, 94, 101, 104, 109, 112, 114, 118, 121, 126, 129, 131, 133, 149, 150, 160, 161, 167, 169, 171, 176, 177, 181, 182, 187, 188, 191, 192, 199, 200, 210, 211, 213, 218, 223, 226, 227, 228, 229, 234, 240, 244, 247, 259, 264, 269, 274, 279, 283, 285, 286, 290, 292, 297, 301, 302, 331, 343, 357, 369, 372, 378, 381, 385, 394, 395, 409, 410, 416, 417, 418, 419, 423, 424, 426, 430, 431, 433, 437, 440, 460, 462, 468, 469, 480, 481, 490, 497, 510, 511, 512, 513, 516, 521, 524, 530, 531, 535, 539, 540, 542, 545, 550, 553, 557, 558, 559, 560, 561, 564, 565, 571, 573, 579, 583, 584, 586, 588, 589, 593, 599, 602, 606, 611, 617, 622, 628, 630, 631, 635, 638, 639, 640, 641, 642, 643, 645, 647, 651, 671, 672, 678, 685, 686, 687, 688, 691, 693, 698, 701, 704, 708, 723, 728, 733, 747, 748, 750, 753, 754, 757, 759, 762, 766, 772, 777, 785, 800, 801, 806, 808, 809, 810, 811, 812, 813, 814, 816, 826, 828, 833, 834, 835, 837, 840, 841, 842, 844, 845, 846, 847, 853, 857, 859, 860, 863, 866, 878, 879, 883, 885, 888, 890, 891, 892, 900, 903, 909, 910, 913, 915, 916, 920, 925, 934, 937, 943, 945, 946, 947, 948, 950, 952, 956, 970, 978, 979, 980, 993, 998, 1003, 1004, 1006, 1017, 1020, 1024, 1028, 1030, 1033, 1049, 1051, 1063, 1065, 1072, 1074, 1077, 1079, 1085, 1087, 1098, 1100, 1101, 1104, 1109, 1112, 1117, 1119, 1137, 1142, 1145, 1146, 1148, 1151, 1157, 1158, 1174, 1176, 1180, 1191, 1201, 1205, 1209, 1219, 1223, 1225, 1226, 1230, 1234, 1239, 1248, 1253, 1259, 1261, 1263, 1264, 1265, 1266, 1272, 1277, 1283]}
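
This is the one token_decile.json small enough to render, and it shows the file's layout: keys "0k" through "8k" bucket sample indices by token length in thousands, topping out at the 8192-token chunk limit from the directory name. A sketch that inverts the map to answer "which bucket is sample i in?"; it assumes each index appears in exactly one bucket:

# Sketch: invert a token_decile.json bucket map (sample index -> bucket).
import json

with open("token_decile.json") as f:  # split_266's file, shown above
    buckets = json.load(f)

bucket_of = {i: label for label, idxs in buckets.items() for i in idxs}
print(bucket_of[0])                           # "3k" per the listing above
print(sum(len(v) for v in buckets.values()))  # 1305, matching index.json
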
train/math-sampled/split_282-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d53e5f089d5f7e2200ee93bdcc4ff08935cc718d3f5e35e2ceaee56ce3cc6d17
+size 54044424
train/math-sampled/split_29-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c71336ffdee1980e4edb1c6820f5a69b1817c745e00934687484ccf27633611
+size 64930501
train/math-sampled/split_308-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:677933d8e5d15bf0f625a217cc518683549adc56219fbf1805c56428001b1cab
+size 54397467
train/math-sampled/split_327-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 57626156, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12414290, "hashes": {}}}], "version": 2}
train/math-sampled/split_327-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 14121955, "total_tokens_skipped": 0, "percentiles": {"0th": 116, "10th": 277, "20th": 322, "30th": 367, "40th": 407, "50th": 446, "60th": 486, "70th": 530, "80th": 587, "90th": 680, "95th": 791, "99th": 1083, "100th": 1228}}
train/math-sampled/split_327-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_343-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a685af5a55a227cb73df5fab380689aa062a71f5bd4cb0e39b54a0015383dca7
+size 54288570
train/math-sampled/split_345-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 50479724, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11181660, "hashes": {}}}], "version": 2}
train/math-sampled/split_345-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 12335352, "total_tokens_skipped": 0, "percentiles": {"0th": 127, "10th": 270, "20th": 303, "30th": 331, "40th": 357, "50th": 381, "60th": 410, "70th": 444, "80th": 493, "90th": 583, "95th": 673, "99th": 1003, "100th": 1342}}
train/math-sampled/split_345-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_351-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54259465, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11616260, "hashes": {}}}], "version": 2}
train/math-sampled/split_351-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13280540, "total_tokens_skipped": 0, "percentiles": {"0th": 96, "10th": 257, "20th": 302, "30th": 342, "40th": 381, "50th": 419, "60th": 457, "70th": 499, "80th": 550, "90th": 643, "95th": 747, "99th": 1072, "100th": 1207}}
train/math-sampled/split_351-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_359-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54528865, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11664861, "hashes": {}}}], "version": 2}
train/math-sampled/split_359-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13347856, "total_tokens_skipped": 0, "percentiles": {"0th": 112, "10th": 259, "20th": 302, "30th": 343, "40th": 383, "50th": 421, "60th": 459, "70th": 501, "80th": 553, "90th": 644, "95th": 754, "99th": 1072, "100th": 1209}}
train/math-sampled/split_359-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_381-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 52759414, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11562744, "hashes": {}}}], "version": 2}
train/math-sampled/split_381-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 12905259, "total_tokens_skipped": 0, "percentiles": {"0th": 111, "10th": 273, "20th": 309, "30th": 340, "40th": 368, "50th": 396, "60th": 428, "70th": 468, "80th": 525, "90th": 619, "95th": 725, "99th": 1058, "100th": 1342}}
train/math-sampled/split_381-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_387-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54056866, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11572467, "hashes": {}}}], "version": 2}
train/math-sampled/split_387-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13229911, "total_tokens_skipped": 0, "percentiles": {"0th": 134, "10th": 256, "20th": 300, "30th": 341, "40th": 381, "50th": 418, "60th": 455, "70th": 498, "80th": 549, "90th": 638, "95th": 739, "99th": 1076, "100th": 1215}}
train/math-sampled/split_387-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_394-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 50386312, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11172516, "hashes": {}}}], "version": 2}
train/math-sampled/split_394-tokenized-chunked-8192-512-32-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 12311987, "total_tokens_skipped": 0, "percentiles": {"0th": 148, "10th": 270, "20th": 303, "30th": 330, "40th": 357, "50th": 381, "60th": 408, "70th": 441, "80th": 490, "90th": 579, "95th": 673, "99th": 1024, "100th": 1340}}
train/math-sampled/split_394-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math-sampled/split_4-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a0aa63c648c4637888f5b114f465580841ccb3f9e6443c0513d5f5064b51f53
+size 17788996
train/math-sampled/split_403-tokenized-chunked-8192-512-32-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 50347089, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11145540, "hashes": {}}}], "version": 2}
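
Since every tokenized split carries its own stats.json, the whole upload can be audited without touching the shards. A closing sketch, assuming the directory layout implied by the paths above, that sums token counts across all splits:

# Sketch: aggregate total_tokens_written over every tokenized split.
import glob
import json

total = 0
pattern = "train/math-sampled/*-tokenized-chunked-8192-512-32-backfill-nodups/stats.json"
for path in glob.glob(pattern):
    with open(path) as f:
        total += json.load(f)["total_tokens_written"]
print(f"{total:,} tokens written across all splits")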