orionweller committed
Commit fbe89fc · verified · 1 parent: d5ba749

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete list.
Files changed (50):
  1. train/math-sampled/split_113-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  2. train/math-sampled/split_113-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  3. train/math-sampled/split_113-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  4. train/math-sampled/split_126-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  5. train/math-sampled/split_143-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  6. train/math-sampled/split_149-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
  7. train/math-sampled/split_15-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  8. train/math-sampled/split_164-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  9. train/math-sampled/split_164-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  10. train/math-sampled/split_164-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  11. train/math-sampled/split_177-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
  12. train/math-sampled/split_18-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  13. train/math-sampled/split_18-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  14. train/math-sampled/split_18-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  15. train/math-sampled/split_2-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  16. train/math-sampled/split_205-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  17. train/math-sampled/split_205-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
  18. train/math-sampled/split_21-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  19. train/math-sampled/split_21-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  20. train/math-sampled/split_21-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  21. train/math-sampled/split_221-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  22. train/math-sampled/split_221-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  23. train/math-sampled/split_221-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  24. train/math-sampled/split_226-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  25. train/math-sampled/split_226-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  26. train/math-sampled/split_226-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  27. train/math-sampled/split_231-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  28. train/math-sampled/split_231-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  29. train/math-sampled/split_231-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  30. train/math-sampled/split_257-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  31. train/math-sampled/split_257-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  32. train/math-sampled/split_257-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  33. train/math-sampled/split_292-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  34. train/math-sampled/split_292-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  35. train/math-sampled/split_292-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  36. train/math-sampled/split_298-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  37. train/math-sampled/split_298-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  38. train/math-sampled/split_298-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  39. train/math-sampled/split_30-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  40. train/math-sampled/split_300-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  41. train/math-sampled/split_314-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  42. train/math-sampled/split_340-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  43. train/math-sampled/split_366-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  44. train/math-sampled/split_385-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  45. train/math-sampled/split_41-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  46. train/math-sampled/split_41-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  47. train/math-sampled/split_41-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  48. train/math-sampled/split_423-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  49. train/math-sampled/split_43-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  50. train/math-sampled/split_43-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
train/math-sampled/split_113-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 57539930, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12407411, "hashes": {}}}], "version": 2}
train/math-sampled/split_113-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 14100389, "total_tokens_skipped": 0, "percentiles": {"0th": 127, "10th": 278, "20th": 324, "30th": 369, "40th": 407, "50th": 445, "60th": 485, "70th": 530, "80th": 585, "90th": 680, "95th": 786, "99th": 1078, "100th": 1217}}
train/math-sampled/split_113-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_126-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06041b9a11021792b7febf4f7ddffb02c505bb04e7ce51024ecdd2784b2399df
+ size 58033568
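The three-line `.mds` entries in this diff are Git LFS pointer files: the repository stores only the SHA-256 `oid` and `size`, while the shard bytes live in LFS storage. A sketch for verifying a downloaded shard against its pointer, using only the standard library; the local path is a hypothetical download location:

```python
import hashlib

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded file against the oid/size of its LFS pointer."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# Fields from the pointer above; the local path is a hypothetical download location.
ok = verify_lfs_object(
    "shard.00000.mds",
    "06041b9a11021792b7febf4f7ddffb02c505bb04e7ce51024ecdd2784b2399df",
    58033568,
)
```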
train/math-sampled/split_143-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8fa3679cce55a235471abb2705d40512c154af1760c267f0538d6d963856bb3e
+ size 57671689
train/math-sampled/split_149-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb7fc33efc54def8ff6bf28ce75ffbad2bbab42ab02681defd40a74e418cd10c
+ size 30046560
train/math-sampled/split_15-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4548852918610c47386aa3a6b28fefe7c2982d565563384fe96261a2d40f456c
+ size 67105863
train/math-sampled/split_164-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54802580, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11754617, "hashes": {}}}], "version": 2}
train/math-sampled/split_164-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13416279, "total_tokens_skipped": 0, "percentiles": {"0th": 128, "10th": 260, "20th": 306, "30th": 347, "40th": 385, "50th": 423, "60th": 462, "70th": 505, "80th": 556, "90th": 648, "95th": 756, "99th": 1075, "100th": 1228}}
train/math-sampled/split_164-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_177-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a644ae53ec33dde20c42303250a7d75d8a68196bd517143da2c266bdf0042f12
+ size 30616423
train/math-sampled/split_18-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108802, "hashes": {}}, "samples": 26361, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 20032783, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16273194, "hashes": {}}, "samples": 3640, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 4966875, "hashes": {}}}], "version": 2}
train/math-sampled/split_18-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 20572596, "total_tokens_skipped": 0, "percentiles": {"0th": 9, "10th": 13, "20th": 14, "30th": 14, "40th": 527, "50th": 836, "60th": 937, "70th": 1034, "80th": 1157, "90th": 1349, "95th": 1538, "99th": 2007, "100th": 8190}}
train/math-sampled/split_18-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_2-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c762f9b2d1f977d2ee0b5b90c4b9509c56f87adc19c4cae83079a6afcfa46201
+ size 67104398
train/math-sampled/split_205-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b51219217400290cd5e6bd3201984d97af3c017a0f0ded9020cff0ec922bd7f9
+ size 67106379
train/math-sampled/split_205-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:347bef1e2b76748c1c8ee9f7855c0d8f01095a9876804e4d9f4785515c3b5b10
+ size 62239961
train/math-sampled/split_21-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67102885, "hashes": {}}, "samples": 26777, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 19792991, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14465365, "hashes": {}}, "samples": 3223, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 4428593, "hashes": {}}}], "version": 2}
train/math-sampled/split_21-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 20124230, "total_tokens_skipped": 0, "percentiles": {"0th": 38, "10th": 42, "20th": 43, "30th": 43, "40th": 328, "50th": 814, "60th": 923, "70th": 1025, "80th": 1148, "90th": 1336, "95th": 1533, "99th": 2004, "100th": 2969}}
train/math-sampled/split_21-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_221-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54219406, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11594841, "hashes": {}}}], "version": 2}
train/math-sampled/split_221-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13270528, "total_tokens_skipped": 0, "percentiles": {"0th": 114, "10th": 257, "20th": 302, "30th": 342, "40th": 381, "50th": 420, "60th": 457, "70th": 499, "80th": 552, "90th": 639, "95th": 739, "99th": 1079, "100th": 1216}}
train/math-sampled/split_221-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_226-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54124800, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11591255, "hashes": {}}}], "version": 2}
train/math-sampled/split_226-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13246863, "total_tokens_skipped": 0, "percentiles": {"0th": 122, "10th": 257, "20th": 301, "30th": 341, "40th": 380, "50th": 418, "60th": 455, "70th": 498, "80th": 551, "90th": 639, "95th": 741, "99th": 1072, "100th": 1256}}
train/math-sampled/split_226-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_231-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54171558, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11583189, "hashes": {}}}], "version": 2}
train/math-sampled/split_231-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13258571, "total_tokens_skipped": 0, "percentiles": {"0th": 106, "10th": 256, "20th": 299, "30th": 340, "40th": 380, "50th": 417, "60th": 456, "70th": 500, "80th": 552, "90th": 642, "95th": 752, "99th": 1078, "100th": 1201}}
train/math-sampled/split_231-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_257-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 53202880, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11475396, "hashes": {}}}], "version": 2}
train/math-sampled/split_257-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13016404, "total_tokens_skipped": 0, "percentiles": {"0th": 117, "10th": 256, "20th": 297, "30th": 334, "40th": 372, "50th": 409, "60th": 447, "70th": 489, "80th": 542, "90th": 628, "95th": 728, "99th": 1071, "100th": 1206}}
train/math-sampled/split_257-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_292-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54328028, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11626468, "hashes": {}}}], "version": 2}
train/math-sampled/split_292-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13297668, "total_tokens_skipped": 0, "percentiles": {"0th": 122, "10th": 258, "20th": 302, "30th": 343, "40th": 383, "50th": 419, "60th": 458, "70th": 501, "80th": 552, "90th": 641, "95th": 745, "99th": 1072, "100th": 1213}}
train/math-sampled/split_292-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_298-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 57737944, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12444467, "hashes": {}}}], "version": 2}
train/math-sampled/split_298-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 14149883, "total_tokens_skipped": 0, "percentiles": {"0th": 124, "10th": 279, "20th": 325, "30th": 368, "40th": 408, "50th": 446, "60th": 487, "70th": 531, "80th": 586, "90th": 680, "95th": 795, "99th": 1078, "100th": 1207}}
train/math-sampled/split_298-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_30-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13368f1f90d1e65c60eb851b8fd9a1cc5c96264a3dbaf6f75ed842dee39ee05a
+ size 67106795
train/math-sampled/split_300-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:109675131800f053866d05a2f02529e5cb32a4085ae9fa18e31e2d7cc3aecf6a
+ size 57094948
train/math-sampled/split_314-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd80e3075406fc80c95e2e6cc502b4fbd9e0f2c815c3f4664be042c393fa305d
+ size 54270591
train/math-sampled/split_340-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a7c1041b1dd027403a1edd89449f6c30becf81534dd70ca764496a6dc54d1e9
+ size 54185605
train/math-sampled/split_366-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f8c85483e81bf6a43dd133e6ab3e6623b9aac9d765cbf64856ef3aa76eea6cd
+ size 51010338
train/math-sampled/split_385-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb4c4d5791f278b55c6a330fd45f327bd6f98bd9bec865121b697920ea6c68e8
+ size 30345508
train/math-sampled/split_41-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67103902, "hashes": {}}, "samples": 15096, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 20444991, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 66233634, "hashes": {}}, "samples": 14904, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 20222828, "hashes": {}}}], "version": 2}
train/math-sampled/split_41-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 33026777, "total_tokens_skipped": 0, "percentiles": {"0th": 313, "10th": 786, "20th": 861, "30th": 920, "40th": 979, "50th": 1041, "60th": 1110, "70th": 1192, "80th": 1304, "90th": 1495, "95th": 1695, "99th": 2148, "100th": 2989}}
train/math-sampled/split_41-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_423-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2ec1f5c4b3b4c316d27b19e775e1ebb4a72f4ba1b6f169acd83f6dac683bc4d
+ size 50242372
train/math-sampled/split_43-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107999, "hashes": {}}, "samples": 15097, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 20432554, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 66348041, "hashes": {}}, "samples": 14903, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 20204939, "hashes": {}}}], "version": 2}
train/math-sampled/split_43-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 33056403, "total_tokens_skipped": 0, "percentiles": {"0th": 344, "10th": 785, "20th": 860, "30th": 920, "40th": 978, "50th": 1041, "60th": 1110, "70th": 1192, "80th": 1305, "90th": 1497, "95th": 1694, "99th": 2158, "100th": 3143}}