orionweller committed (verified)
Commit b6206f7 · 1 parent: 5a28c22

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.

Files changed (50):
  1. train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  2. train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
  3. train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds +3 -0
  4. train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00003.mds +3 -0
  5. train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00004.mds +3 -0
  6. train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00005.mds +3 -0
  7. train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00006.mds +3 -0
  8. train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00007.mds +3 -0
  9. train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00008.mds +3 -0
  10. train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00009.mds +3 -0
  11. train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00010.mds +3 -0
  12. train/math-sampled/split_142-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  13. train/math-sampled/split_142-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  14. train/math-sampled/split_142-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  15. train/math-sampled/split_149-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  16. train/math-sampled/split_149-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  17. train/math-sampled/split_149-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  18. train/math-sampled/split_156-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  19. train/math-sampled/split_156-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  20. train/math-sampled/split_156-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  21. train/math-sampled/split_159-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  22. train/math-sampled/split_159-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  23. train/math-sampled/split_159-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  24. train/math-sampled/split_160-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  25. train/math-sampled/split_160-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  26. train/math-sampled/split_160-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  27. train/math-sampled/split_169-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  28. train/math-sampled/split_169-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  29. train/math-sampled/split_169-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  30. train/math-sampled/split_177-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  31. train/math-sampled/split_177-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  32. train/math-sampled/split_177-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  33. train/math-sampled/split_185-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  34. train/math-sampled/split_212-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  35. train/math-sampled/split_3-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  36. train/math-sampled/split_30-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
  37. train/math-sampled/split_307-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  38. train/math-sampled/split_307-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  39. train/math-sampled/split_307-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  40. train/math-sampled/split_309-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  41. train/math-sampled/split_331-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  42. train/math-sampled/split_346-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  43. train/math-sampled/split_347-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  44. train/math-sampled/split_347-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  45. train/math-sampled/split_347-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  46. train/math-sampled/split_363-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  47. train/math-sampled/split_385-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  48. train/math-sampled/split_385-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  49. train/math-sampled/split_385-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  50. train/math-sampled/split_391-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8dd34433bcb58053833f7cd66a7bd2f354b57dcf931a2fb92152cefbdbb4b304
+size 67107950
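
The .mds shard binaries themselves are tracked with Git LFS, so each ADDED entry above shows only the three-line pointer file (spec version, sha256 object id, and byte size) rather than the shard contents. As a purely illustrative sketch (the helper name is made up and not part of this repository), such a pointer can be parsed like this:

```python
# Hypothetical helper: parse a Git LFS pointer file into a dict of its fields.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:8dd34433bcb58053833f7cd66a7bd2f354b57dcf931a2fb92152cefbdbb4b304
size 67107950"""

info = parse_lfs_pointer(pointer)
print(info["oid"], int(info["size"]))  # the oid and size from the pointer above
```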
train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ea195eb40ae50dee12c87b0c4eef8ee4255dcfaa99cadb56d4dbdf3289f0478
+size 67104103
train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95ac3e3b49fd8a76adca93d5e90a2441e6258a820a16f31e93a039263310ac92
+size 67106900
train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b1b6c8ad0572e1000cc2484b1e3d40f48117e0f2507a8793bb824705baa5d00
+size 67103571
train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d4b1aaf8f213794255ac1e76c2e524608ea2c8d3646929f8d5b01c68039317ac
+size 67099473
train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:414e9c7053168b8b3b6c6660b6624d9dc222bcd06ac593574d3421a9ee4ce18c
+size 67097771
train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7607b020ce8985f3b309f6f9bb4b942d96a728dca17c2e1189f811dc88572e02
+size 67098003
train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00007.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ffa8f94058560808087e24c4228b50b76c4f0b8f0bced748776fd90eac40a49
+size 67106572
train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00008.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c44854950aa997674e1aad8628d98336d01a73de81dfd12dfaa70eb77ca6568
+size 67092310
train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00009.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f98018982d12f57d8931b447a146ad1a547c5228a89aef8281a440bc2098c7ee
+size 67100429
train/books-gutenberg-dup-sampled/shard_00004-tokenized-chunked-8192-512-32-backfill-nodups/shard.00010.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfa513e8a4d3baef9ce0b5e766ac78d1261add8af1e7546e105f351f5226b1a9
+size 67093541
train/math-sampled/split_142-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 57719610, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12448204, "hashes": {}}}], "version": 2}
train/math-sampled/split_142-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 14145308, "total_tokens_skipped": 0, "percentiles": {"0th": 106, "10th": 279, "20th": 327, "30th": 371, "40th": 410, "50th": 447, "60th": 487, "70th": 529, "80th": 586, "90th": 680, "95th": 790, "99th": 1075, "100th": 1210}}
train/math-sampled/split_142-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
 
train/math-sampled/split_149-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67104766, "hashes": {}}, "samples": 20695, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 23140823, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 30046560, "hashes": {}}, "samples": 9305, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10373040, "hashes": {}}}], "version": 2}
train/math-sampled/split_149-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 23927839, "total_tokens_skipped": 0, "percentiles": {"0th": 129, "10th": 499, "20th": 575, "30th": 630, "40th": 686, "50th": 742, "60th": 805, "70th": 886, "80th": 991, "90th": 1180, "95th": 1368, "99th": 1794, "100th": 2483}}
train/math-sampled/split_149-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
 
train/math-sampled/split_156-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 55871933, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11986887, "hashes": {}}}], "version": 2}
train/math-sampled/split_156-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13683570, "total_tokens_skipped": 0, "percentiles": {"0th": 127, "10th": 263, "20th": 310, "30th": 354, "40th": 393, "50th": 431, "60th": 470, "70th": 515, "80th": 570, "90th": 663, "95th": 770, "99th": 1079, "100th": 1226}}
train/math-sampled/split_156-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
 
train/math-sampled/split_159-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 55900731, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12006433, "hashes": {}}}], "version": 2}
train/math-sampled/split_159-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13690732, "total_tokens_skipped": 0, "percentiles": {"0th": 112, "10th": 265, "20th": 311, "30th": 355, "40th": 393, "50th": 433, "60th": 472, "70th": 516, "80th": 569, "90th": 659, "95th": 766, "99th": 1078, "100th": 1231}}
train/math-sampled/split_159-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
 
train/math-sampled/split_160-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 55919197, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12027665, "hashes": {}}}], "version": 2}
train/math-sampled/split_160-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13695334, "total_tokens_skipped": 0, "percentiles": {"0th": 96, "10th": 266, "20th": 312, "30th": 355, "40th": 393, "50th": 432, "60th": 472, "70th": 516, "80th": 569, "90th": 662, "95th": 767, "99th": 1074, "100th": 1212}}
train/math-sampled/split_160-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
 
train/math-sampled/split_169-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54804760, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11734951, "hashes": {}}}], "version": 2}
train/math-sampled/split_169-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13416813, "total_tokens_skipped": 0, "percentiles": {"0th": 100, "10th": 260, "20th": 305, "30th": 348, "40th": 386, "50th": 423, "60th": 462, "70th": 505, "80th": 558, "90th": 648, "95th": 747, "99th": 1074, "100th": 1298}}
train/math-sampled/split_169-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
 
train/math-sampled/split_177-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106594, "hashes": {}}, "samples": 20541, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 23108050, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 30616423, "hashes": {}}, "samples": 9459, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10571655, "hashes": {}}}], "version": 2}
train/math-sampled/split_177-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 24070764, "total_tokens_skipped": 0, "percentiles": {"0th": 128, "10th": 496, "20th": 573, "30th": 633, "40th": 688, "50th": 746, "60th": 812, "70th": 893, "80th": 1002, "90th": 1191, "95th": 1390, "99th": 1801, "100th": 2490}}
train/math-sampled/split_177-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
 
train/math-sampled/split_185-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d57ce5c2371784ade0a0dc1557da6b826a3b352d768c37ce880ee3f322b07f7d
+size 54168190
train/math-sampled/split_212-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadb60abb04dc1f918de9f53b02a219449fd64027db1f5a6cbc4267de17cb0eb
+size 54481295
train/math-sampled/split_3-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce884cb5c17a6897b7e50f17b01498d8dc120eacf51da18cdf6fa10408cbc4b1
+size 67108378
train/math-sampled/split_30-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df6bee66ad38d5b741c895faebdc8d413f9ebb4c2ea2dfade046e1cadb177d1e
+size 30521227
train/math-sampled/split_307-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54145098, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11587019, "hashes": {}}}], "version": 2}
train/math-sampled/split_307-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13251962, "total_tokens_skipped": 0, "percentiles": {"0th": 123, "10th": 256, "20th": 300, "30th": 340, "40th": 380, "50th": 419, "60th": 457, "70th": 499, "80th": 550, "90th": 640, "95th": 745, "99th": 1078, "100th": 1206}}
train/math-sampled/split_307-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
 
train/math-sampled/split_309-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55dd7ebad33507dc190baffc6976ebf9c0419a5fbc9e4828b7de28ad6ff99634
+size 54184256
train/math-sampled/split_331-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab8bbfcd5af6b856307a62938310c6b49e3297a51b3677f545df8e35bda06673
+size 54327290
train/math-sampled/split_346-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0f4070b76c3955e7602b01175ad95227b15273e9738db44a8fd91c6b31bdf7c
+size 54508270
train/math-sampled/split_347-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54328372, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11607143, "hashes": {}}}], "version": 2}
train/math-sampled/split_347-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13297773, "total_tokens_skipped": 0, "percentiles": {"0th": 113, "10th": 256, "20th": 301, "30th": 342, "40th": 382, "50th": 419, "60th": 458, "70th": 500, "80th": 552, "90th": 646, "95th": 744, "99th": 1074, "100th": 1225}}
train/math-sampled/split_347-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
 
train/math-sampled/split_363-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:189b0c17a977cb845138d86a96336e5d69864e1883f94dc18051a7588290bc64
+size 52464640
train/math-sampled/split_385-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 30345508, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 7713700, "hashes": {}}}], "version": 2}
train/math-sampled/split_385-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 7238677, "total_tokens_skipped": 0, "percentiles": {"0th": 28, "10th": 122, "20th": 146, "30th": 166, "40th": 188, "50th": 212, "60th": 241, "70th": 274, "80th": 320, "90th": 391, "95th": 463, "99th": 687, "100th": 1667}}
train/math-sampled/split_385-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
 
train/math-sampled/split_391-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 49508493, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 10823002, "hashes": {}}}], "version": 2}