orionweller committed · verified
Commit e3adc5d · 1 Parent(s): a8d425c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds +3 -0
  2. train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00004.mds +3 -0
  3. train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00006.mds +3 -0
  4. train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00007.mds +3 -0
  5. train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00009.mds +3 -0
  6. train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00003.mds +3 -0
  7. train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00004.mds +3 -0
  8. train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00005.mds +3 -0
  9. train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00006.mds +3 -0
  10. train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00008.mds +3 -0
  11. train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00010.mds +3 -0
  12. train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  13. train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
  14. train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00003.mds +3 -0
  15. train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00004.mds +3 -0
  16. train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00006.mds +3 -0
  17. train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00007.mds +3 -0
  18. train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00008.mds +3 -0
  19. train/math-sampled/split_173-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  20. train/math-sampled/split_173-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  21. train/math-sampled/split_173-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  22. train/math-sampled/split_183-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  23. train/math-sampled/split_183-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  24. train/math-sampled/split_183-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  25. train/math-sampled/split_192-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  26. train/math-sampled/split_192-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  27. train/math-sampled/split_192-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  28. train/math-sampled/split_2-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  29. train/math-sampled/split_2-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  30. train/math-sampled/split_2-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  31. train/math-sampled/split_25-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  32. train/math-sampled/split_25-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  33. train/math-sampled/split_25-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  34. train/math-sampled/split_282-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  35. train/math-sampled/split_282-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  36. train/math-sampled/split_282-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  37. train/math-sampled/split_30-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  38. train/math-sampled/split_30-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  39. train/math-sampled/split_30-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  40. train/math-sampled/split_300-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  41. train/math-sampled/split_300-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  42. train/math-sampled/split_300-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  43. train/math-sampled/split_308-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  44. train/math-sampled/split_308-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  45. train/math-sampled/split_308-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  46. train/math-sampled/split_343-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  47. train/math-sampled/split_343-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  48. train/math-sampled/split_343-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  49. train/math-sampled/split_406-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  50. train/math-sampled/split_406-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7327c68c3126e6738e21981694579a6daa0965fdc5e38ddd31e15c2472c5fcb8
+ size 67097205
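
Each .mds shard is stored through Git LFS, so the diff shows only the three-line pointer file: the spec version, the SHA-256 of the actual payload, and its byte size. Below is a minimal sketch of verifying a downloaded shard against its pointer, assuming the file has already been fetched locally; the path and expected values are copied from the pointer above.

```python
import hashlib
import os

# Pointer metadata from the diff above (illustrative local path).
path = "train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds"
expected_oid = "7327c68c3126e6738e21981694579a6daa0965fdc5e38ddd31e15c2472c5fcb8"
expected_size = 67097205

# Stream the file through SHA-256 in 1 MiB chunks to avoid loading it whole.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("shard matches its LFS pointer")
```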
train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6552c299b62758952802a74f75afac359561faef14fca83f3b0044cfb6c7b96d
+ size 67108213
train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:753638dbb736eba2046e87e9f95c3f9a63b01cc9c04a98826bb312a9b06cd3ff
+ size 67097052
train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00007.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:078cbbb3d10240d97b788c0142a6d2109457a9731b219826743bcff5b2f97a79
+ size 67085384
train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00009.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:245fda09f15765b2ca749326b672e6a163de2057b87641ce9c254265d7862232
+ size 67094180
train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dca26af7ca08b0571bb6984bb71bbfb0531249b93ea2ce90f48db61d555b5f6c
+ size 67105366
train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6c481992c5e330b27feb42e364caa290cbfa0a6c9576724ac73d92ae8351853
+ size 67083976
train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:823395777c701c1736ab23ceb224016bfdb07d8f25e10546b641c6a026aedb5e
+ size 67105767
train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:081d9459ae4c81c5474bdd1088e9cbf76d050a7360201e0c22fee32c6132082c
+ size 67102693
train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00008.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9bdbb2e41910de11cfe36f3923491c1dd6497d125ecd11e0f721d0838faa92eb
+ size 67107551
train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00010.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e59c099f521fca2c3ccdbb83c76949cca9a9692a034b8f3defda81ec9dcb02e9
+ size 67087891
train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f773b1cc8223a6d439221775a2923ee66d5ccc6d5e4d1c851d9d960d0faf3984
+ size 67104143
train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ffb720db3a621d9edb8934be7cf9512992deb5ef52bb0cb764e73130bc0e696
+ size 67108424
train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4945a64995306cf42c3caca868c39222bea0da03eb67816652cf1b077987e92d
+ size 67092412
train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0f49fc3e26f0baa0df7a719b1b4226bc08b5813ad8de0608f471c46ede9ee48
+ size 67092816
train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5cff9007b23aba609fcf107d26dcc5698e99c8fe8a5f0f03cb744c7840b18ac
+ size 67098691
train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00007.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c689c986af31ee95e0802164c2c50e6c09699f5cd743c96a8eb137ebebd77d60
+ size 67099421
train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00008.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f4f050559fab9ff78c3451fe9e1a537512aa21600d0fe74463dceaf97807a4b
+ size 67083904
train/math-sampled/split_173-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 55933655, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12010399, "hashes": {}}}], "version": 2}
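
This index.json is the manifest of MosaicML Streaming's MDS format: each entry describes one shard, its column layout (here an id string and an input_ids uint32 array), zstd compression, sample count, and the 67,108,864-byte size_limit. A minimal sketch of reading such a split with the mosaicml-streaming package follows; the local cache path is hypothetical, and in practice the remote would be the dataset's storage URL rather than a repo-relative path.

```python
# Sketch only: assumes `pip install mosaicml-streaming`; paths are illustrative.
from streaming import StreamingDataset

ds = StreamingDataset(
    remote="train/math-sampled/split_173-tokenized-chunked-8192-512-32-backfill-nodups",
    local="/tmp/mds-cache/split_173",  # hypothetical local cache directory
    shuffle=False,
)

sample = ds[0]
print(sample["id"], sample["input_ids"][:16])  # columns declared in index.json
```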
train/math-sampled/split_173-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13698969, "total_tokens_skipped": 0, "percentiles": {"0th": 100, "10th": 265, "20th": 311, "30th": 354, "40th": 394, "50th": 432, "60th": 472, "70th": 515, "80th": 570, "90th": 664, "95th": 772, "99th": 1078, "100th": 1219}}
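
Each stats.json summarizes the tokenization pass for its split: duplicated and skipped token counts plus per-sample length percentiles. A sketch of recomputing that percentile table with numpy, under the same assumptions as the loading sketch above; this is illustrative and not necessarily the tooling that produced these files.

```python
import numpy as np
from streaming import StreamingDataset

# Hypothetical local copy of the split (see the loading sketch above).
ds = StreamingDataset(local="/tmp/mds-cache/split_173", shuffle=False)

# Token count per sample, then the same percentile grid as stats.json.
lengths = np.array([len(s["input_ids"]) for s in ds])
pcts = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 100]
stats = {
    "total_tokens_written": int(lengths.sum()),
    "percentiles": {f"{p}th": int(np.percentile(lengths, p)) for p in pcts},
}
print(stats)
```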
train/math-sampled/split_173-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_183-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54338610, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11603963, "hashes": {}}}], "version": 2}
train/math-sampled/split_183-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13300342, "total_tokens_skipped": 0, "percentiles": {"0th": 123, "10th": 256, "20th": 300, "30th": 341, "40th": 381, "50th": 419, "60th": 457, "70th": 500, "80th": 552, "90th": 645, "95th": 751, "99th": 1076, "100th": 1229}}
train/math-sampled/split_183-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_192-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54377674, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11622948, "hashes": {}}}], "version": 2}
train/math-sampled/split_192-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13310106, "total_tokens_skipped": 0, "percentiles": {"0th": 105, "10th": 256, "20th": 302, "30th": 343, "40th": 383, "50th": 419, "60th": 459, "70th": 501, "80th": 553, "90th": 641, "95th": 749, "99th": 1077, "100th": 1223}}
train/math-sampled/split_192-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_2-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67104398, "hashes": {}}, "samples": 26294, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 20274337, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16527748, "hashes": {}}, "samples": 3706, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5028460, "hashes": {}}}], "version": 2}
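
Unlike the 30,000-sample splits above, split_2 spills into two shards: the first closed at 26,294 samples on reaching the size_limit, and the remaining 3,706 samples went to shard.00001.mds. Shard layout can be inspected straight from the manifest, as in this sketch (the path is illustrative):

```python
import json

# Inspect shard layout from an MDS manifest (illustrative local path).
with open("train/math-sampled/split_2-tokenized-chunked-8192-512-32-backfill-nodups/index.json") as f:
    index = json.load(f)

for shard in index["shards"]:
    raw = shard["raw_data"]
    print(raw["basename"], "-", shard["samples"], "samples,", raw["bytes"], "bytes raw")
```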
train/math-sampled/split_2-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 20627724, "total_tokens_skipped": 0, "percentiles": {"0th": 19, "10th": 21, "20th": 22, "30th": 22, "40th": 423, "50th": 835, "60th": 942, "70th": 1047, "80th": 1177, "90th": 1383, "95th": 1593, "99th": 2082, "100th": 5941}}
train/math-sampled/split_2-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_25-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 8938575, "hashes": {}}, "samples": 23000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 2438707, "hashes": {}}}], "version": 2}
train/math-sampled/split_25-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 2046829, "total_tokens_skipped": 0, "percentiles": {"0th": 9, "10th": 12, "20th": 12, "30th": 13, "40th": 13, "50th": 17, "60th": 17, "70th": 21, "80th": 29, "90th": 310, "95th": 597, "99th": 1055, "100th": 2345}}
train/math-sampled/split_25-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_282-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54044424, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11561665, "hashes": {}}}], "version": 2}
train/math-sampled/split_282-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13226808, "total_tokens_skipped": 0, "percentiles": {"0th": 117, "10th": 255, "20th": 299, "30th": 339, "40th": 378, "50th": 417, "60th": 456, "70th": 499, "80th": 550, "90th": 643, "95th": 742, "99th": 1072, "100th": 1204}}
train/math-sampled/split_282-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_30-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106795, "hashes": {}}, "samples": 20595, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 23144691, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 30521227, "hashes": {}}, "samples": 9405, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10537445, "hashes": {}}}], "version": 2}
train/math-sampled/split_30-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 24047020, "total_tokens_skipped": 0, "percentiles": {"0th": 131, "10th": 496, "20th": 575, "30th": 634, "40th": 690, "50th": 747, "60th": 812, "70th": 891, "80th": 1001, "90th": 1191, "95th": 1377, "99th": 1782, "100th": 2473}}
train/math-sampled/split_30-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_300-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 57094948, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12295005, "hashes": {}}}], "version": 2}
train/math-sampled/split_300-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13989187, "total_tokens_skipped": 0, "percentiles": {"0th": 125, "10th": 274, "20th": 320, "30th": 365, "40th": 403, "50th": 442, "60th": 482, "70th": 526, "80th": 581, "90th": 674, "95th": 781, "99th": 1074, "100th": 1222}}
train/math-sampled/split_300-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_308-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54397467, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11626354, "hashes": {}}}], "version": 2}
train/math-sampled/split_308-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13315053, "total_tokens_skipped": 0, "percentiles": {"0th": 111, "10th": 256, "20th": 300, "30th": 340, "40th": 380, "50th": 418, "60th": 457, "70th": 500, "80th": 553, "90th": 650, "95th": 763, "99th": 1076, "100th": 1227}}
train/math-sampled/split_308-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_343-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54288570, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11617707, "hashes": {}}}], "version": 2}
train/math-sampled/split_343-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13287797, "total_tokens_skipped": 0, "percentiles": {"0th": 101, "10th": 258, "20th": 303, "30th": 343, "40th": 382, "50th": 419, "60th": 456, "70th": 499, "80th": 551, "90th": 639, "95th": 750, "99th": 1077, "100th": 1230}}
train/math-sampled/split_343-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_406-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 50252338, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11124584, "hashes": {}}}], "version": 2}
train/math-sampled/split_406-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 12278491, "total_tokens_skipped": 0, "percentiles": {"0th": 136, "10th": 271, "20th": 304, "30th": 330, "40th": 356, "50th": 379, "60th": 408, "70th": 442, "80th": 488, "90th": 576, "95th": 670, "99th": 1009, "100th": 1341}}