orionweller committed
Commit 1bb000e · verified · 1 parent: d7dee91

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
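The commit message below references the upload-large-folder tool from huggingface_hub, which uploads a large local folder to a Hub repo in resumable chunks. A minimal sketch of issuing such an upload, assuming a hypothetical repo id and local path (neither is taken from this commit):

```python
# Hedged sketch: uploading a large folder of shards to a dataset repo
# with huggingface_hub. repo_id and folder_path are hypothetical.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="user/tokenized-corpus",  # hypothetical dataset repo
    repo_type="dataset",
    folder_path="./train",            # local folder containing the splits
)
```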
Files changed (50)
  1. train/dclm-filtered_sampled-ext/split_10518-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  2. train/dclm-filtered_sampled-ext/split_10518-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds +3 -0
  3. train/dclm-filtered_sampled-ext/split_10518-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  4. train/dclm-filtered_sampled-ext/split_10518-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  5. train/dclm-filtered_sampled-ext/split_10563-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  6. train/dclm-filtered_sampled-ext/split_10563-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  7. train/dclm-filtered_sampled-ext/split_10563-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  8. train/dclm-filtered_sampled-ext/split_11210-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  9. train/dclm-filtered_sampled-ext/split_11210-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds +3 -0
  10. train/dclm-filtered_sampled-ext/split_11210-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  11. train/dclm-filtered_sampled-ext/split_11210-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  12. train/dclm-filtered_sampled-ext/split_11806-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  13. train/dclm-filtered_sampled-ext/split_11806-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  14. train/dclm-filtered_sampled-ext/split_11806-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  15. train/dclm-filtered_sampled-ext/split_12424-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  16. train/dclm-filtered_sampled-ext/split_12424-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds +3 -0
  17. train/dclm-filtered_sampled-ext/split_12424-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  18. train/dclm-filtered_sampled-ext/split_12424-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  19. train/dclm-filtered_sampled-ext/split_12769-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  20. train/dclm-filtered_sampled-ext/split_12769-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  21. train/dclm-filtered_sampled-ext/split_12769-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  22. train/dclm-filtered_sampled-ext/split_12976-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  23. train/dclm-filtered_sampled-ext/split_12976-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds +3 -0
  24. train/dclm-filtered_sampled-ext/split_12976-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  25. train/dclm-filtered_sampled-ext/split_12976-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  26. train/dclm-filtered_sampled-ext/split_13230-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds +3 -0
  27. train/dclm-filtered_sampled-ext/split_13889-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  28. train/dclm-filtered_sampled-ext/split_13889-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds +3 -0
  29. train/dclm-filtered_sampled-ext/split_13889-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  30. train/dclm-filtered_sampled-ext/split_13889-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  31. train/dclm-filtered_sampled-ext/split_14188-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  32. train/dclm-filtered_sampled-ext/split_14188-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  33. train/dclm-filtered_sampled-ext/split_14188-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  34. train/dclm-filtered_sampled-ext/split_1499-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  35. train/dclm-filtered_sampled-ext/split_1499-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  36. train/dclm-filtered_sampled-ext/split_1499-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  37. train/dclm-filtered_sampled-ext/split_16064-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  38. train/dclm-filtered_sampled-ext/split_16064-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds +3 -0
  39. train/dclm-filtered_sampled-ext/split_16064-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  40. train/dclm-filtered_sampled-ext/split_16064-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  41. train/dclm-filtered_sampled-ext/split_17404-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  42. train/dclm-filtered_sampled-ext/split_17404-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds +3 -0
  43. train/dclm-filtered_sampled-ext/split_17404-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  44. train/dclm-filtered_sampled-ext/split_17404-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  45. train/dclm-filtered_sampled-ext/split_18246-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  46. train/dclm-filtered_sampled-ext/split_18246-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  47. train/dclm-filtered_sampled-ext/split_18246-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  48. train/dclm-filtered_sampled-ext/split_19208-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  49. train/dclm-filtered_sampled-ext/split_19208-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  50. train/dclm-filtered_sampled-ext/split_19208-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
train/dclm-filtered_sampled-ext/split_10518-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67104422, "hashes": {}}, "samples": 13261, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 27966948, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 67080977, "hashes": {}}, "samples": 13680, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 28012960, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00002.mds", "bytes": 18527054, "hashes": {}}, "samples": 3712, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00002.mds.zstd", "bytes": 7719906, "hashes": {}}}], "version": 2}
train/dclm-filtered_sampled-ext/split_10518-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:779d272ef876ad5a0a09fc8d31c839a1271e5c21fdea2436b58510faddb6842d
+ size 18527054
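The .mds shards themselves are stored with Git LFS, so the diff shows only a three-line pointer: the LFS spec version, the sha256 oid, and the byte size. A sketch of verifying a fetched shard against its pointer; the local filename is hypothetical, while the oid and size are taken from the pointer above:

```python
# Hedged sketch: checking a downloaded shard against its LFS pointer.
import hashlib

EXPECTED_OID = "779d272ef876ad5a0a09fc8d31c839a1271e5c21fdea2436b58510faddb6842d"
EXPECTED_SIZE = 18527054

digest = hashlib.sha256()
size = 0
with open("shard.00002.mds", "rb") as f:  # hypothetical local path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, "size mismatch"
assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch"
```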
train/dclm-filtered_sampled-ext/split_10518-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 37519306, "total_tokens_skipped": 102, "percentiles": {"0th": 46, "10th": 226, "20th": 342, "30th": 456, "40th": 578, "50th": 716, "60th": 897, "70th": 1148, "80th": 1559, "90th": 2658, "95th": 4477, "99th": 8190, "100th": 8191}}
train/dclm-filtered_sampled-ext/split_10518-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-filtered_sampled-ext/split_10563-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107652, "hashes": {}}, "samples": 13380, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 27971452, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 67107616, "hashes": {}}, "samples": 13865, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 28070482, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00002.mds", "bytes": 16907480, "hashes": {}}, "samples": 3388, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00002.mds.zstd", "bytes": 7065873, "hashes": {}}}], "version": 2}
train/dclm-filtered_sampled-ext/split_10563-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 37123573, "total_tokens_skipped": 31, "percentiles": {"0th": 34, "10th": 232, "20th": 343, "30th": 455, "40th": 573, "50th": 709, "60th": 890, "70th": 1140, "80th": 1550, "90th": 2603, "95th": 4328, "99th": 8190, "100th": 8191}}
train/dclm-filtered_sampled-ext/split_10563-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-filtered_sampled-ext/split_11210-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67101562, "hashes": {}}, "samples": 13192, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 27648546, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 67108182, "hashes": {}}, "samples": 13146, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 27968705, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00002.mds", "bytes": 23805444, "hashes": {}}, "samples": 4349, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00002.mds.zstd", "bytes": 9790428, "hashes": {}}}], "version": 2}
train/dclm-filtered_sampled-ext/split_11210-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b619d8e64571171ad86468348bd643b0a26dd7af810d721e96b90750744a1a68
+ size 23805444
train/dclm-filtered_sampled-ext/split_11210-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 38844890, "total_tokens_skipped": 19, "percentiles": {"0th": 33, "10th": 226, "20th": 341, "30th": 446, "40th": 574, "50th": 722, "60th": 912, "70th": 1182, "80th": 1635, "90th": 2832, "95th": 4756, "99th": 8190, "100th": 8191}}
train/dclm-filtered_sampled-ext/split_11210-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-filtered_sampled-ext/split_11806-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67102650, "hashes": {}}, "samples": 12365, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 27746787, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 67092040, "hashes": {}}, "samples": 13100, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 27734570, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00002.mds", "bytes": 25188590, "hashes": {}}, "samples": 5220, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00002.mds.zstd", "bytes": 10391672, "hashes": {}}}], "version": 2}
train/dclm-filtered_sampled-ext/split_11806-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 39182137, "total_tokens_skipped": 26, "percentiles": {"0th": 33, "10th": 223, "20th": 339, "30th": 449, "40th": 578, "50th": 725, "60th": 918, "70th": 1185, "80th": 1658, "90th": 2935, "95th": 4799, "99th": 8190, "100th": 8191}}
train/dclm-filtered_sampled-ext/split_11806-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-filtered_sampled-ext/split_12424-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106066, "hashes": {}}, "samples": 13699, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 27769518, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 67107368, "hashes": {}}, "samples": 12811, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 27736358, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00002.mds", "bytes": 19849207, "hashes": {}}, "samples": 4085, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00002.mds.zstd", "bytes": 8276534, "hashes": {}}}], "version": 2}
train/dclm-filtered_sampled-ext/split_12424-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5eff511431a92e81af2babb101ccff168217fadd4dba33cb6effa3a18f43a519
+ size 19849207
train/dclm-filtered_sampled-ext/split_12424-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 37848836, "total_tokens_skipped": 84, "percentiles": {"0th": 43, "10th": 217, "20th": 335, "30th": 446, "40th": 578, "50th": 727, "60th": 911, "70th": 1173, "80th": 1631, "90th": 2745, "95th": 4423, "99th": 8190, "100th": 8191}}
train/dclm-filtered_sampled-ext/split_12424-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-filtered_sampled-ext/split_12769-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67104284, "hashes": {}}, "samples": 13473, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 28042128, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 67107139, "hashes": {}}, "samples": 13130, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 27915908, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00002.mds", "bytes": 20765921, "hashes": {}}, "samples": 4026, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00002.mds.zstd", "bytes": 8609011, "hashes": {}}}], "version": 2}
train/dclm-filtered_sampled-ext/split_12769-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 38095289, "total_tokens_skipped": 46, "percentiles": {"0th": 39, "10th": 223, "20th": 338, "30th": 452, "40th": 579, "50th": 719, "60th": 900, "70th": 1153, "80th": 1613, "90th": 2765, "95th": 4620, "99th": 8190, "100th": 8191}}
train/dclm-filtered_sampled-ext/split_12769-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-filtered_sampled-ext/split_12976-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67105742, "hashes": {}}, "samples": 12521, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 27706466, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 67108808, "hashes": {}}, "samples": 13549, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 27647865, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00002.mds", "bytes": 20404242, "hashes": {}}, "samples": 4568, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00002.mds.zstd", "bytes": 8494610, "hashes": {}}}], "version": 2}
train/dclm-filtered_sampled-ext/split_12976-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8ba2965da7bd5ebc5b6f69b2be69582f74c31f8f56df2f7a6f90d7f79431265
+ size 20404242
train/dclm-filtered_sampled-ext/split_12976-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 37989831, "total_tokens_skipped": 122, "percentiles": {"0th": 35, "10th": 219, "20th": 332, "30th": 436, "40th": 565, "50th": 712, "60th": 903, "70th": 1166, "80th": 1627, "90th": 2745, "95th": 4564, "99th": 8190, "100th": 8191}}
train/dclm-filtered_sampled-ext/split_12976-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-filtered_sampled-ext/split_13230-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:788957e1144174fa05187e7ae424e57616aa76d0a4d3df795cbe69ed0394e2bd
+ size 10878434
train/dclm-filtered_sampled-ext/split_13889-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107844, "hashes": {}}, "samples": 13676, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 27988353, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 67105317, "hashes": {}}, "samples": 13591, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 27942581, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00002.mds", "bytes": 14466198, "hashes": {}}, "samples": 3225, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00002.mds.zstd", "bytes": 6044963, "hashes": {}}}], "version": 2}
train/dclm-filtered_sampled-ext/split_13889-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a995d407f52c7bf175ecadfe0586e855005254cf9df3d7ade3d836335d60f47
+ size 14466198
train/dclm-filtered_sampled-ext/split_13889-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 36496117, "total_tokens_skipped": 29, "percentiles": {"0th": 29, "10th": 236, "20th": 366, "30th": 492, "40th": 618, "50th": 754, "60th": 924, "70th": 1165, "80th": 1545, "90th": 2479, "95th": 3986, "99th": 8190, "100th": 8191}}
train/dclm-filtered_sampled-ext/split_13889-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-filtered_sampled-ext/split_14188-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67096656, "hashes": {}}, "samples": 13275, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 27906407, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 67078635, "hashes": {}}, "samples": 12495, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 27759556, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00002.mds", "bytes": 25324028, "hashes": {}}, "samples": 4953, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00002.mds.zstd", "bytes": 10442861, "hashes": {}}}], "version": 2}
train/dclm-filtered_sampled-ext/split_14188-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 39209685, "total_tokens_skipped": 42, "percentiles": {"0th": 37, "10th": 224, "20th": 344, "30th": 459, "40th": 586, "50th": 733, "60th": 924, "70th": 1187, "80th": 1648, "90th": 2870, "95th": 4789, "99th": 8190, "100th": 8191}}
train/dclm-filtered_sampled-ext/split_14188-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-filtered_sampled-ext/split_1499-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106895, "hashes": {}}, "samples": 13467, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 27830860, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 67108415, "hashes": {}}, "samples": 13597, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 27893401, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00002.mds", "bytes": 16124502, "hashes": {}}, "samples": 3358, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00002.mds.zstd", "bytes": 6702545, "hashes": {}}}], "version": 2}
train/dclm-filtered_sampled-ext/split_1499-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 36915171, "total_tokens_skipped": 37, "percentiles": {"0th": 37, "10th": 244, "20th": 381, "30th": 517, "40th": 652, "50th": 802, "60th": 988, "70th": 1239, "80th": 1609, "90th": 2482, "95th": 3795, "99th": 8190, "100th": 8191}}
train/dclm-filtered_sampled-ext/split_1499-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-filtered_sampled-ext/split_16064-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107026, "hashes": {}}, "samples": 12866, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 27804320, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 67098683, "hashes": {}}, "samples": 13533, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 27979287, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00002.mds", "bytes": 19810884, "hashes": {}}, "samples": 4243, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00002.mds.zstd", "bytes": 8272468, "hashes": {}}}], "version": 2}
train/dclm-filtered_sampled-ext/split_16064-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b9878bccaacf79c740b255e3957f16ace0b8100c65b19941500fe3b454ed2e3
+ size 19810884
train/dclm-filtered_sampled-ext/split_16064-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 37852255, "total_tokens_skipped": 58, "percentiles": {"0th": 35, "10th": 222, "20th": 338, "30th": 449, "40th": 575, "50th": 715, "60th": 896, "70th": 1152, "80th": 1575, "90th": 2736, "95th": 4571, "99th": 8190, "100th": 8191}}
train/dclm-filtered_sampled-ext/split_16064-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-filtered_sampled-ext/split_17404-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107977, "hashes": {}}, "samples": 12755, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 27727978, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 67099428, "hashes": {}}, "samples": 12993, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 27793081, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00002.mds", "bytes": 26129914, "hashes": {}}, "samples": 4979, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00002.mds.zstd", "bytes": 10857091, "hashes": {}}}], "version": 2}
train/dclm-filtered_sampled-ext/split_17404-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e22603f06b5fbd86879f8bf1b5533ea3e4b98ff1ec4ed5bb468fdbfd13111ad
+ size 26129914
train/dclm-filtered_sampled-ext/split_17404-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 39419194, "total_tokens_skipped": 11, "percentiles": {"0th": 40, "10th": 221, "20th": 340, "30th": 452, "40th": 584, "50th": 737, "60th": 929, "70th": 1199, "80th": 1676, "90th": 2899, "95th": 4814, "99th": 8190, "100th": 8191}}
train/dclm-filtered_sampled-ext/split_17404-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-filtered_sampled-ext/split_18246-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67102551, "hashes": {}}, "samples": 13835, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 27978861, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 67105862, "hashes": {}}, "samples": 13877, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 27934149, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00002.mds", "bytes": 14089854, "hashes": {}}, "samples": 2811, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00002.mds.zstd", "bytes": 5764654, "hashes": {}}}], "version": 2}
train/dclm-filtered_sampled-ext/split_18246-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 36403962, "total_tokens_skipped": 49, "percentiles": {"0th": 35, "10th": 232, "20th": 356, "30th": 477, "40th": 603, "50th": 735, "60th": 909, "70th": 1138, "80th": 1535, "90th": 2517, "95th": 4050, "99th": 8190, "100th": 8191}}
train/dclm-filtered_sampled-ext/split_18246-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-filtered_sampled-ext/split_19208-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67100438, "hashes": {}}, "samples": 13481, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 27959533, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 67108310, "hashes": {}}, "samples": 12690, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 27802760, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00002.mds", "bytes": 23492380, "hashes": {}}, "samples": 4487, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00002.mds.zstd", "bytes": 9767065, "hashes": {}}}], "version": 2}
train/dclm-filtered_sampled-ext/split_19208-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 38762195, "total_tokens_skipped": 99, "percentiles": {"0th": 33, "10th": 227, "20th": 347, "30th": 463, "40th": 590, "50th": 736, "60th": 919, "70th": 1183, "80th": 1636, "90th": 2807, "95th": 4712, "99th": 8190, "100th": 8191}}
train/dclm-filtered_sampled-ext/split_19208-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff