mookiezi committed
Commit · b465002 · 1 Parent(s): b6d271b

Remove more blanks and ToS breaking content

Files changed:
- README.md +22 -20
- data/train.parquet +2 -2
- dataset_infos.json +5 -5
README.md CHANGED
@@ -114,20 +114,20 @@ The full end-to-end pipeline is documented in the [dataset-pipeline GitHub repos
 <div>
 | Metric                 |         Value |
 | ---------------------- | ------------: |
-| Samples (count)        | 7,
-| Total turns            | 16,881,
-| Total assistant turns  | 9,
-| Min length (tokens)    |
+| Samples (count)        |     7,300,966 |
+| Total turns            |    16,881,035 |
+| Total assistant turns  |     9,013,687 |
+| Min length (tokens)    |            12 |
 | Max length (tokens)    |         2,542 |
 | Mean length (tokens)   |         32.79 |
 | Median length (tokens) |            28 |
 | Std dev (tokens)       |         16.56 |
-| Skew                   | 6.
-| Kurtosis               | 326.
-| Total tokens           | 239,
-| Total characters       | 1,
-| Total words            | 139,
-| Avg chars per sample   | 170.
+| Skew                   |          6.05 |
+| Kurtosis               |        326.66 |
+| Total tokens           |   239,415,029 |
+| Total characters       | 1,241,973,327 |
+| Total words            |   139,902,554 |
+| Avg chars per sample   |        170.11 |
 | Avg words per sample   |         19.16 |
 | Avg chars per word     |          8.88 |
 | Tokens per char        |          0.19 |
@@ -136,11 +136,11 @@ The full end-to-end pipeline is documented in the [dataset-pipeline GitHub repos
 <div>
 | Tokens    |     Count |
 | --------- | --------: |
-| 8–16      |
-| 16–32     | 4,
-| 32–64     | 2,566,
-| 64–128    | 334,
-| 128–256   | 15,
+| 8–16      |   105,965 |
+| 16–32     | 4,277,610 |
+| 32–64     | 2,566,085 |
+| 64–128    |   334,825 |
+| 128–256   |    15,919 |
 | 256–384   |       363 |
 | 384–512   |        71 |
 | 512–768   |        78 |
@@ -152,11 +152,11 @@ The full end-to-end pipeline is documented in the [dataset-pipeline GitHub repos
 <div>
 | Turns |     Count |
 | ----- | --------: |
-| 2     | 5,
-| 3     | 1,038,
-| 4     | 304,
-| 5     | 96,
-| 6     | 38,
+| 2     | 5,792,613 |
+| 3     | 1,038,432 |
+| 4     |   304,428 |
+| 5     |    96,751 |
+| 6     |    38,617 |
 | 7     |    15,714 |
 | 8     |     7,108 |
 | 9     |     3,391 |
@@ -189,6 +189,8 @@ The full end-to-end pipeline is documented in the [dataset-pipeline GitHub repos

 Although filtering reduced the exchanges by about 75% (leaving roughly 7.5% of the full data dump), this dataset is still intended as a large-scale dump. For best training results, further curation to target high-signal data relevant to your goals is recommended.

+In addition to the raw text, the dataset includes supporting columns such as characters, words, tokens, and turns. These provide length statistics and turn counts per exchange, which can be directly useful for sampling, weighting, or filtering strategies.
+
 ---

 ## License
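The paragraph added at README line 192 names per-exchange columns (characters, words, tokens, turns). As a minimal sketch of the filtering use it suggests, assuming the schema exposes those exact column names and that the parquet file has been fetched locally, a length- and turn-based filter with the `datasets` library could look like:

```python
# Minimal sketch, assuming columns named "tokens" and "turns" exist exactly
# as described in the README paragraph; thresholds are illustrative only.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="data/train.parquet", split="train")

# Keep mid-length, multi-turn exchanges as a crude high-signal subset.
curated = ds.filter(lambda ex: 16 <= ex["tokens"] <= 256 and ex["turns"] >= 3)
print(f"kept {len(curated):,} of {len(ds):,} samples")
```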
data/train.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:241e350e7f651085c5c2cb4d5274f7cb671b84b3d5fba091101823678da454ec
+size 346784147
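The pointer file above holds only the object's SHA-256 and byte size; Git LFS resolves the actual blob at checkout. A hedged sketch for verifying a locally materialized train.parquet against the new pointer values (the local path is an assumption):

```python
# Sketch: recompute SHA-256 and size of the downloaded parquet and compare
# them to the LFS pointer fields above. The local path is an assumption.
import hashlib
from pathlib import Path

EXPECTED_OID = "241e350e7f651085c5c2cb4d5274f7cb671b84b3d5fba091101823678da454ec"
EXPECTED_SIZE = 346784147

path = Path("data/train.parquet")
digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert path.stat().st_size == EXPECTED_SIZE, "size mismatch with LFS pointer"
assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch with LFS pointer"
print("train.parquet matches the LFS pointer")
```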
dataset_infos.json CHANGED
@@ -14,14 +14,14 @@
     "splits": {
       "train": {
         "name": "train",
-        "num_bytes":
-        "num_examples":
+        "num_bytes": 346784147,
+        "num_examples": 7300966,
         "dataset_name": "default"
       }
     },
-    "download_size":
-    "dataset_size":
-    "size_in_bytes":
+    "download_size": 346784147,
+    "dataset_size": 346784147,
+    "size_in_bytes": 346784147,
     "data_files": {
       "train": [{ "filename": "data/train.parquet" }]
     }
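Since num_examples changed to 7,300,966, a quick consistency check against the parquet footer (which records the row count) can catch a stale metadata file. A sketch assuming the top-level config key is "default", inferred from the "dataset_name" field above:

```python
# Sketch: compare num_examples in dataset_infos.json with the parquet row
# count read from file metadata (no data is loaded). The "default" config
# key is an assumption inferred from "dataset_name": "default" above.
import json
import pyarrow.parquet as pq

with open("dataset_infos.json") as f:
    info = json.load(f)["default"]

expected = info["splits"]["train"]["num_examples"]
actual = pq.ParquetFile("data/train.parquet").metadata.num_rows
assert actual == expected, f"row count {actual:,} != recorded {expected:,}"
print(f"train split verified: {actual:,} examples")
```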