mookiezi
committed on
Commit
·
67a3071
1
Parent(s):
9904104
Remove more blanks and ToS breaking content
Browse files- CHANGEGLOG +2 -1
- README.md +71 -78
- data/train.parquet +2 -2
- dataset_infos.json +5 -5
CHANGEGLOG
CHANGED
@@ -3,4 +3,5 @@ v.02 - Further deduping
|
|
3 |
v.03 - ToS filtered. Added filters script repo
|
4 |
v.04 - Fixed end tags and emoticons having missing leading spaces
|
5 |
v.05 - Added dataset pipeline
|
6 |
-
v.06 - Removed entries with blank messages
|
|
|
|
3 |
v.03 - ToS filtered. Added filters script repo
|
4 |
v.04 - Fixed end tags and emoticons having missing leading spaces
|
5 |
v.05 - Added dataset pipeline
|
6 |
+
v.06 - Removed entries with blank messages
|
7 |
+
v.07 - Remove additional blanks and filtered for more ToS
|
README.md
CHANGED
@@ -27,7 +27,7 @@ size_categories:
|
|
27 |
|
28 |
> **Discord-Dialogues** is a large-scale dataset of anonymized Discord conversations from late spring to early fall 2025 for training and evaluating realistic conversational AI models in a ChatML-friendly format.
|
29 |
|
30 |
-
This dataset contains 7.
|
31 |
|
32 |
---
|
33 |
|
@@ -111,83 +111,76 @@ The full end-to-end pipeline is documented in the [dataset-pipeline GitHub repos
|
|
111 |
## Dataset Statistics <span style="font-weight:normal;">(using the [NousResearch/Hermes-3-Llama-3.1-8B tokenizer](https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B))</span>
|
112 |
|
113 |
<div style="display:flex; gap:20px; align-items:flex-start;">
|
114 |
-
|
115 |
-
|
116 |
-
|
|
117 |
-
|
|
118 |
-
|
|
119 |
-
|
|
120 |
-
|
|
121 |
-
|
|
122 |
-
|
|
123 |
-
|
|
124 |
-
|
|
125 |
-
|
|
126 |
-
|
|
127 |
-
| Total
|
128 |
-
| Total
|
129 |
-
|
|
130 |
-
| Avg
|
131 |
-
| Avg
|
132 |
-
|
|
133 |
-
|
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
138 |
-
|
139 |
-
|
|
140 |
-
|
|
141 |
-
|
|
142 |
-
|
|
143 |
-
|
|
144 |
-
|
|
145 |
-
|
|
146 |
-
|
|
147 |
-
|
|
148 |
-
|
|
149 |
-
|
|
150 |
-
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
|
|
159 |
-
|
|
160 |
-
|
|
161 |
-
|
|
162 |
-
|
|
163 |
-
|
|
164 |
-
|
|
165 |
-
|
|
166 |
-
|
|
167 |
-
|
|
168 |
-
|
|
169 |
-
|
|
170 |
-
|
|
171 |
-
|
|
172 |
-
|
|
173 |
-
|
|
174 |
-
|
|
175 |
-
|
|
176 |
-
|
|
177 |
-
|
|
178 |
-
|
|
179 |
-
|
|
180 |
-
|
|
181 |
-
|
|
182 |
-
|
|
183 |
-
|
184 |
-
| 27 | 2 |
|
185 |
-
| 29 | 1 |
|
186 |
-
| 32 | 1 |
|
187 |
-
| 33 | 2 |
|
188 |
-
|
189 |
-
</div>
|
190 |
-
|
191 |
</div>
|
192 |
|
193 |
---
|
|
|
27 |
|
28 |
> **Discord-Dialogues** is a large-scale dataset of anonymized Discord conversations from late spring to early fall 2025 for training and evaluating realistic conversational AI models in a ChatML-friendly format.
|
29 |
|
30 |
+
This dataset contains 7.3 million exchanges spread out over 16 million turns, with more than 139 million words.
|
31 |
|
32 |
---
|
33 |
|
|
|
111 |
## Dataset Statistics <span style="font-weight:normal;">(using the [NousResearch/Hermes-3-Llama-3.1-8B tokenizer](https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B))</span>
|
112 |
|
113 |
<div style="display:flex; gap:20px; align-items:flex-start;">
|
114 |
+
<div>
|
115 |
+
| Metric | Value |
|
116 |
+
| ---------------------- | ------------: |
|
117 |
+
| Samples (count) | 7,303,464 |
|
118 |
+
| Total turns | 16,881,010 |
|
119 |
+
| Total assistant turns | 9,016,287 |
|
120 |
+
| Min length (tokens) | 10 |
|
121 |
+
| Max length (tokens) | 2,542 |
|
122 |
+
| Mean length (tokens) | 32.79 |
|
123 |
+
| Median length (tokens) | 28 |
|
124 |
+
| Std dev (tokens) | 16.56 |
|
125 |
+
| Skew | 6.04 |
|
126 |
+
| Kurtosis | 326.54 |
|
127 |
+
| Total tokens | 239,458,213 |
|
128 |
+
| Total characters | 1,242,238,794 |
|
129 |
+
| Total words | 139,922,950 |
|
130 |
+
| Avg chars per sample | 170.09 |
|
131 |
+
| Avg words per sample | 19.16 |
|
132 |
+
| Avg chars per word | 8.88 |
|
133 |
+
| Tokens per char | 0.19 |
|
134 |
+
</div>
|
135 |
+
|
136 |
+
<div>
|
137 |
+
| Tokens | Count |
|
138 |
+
| --------- | --------: |
|
139 |
+
| 8–16 | 107,264 |
|
140 |
+
| 16–32 | 4,278,713 |
|
141 |
+
| 32–64 | 2,566,176 |
|
142 |
+
| 64–128 | 334,829 |
|
143 |
+
| 128–256 | 15,920 |
|
144 |
+
| 256–384 | 363 |
|
145 |
+
| 384–512 | 71 |
|
146 |
+
| 512–768 | 78 |
|
147 |
+
| 768–1024 | 30 |
|
148 |
+
| 1024–2048 | 17 |
|
149 |
+
| 2048–4096 | 3 |
|
150 |
+
</div>
|
151 |
+
|
152 |
+
<div>
|
153 |
+
| Turns | Count |
|
154 |
+
| ----- | --------: |
|
155 |
+
| 2 | 5,795,019 |
|
156 |
+
| 3 | 1,038,500 |
|
157 |
+
| 4 | 304,442 |
|
158 |
+
| 5 | 96,758 |
|
159 |
+
| 6 | 38,620 |
|
160 |
+
| 7 | 15,714 |
|
161 |
+
| 8 | 7,108 |
|
162 |
+
| 9 | 3,391 |
|
163 |
+
| 10 | 1,709 |
|
164 |
+
| 11 | 909 |
|
165 |
+
| 12 | 526 |
|
166 |
+
| 13 | 291 |
|
167 |
+
| 14 | 163 |
|
168 |
+
| 15 | 113 |
|
169 |
+
| 16 | 58 |
|
170 |
+
| 17 | 57 |
|
171 |
+
| 18 | 28 |
|
172 |
+
| 19 | 20 |
|
173 |
+
| 20 | 7 |
|
174 |
+
| 21 | 10 |
|
175 |
+
| 22 | 10 |
|
176 |
+
| 23 | 2 |
|
177 |
+
| 24 | 1 |
|
178 |
+
| 25 | 2 |
|
179 |
+
| 27 | 2 |
|
180 |
+
| 29 | 1 |
|
181 |
+
| 32 | 1 |
|
182 |
+
| 33 | 2 |
|
183 |
+
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
184 |
</div>
|
185 |
|
186 |
---
|
data/train.parquet
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:53aac105a033b9acfd3e4f039d2791284e6485366f79292e69a7e9a8439b67b8
|
3 |
+
size 346817370
|
dataset_infos.json
CHANGED
@@ -14,14 +14,14 @@
|
|
14 |
"splits": {
|
15 |
"train": {
|
16 |
"name": "train",
|
17 |
-
"num_bytes":
|
18 |
-
"num_examples":
|
19 |
"dataset_name": "default"
|
20 |
}
|
21 |
},
|
22 |
-
"download_size":
|
23 |
-
"dataset_size":
|
24 |
-
"size_in_bytes":
|
25 |
"data_files": {
|
26 |
"train": [{ "filename": "data/train.parquet" }]
|
27 |
}
|
|
|
14 |
"splits": {
|
15 |
"train": {
|
16 |
"name": "train",
|
17 |
+
"num_bytes": 346817370,
|
18 |
+
"num_examples": 7303464,
|
19 |
"dataset_name": "default"
|
20 |
}
|
21 |
},
|
22 |
+
"download_size": 346817370,
|
23 |
+
"dataset_size": 346817370,
|
24 |
+
"size_in_bytes": 346817370,
|
25 |
"data_files": {
|
26 |
"train": [{ "filename": "data/train.parquet" }]
|
27 |
}
|