Tasks: Token Classification
Formats: parquet
Sub-tasks: named-entity-recognition
Languages: Arabic
Size: 10K - 100K
License:
updated dataset_dict.json to include iaa_A and iaa_B splits.
dataset_dict.json (+23 -3)
@@ -2,7 +2,7 @@
   "splits": {
     "train": {
       "name": "train",
-      "num_examples":
+      "num_examples": 19532,
       "data_files": [
         {
           "filename": "train.parquet",
@@ -12,7 +12,7 @@
     },
     "validation": {
       "name": "validation",
-      "num_examples":
+      "num_examples": 1931,
       "data_files": [
         {
           "filename": "validation.parquet",
@@ -22,13 +22,33 @@
     },
     "test": {
       "name": "test",
-      "num_examples":
+      "num_examples": 1959,
      "data_files": [
         {
           "filename": "test.parquet",
           "url": "test.parquet"
         }
       ]
+    },
+    "iaa_A": {
+      "name": "iaa_A",
+      "num_examples": 5806,
+      "data_files": [
+        {
+          "filename": "iaa_A.parquet",
+          "url": "iaa_A.parquet"
+        }
+      ]
+    },
+    "iaa_B": {
+      "name": "iaa_B",
+      "num_examples": 5806,
+      "data_files": [
+        {
+          "filename": "iaa_B.parquet",
+          "url": "iaa_B.parquet"
+        }
+      ]
     }
   }
 }
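With the two inter-annotator-agreement splits registered, all five parquet files can be loaded together. A minimal sketch with the `datasets` library, assuming the parquet files sit in the current working directory (swap in the dataset repository path or resolved file URLs as needed; the file names come from the diff above):

from datasets import load_dataset

# File names as listed in dataset_dict.json; local paths are an assumption.
data_files = {
    "train": "train.parquet",
    "validation": "validation.parquet",
    "test": "test.parquet",
    "iaa_A": "iaa_A.parquet",  # 5806 examples, added in this commit
    "iaa_B": "iaa_B.parquet",  # 5806 examples, added in this commit
}

# Builds a DatasetDict with train/validation/test/iaa_A/iaa_B splits.
ds = load_dataset("parquet", data_files=data_files)
print(ds)

Loading iaa_A and iaa_B side by side is what makes the split useful: the same 5806 examples labelled by two annotators can be compared directly to compute inter-annotator agreement.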