Convert dataset to Parquet

#6
by astijns - opened
This view is limited to 50 files because it contains too many changes. See the raw diff here.
Files changed (50) hide show
  1. README.md +265 -136
  2. cross_genre_1/test-00000-of-00001.parquet +3 -0
  3. cross_genre_1/train-00000-of-00001.parquet +3 -0
  4. cross_genre_1/validation-00000-of-00001.parquet +3 -0
  5. cross_genre_2/test-00000-of-00001.parquet +3 -0
  6. cross_genre_2/train-00000-of-00001.parquet +3 -0
  7. cross_genre_2/validation-00000-of-00001.parquet +3 -0
  8. cross_genre_3/test-00000-of-00001.parquet +3 -0
  9. cross_genre_3/train-00000-of-00001.parquet +3 -0
  10. cross_genre_3/validation-00000-of-00001.parquet +3 -0
  11. cross_genre_4/test-00000-of-00001.parquet +3 -0
  12. cross_genre_4/train-00000-of-00001.parquet +3 -0
  13. cross_genre_4/validation-00000-of-00001.parquet +3 -0
  14. cross_topic_1/test-00000-of-00001.parquet +3 -0
  15. cross_topic_1/train-00000-of-00001.parquet +3 -0
  16. cross_topic_1/validation-00000-of-00001.parquet +3 -0
  17. cross_topic_10/test-00000-of-00001.parquet +3 -0
  18. cross_topic_10/train-00000-of-00001.parquet +3 -0
  19. cross_topic_10/validation-00000-of-00001.parquet +3 -0
  20. cross_topic_11/test-00000-of-00001.parquet +3 -0
  21. cross_topic_11/train-00000-of-00001.parquet +3 -0
  22. cross_topic_11/validation-00000-of-00001.parquet +3 -0
  23. cross_topic_12/test-00000-of-00001.parquet +3 -0
  24. cross_topic_12/train-00000-of-00001.parquet +3 -0
  25. cross_topic_12/validation-00000-of-00001.parquet +3 -0
  26. cross_topic_2/test-00000-of-00001.parquet +3 -0
  27. cross_topic_2/train-00000-of-00001.parquet +3 -0
  28. cross_topic_2/validation-00000-of-00001.parquet +3 -0
  29. cross_topic_3/test-00000-of-00001.parquet +3 -0
  30. cross_topic_3/train-00000-of-00001.parquet +3 -0
  31. cross_topic_3/validation-00000-of-00001.parquet +3 -0
  32. cross_topic_4/test-00000-of-00001.parquet +3 -0
  33. cross_topic_4/train-00000-of-00001.parquet +3 -0
  34. cross_topic_4/validation-00000-of-00001.parquet +3 -0
  35. cross_topic_5/test-00000-of-00001.parquet +3 -0
  36. cross_topic_5/train-00000-of-00001.parquet +3 -0
  37. cross_topic_5/validation-00000-of-00001.parquet +3 -0
  38. cross_topic_6/test-00000-of-00001.parquet +3 -0
  39. cross_topic_6/train-00000-of-00001.parquet +3 -0
  40. cross_topic_6/validation-00000-of-00001.parquet +3 -0
  41. cross_topic_7/test-00000-of-00001.parquet +3 -0
  42. cross_topic_7/train-00000-of-00001.parquet +3 -0
  43. cross_topic_7/validation-00000-of-00001.parquet +3 -0
  44. cross_topic_8/test-00000-of-00001.parquet +3 -0
  45. cross_topic_8/train-00000-of-00001.parquet +3 -0
  46. cross_topic_8/validation-00000-of-00001.parquet +3 -0
  47. cross_topic_9/test-00000-of-00001.parquet +3 -0
  48. cross_topic_9/train-00000-of-00001.parquet +3 -0
  49. cross_topic_9/validation-00000-of-00001.parquet +3 -0
  50. guardian_authorship.py +0 -325
README.md CHANGED
@@ -20,7 +20,7 @@ task_ids:
20
  - topic-classification
21
  pretty_name: GuardianAuthorship
22
  dataset_info:
23
- - config_name: cross_topic_1
24
  features:
25
  - name: author
26
  dtype:
@@ -52,17 +52,17 @@ dataset_info:
52
  dtype: string
53
  splits:
54
  - name: train
55
- num_bytes: 677054
56
- num_examples: 112
57
  - name: test
58
- num_bytes: 1283126
59
- num_examples: 207
60
  - name: validation
61
- num_bytes: 374390
62
- num_examples: 62
63
- download_size: 3100749
64
- dataset_size: 2334570
65
- - config_name: cross_genre_1
66
  features:
67
  - name: author
68
  dtype:
@@ -94,17 +94,17 @@ dataset_info:
94
  dtype: string
95
  splits:
96
  - name: train
97
- num_bytes: 406144
98
  num_examples: 63
99
  - name: test
100
- num_bytes: 1657512
101
- num_examples: 269
102
  - name: validation
103
- num_bytes: 677054
104
- num_examples: 112
105
- download_size: 3100749
106
- dataset_size: 2740710
107
- - config_name: cross_topic_2
108
  features:
109
  - name: author
110
  dtype:
@@ -136,17 +136,17 @@ dataset_info:
136
  dtype: string
137
  splits:
138
  - name: train
139
- num_bytes: 677054
140
- num_examples: 112
141
  - name: test
142
- num_bytes: 1104764
143
- num_examples: 179
144
  - name: validation
145
- num_bytes: 552752
146
  num_examples: 90
147
- download_size: 3100749
148
- dataset_size: 2334570
149
- - config_name: cross_topic_3
150
  features:
151
  - name: author
152
  dtype:
@@ -178,17 +178,17 @@ dataset_info:
178
  dtype: string
179
  splits:
180
  - name: train
181
- num_bytes: 677054
182
- num_examples: 112
183
  - name: test
184
- num_bytes: 927138
185
- num_examples: 152
186
  - name: validation
187
- num_bytes: 730378
188
  num_examples: 117
189
- download_size: 3100749
190
- dataset_size: 2334570
191
- - config_name: cross_topic_4
192
  features:
193
  - name: author
194
  dtype:
@@ -220,17 +220,17 @@ dataset_info:
220
  dtype: string
221
  splits:
222
  - name: train
223
- num_bytes: 374390
224
- num_examples: 62
225
  - name: test
226
- num_bytes: 1283126
227
  num_examples: 207
228
  - name: validation
229
- num_bytes: 677054
230
- num_examples: 112
231
- download_size: 3100749
232
- dataset_size: 2334570
233
- - config_name: cross_topic_5
234
  features:
235
  - name: author
236
  dtype:
@@ -262,17 +262,17 @@ dataset_info:
262
  dtype: string
263
  splits:
264
  - name: train
265
- num_bytes: 374390
266
- num_examples: 62
267
  - name: test
268
- num_bytes: 1407428
269
- num_examples: 229
270
  - name: validation
271
- num_bytes: 552752
272
- num_examples: 90
273
- download_size: 3100749
274
- dataset_size: 2334570
275
- - config_name: cross_topic_6
276
  features:
277
  - name: author
278
  dtype:
@@ -304,17 +304,17 @@ dataset_info:
304
  dtype: string
305
  splits:
306
  - name: train
307
- num_bytes: 374390
308
- num_examples: 62
309
  - name: test
310
- num_bytes: 1229802
311
  num_examples: 202
312
  - name: validation
313
- num_bytes: 730378
314
- num_examples: 117
315
- download_size: 3100749
316
- dataset_size: 2334570
317
- - config_name: cross_topic_7
318
  features:
319
  - name: author
320
  dtype:
@@ -346,17 +346,17 @@ dataset_info:
346
  dtype: string
347
  splits:
348
  - name: train
349
- num_bytes: 552752
350
- num_examples: 90
351
  - name: test
352
- num_bytes: 1104764
353
- num_examples: 179
354
  - name: validation
355
- num_bytes: 677054
356
- num_examples: 112
357
- download_size: 3100749
358
- dataset_size: 2334570
359
- - config_name: cross_topic_8
360
  features:
361
  - name: author
362
  dtype:
@@ -388,17 +388,17 @@ dataset_info:
388
  dtype: string
389
  splits:
390
  - name: train
391
- num_bytes: 552752
392
- num_examples: 90
393
  - name: test
394
- num_bytes: 1407428
395
- num_examples: 229
396
  - name: validation
397
- num_bytes: 374390
398
- num_examples: 62
399
- download_size: 3100749
400
- dataset_size: 2334570
401
- - config_name: cross_topic_9
402
  features:
403
  - name: author
404
  dtype:
@@ -430,17 +430,17 @@ dataset_info:
430
  dtype: string
431
  splits:
432
  - name: train
433
- num_bytes: 552752
434
- num_examples: 90
435
  - name: test
436
- num_bytes: 1051440
437
- num_examples: 174
438
  - name: validation
439
- num_bytes: 730378
440
  num_examples: 117
441
- download_size: 3100749
442
- dataset_size: 2334570
443
- - config_name: cross_topic_10
444
  features:
445
  - name: author
446
  dtype:
@@ -472,17 +472,17 @@ dataset_info:
472
  dtype: string
473
  splits:
474
  - name: train
475
- num_bytes: 730378
476
- num_examples: 117
477
  - name: test
478
- num_bytes: 927138
479
- num_examples: 152
480
  - name: validation
481
- num_bytes: 677054
482
  num_examples: 112
483
- download_size: 3100749
484
- dataset_size: 2334570
485
- - config_name: cross_topic_11
486
  features:
487
  - name: author
488
  dtype:
@@ -514,17 +514,17 @@ dataset_info:
514
  dtype: string
515
  splits:
516
  - name: train
517
- num_bytes: 730378
518
- num_examples: 117
519
  - name: test
520
- num_bytes: 1229802
521
- num_examples: 202
522
  - name: validation
523
- num_bytes: 374390
524
- num_examples: 62
525
- download_size: 3100749
526
- dataset_size: 2334570
527
- - config_name: cross_topic_12
528
  features:
529
  - name: author
530
  dtype:
@@ -556,17 +556,17 @@ dataset_info:
556
  dtype: string
557
  splits:
558
  - name: train
559
- num_bytes: 730378
560
- num_examples: 117
561
  - name: test
562
- num_bytes: 1051440
563
- num_examples: 174
564
  - name: validation
565
- num_bytes: 552752
566
- num_examples: 90
567
- download_size: 3100749
568
- dataset_size: 2334570
569
- - config_name: cross_genre_2
570
  features:
571
  - name: author
572
  dtype:
@@ -598,17 +598,17 @@ dataset_info:
598
  dtype: string
599
  splits:
600
  - name: train
601
- num_bytes: 406144
602
- num_examples: 63
603
  - name: test
604
- num_bytes: 1960176
605
- num_examples: 319
606
  - name: validation
607
- num_bytes: 374390
608
- num_examples: 62
609
- download_size: 3100749
610
- dataset_size: 2740710
611
- - config_name: cross_genre_3
612
  features:
613
  - name: author
614
  dtype:
@@ -640,17 +640,17 @@ dataset_info:
640
  dtype: string
641
  splits:
642
  - name: train
643
- num_bytes: 406144
644
- num_examples: 63
645
  - name: test
646
- num_bytes: 1781814
647
- num_examples: 291
648
  - name: validation
649
- num_bytes: 552752
650
- num_examples: 90
651
- download_size: 3100749
652
- dataset_size: 2740710
653
- - config_name: cross_genre_4
654
  features:
655
  - name: author
656
  dtype:
@@ -682,16 +682,145 @@ dataset_info:
682
  dtype: string
683
  splits:
684
  - name: train
685
- num_bytes: 406144
686
- num_examples: 63
687
  - name: test
688
- num_bytes: 1604188
689
- num_examples: 264
690
  - name: validation
691
- num_bytes: 730378
692
  num_examples: 117
693
- download_size: 3100749
694
- dataset_size: 2740710
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
695
  ---
696
 
697
  # Dataset Card for "guardian_authorship"
 
20
  - topic-classification
21
  pretty_name: GuardianAuthorship
22
  dataset_info:
23
+ - config_name: cross_genre_1
24
  features:
25
  - name: author
26
  dtype:
 
52
  dtype: string
53
  splits:
54
  - name: train
55
+ num_bytes: 406140
56
+ num_examples: 63
57
  - name: test
58
+ num_bytes: 1657508
59
+ num_examples: 269
60
  - name: validation
61
+ num_bytes: 677050
62
+ num_examples: 112
63
+ download_size: 1776876
64
+ dataset_size: 2740698
65
+ - config_name: cross_genre_2
66
  features:
67
  - name: author
68
  dtype:
 
94
  dtype: string
95
  splits:
96
  - name: train
97
+ num_bytes: 406140
98
  num_examples: 63
99
  - name: test
100
+ num_bytes: 1960172
101
+ num_examples: 319
102
  - name: validation
103
+ num_bytes: 374386
104
+ num_examples: 62
105
+ download_size: 1777227
106
+ dataset_size: 2740698
107
+ - config_name: cross_genre_3
108
  features:
109
  - name: author
110
  dtype:
 
136
  dtype: string
137
  splits:
138
  - name: train
139
+ num_bytes: 406140
140
+ num_examples: 63
141
  - name: test
142
+ num_bytes: 1781810
143
+ num_examples: 291
144
  - name: validation
145
+ num_bytes: 552748
146
  num_examples: 90
147
+ download_size: 1781214
148
+ dataset_size: 2740698
149
+ - config_name: cross_genre_4
150
  features:
151
  - name: author
152
  dtype:
 
178
  dtype: string
179
  splits:
180
  - name: train
181
+ num_bytes: 406140
182
+ num_examples: 63
183
  - name: test
184
+ num_bytes: 1604184
185
+ num_examples: 264
186
  - name: validation
187
+ num_bytes: 730374
188
  num_examples: 117
189
+ download_size: 1776843
190
+ dataset_size: 2740698
191
+ - config_name: cross_topic_1
192
  features:
193
  - name: author
194
  dtype:
 
220
  dtype: string
221
  splits:
222
  - name: train
223
+ num_bytes: 677050
224
+ num_examples: 112
225
  - name: test
226
+ num_bytes: 1283122
227
  num_examples: 207
228
  - name: validation
229
+ num_bytes: 374386
230
+ num_examples: 62
231
+ download_size: 1516211
232
+ dataset_size: 2334558
233
+ - config_name: cross_topic_10
234
  features:
235
  - name: author
236
  dtype:
 
262
  dtype: string
263
  splits:
264
  - name: train
265
+ num_bytes: 730374
266
+ num_examples: 117
267
  - name: test
268
+ num_bytes: 927134
269
+ num_examples: 152
270
  - name: validation
271
+ num_bytes: 677050
272
+ num_examples: 112
273
+ download_size: 1511364
274
+ dataset_size: 2334558
275
+ - config_name: cross_topic_11
276
  features:
277
  - name: author
278
  dtype:
 
304
  dtype: string
305
  splits:
306
  - name: train
307
+ num_bytes: 730374
308
+ num_examples: 117
309
  - name: test
310
+ num_bytes: 1229798
311
  num_examples: 202
312
  - name: validation
313
+ num_bytes: 374386
314
+ num_examples: 62
315
+ download_size: 1511352
316
+ dataset_size: 2334558
317
+ - config_name: cross_topic_12
318
  features:
319
  - name: author
320
  dtype:
 
346
  dtype: string
347
  splits:
348
  - name: train
349
+ num_bytes: 730374
350
+ num_examples: 117
351
  - name: test
352
+ num_bytes: 1051436
353
+ num_examples: 174
354
  - name: validation
355
+ num_bytes: 552748
356
+ num_examples: 90
357
+ download_size: 1514954
358
+ dataset_size: 2334558
359
+ - config_name: cross_topic_2
360
  features:
361
  - name: author
362
  dtype:
 
388
  dtype: string
389
  splits:
390
  - name: train
391
+ num_bytes: 677050
392
+ num_examples: 112
393
  - name: test
394
+ num_bytes: 1104760
395
+ num_examples: 179
396
  - name: validation
397
+ num_bytes: 552748
398
+ num_examples: 90
399
+ download_size: 1516273
400
+ dataset_size: 2334558
401
+ - config_name: cross_topic_3
402
  features:
403
  - name: author
404
  dtype:
 
430
  dtype: string
431
  splits:
432
  - name: train
433
+ num_bytes: 677050
434
+ num_examples: 112
435
  - name: test
436
+ num_bytes: 927134
437
+ num_examples: 152
438
  - name: validation
439
+ num_bytes: 730374
440
  num_examples: 117
441
+ download_size: 1511364
442
+ dataset_size: 2334558
443
+ - config_name: cross_topic_4
444
  features:
445
  - name: author
446
  dtype:
 
472
  dtype: string
473
  splits:
474
  - name: train
475
+ num_bytes: 374386
476
+ num_examples: 62
477
  - name: test
478
+ num_bytes: 1283122
479
+ num_examples: 207
480
  - name: validation
481
+ num_bytes: 677050
482
  num_examples: 112
483
+ download_size: 1516211
484
+ dataset_size: 2334558
485
+ - config_name: cross_topic_5
486
  features:
487
  - name: author
488
  dtype:
 
514
  dtype: string
515
  splits:
516
  - name: train
517
+ num_bytes: 374386
518
+ num_examples: 62
519
  - name: test
520
+ num_bytes: 1407424
521
+ num_examples: 229
522
  - name: validation
523
+ num_bytes: 552748
524
+ num_examples: 90
525
+ download_size: 1515780
526
+ dataset_size: 2334558
527
+ - config_name: cross_topic_6
528
  features:
529
  - name: author
530
  dtype:
 
556
  dtype: string
557
  splits:
558
  - name: train
559
+ num_bytes: 374386
560
+ num_examples: 62
561
  - name: test
562
+ num_bytes: 1229798
563
+ num_examples: 202
564
  - name: validation
565
+ num_bytes: 730374
566
+ num_examples: 117
567
+ download_size: 1511352
568
+ dataset_size: 2334558
569
+ - config_name: cross_topic_7
570
  features:
571
  - name: author
572
  dtype:
 
598
  dtype: string
599
  splits:
600
  - name: train
601
+ num_bytes: 552748
602
+ num_examples: 90
603
  - name: test
604
+ num_bytes: 1104760
605
+ num_examples: 179
606
  - name: validation
607
+ num_bytes: 677050
608
+ num_examples: 112
609
+ download_size: 1516273
610
+ dataset_size: 2334558
611
+ - config_name: cross_topic_8
612
  features:
613
  - name: author
614
  dtype:
 
640
  dtype: string
641
  splits:
642
  - name: train
643
+ num_bytes: 552748
644
+ num_examples: 90
645
  - name: test
646
+ num_bytes: 1407424
647
+ num_examples: 229
648
  - name: validation
649
+ num_bytes: 374386
650
+ num_examples: 62
651
+ download_size: 1515780
652
+ dataset_size: 2334558
653
+ - config_name: cross_topic_9
654
  features:
655
  - name: author
656
  dtype:
 
682
  dtype: string
683
  splits:
684
  - name: train
685
+ num_bytes: 552748
686
+ num_examples: 90
687
  - name: test
688
+ num_bytes: 1051436
689
+ num_examples: 174
690
  - name: validation
691
+ num_bytes: 730374
692
  num_examples: 117
693
+ download_size: 1514954
694
+ dataset_size: 2334558
695
+ configs:
696
+ - config_name: cross_genre_1
697
+ data_files:
698
+ - split: train
699
+ path: cross_genre_1/train-*
700
+ - split: test
701
+ path: cross_genre_1/test-*
702
+ - split: validation
703
+ path: cross_genre_1/validation-*
704
+ - config_name: cross_genre_2
705
+ data_files:
706
+ - split: train
707
+ path: cross_genre_2/train-*
708
+ - split: test
709
+ path: cross_genre_2/test-*
710
+ - split: validation
711
+ path: cross_genre_2/validation-*
712
+ - config_name: cross_genre_3
713
+ data_files:
714
+ - split: train
715
+ path: cross_genre_3/train-*
716
+ - split: test
717
+ path: cross_genre_3/test-*
718
+ - split: validation
719
+ path: cross_genre_3/validation-*
720
+ - config_name: cross_genre_4
721
+ data_files:
722
+ - split: train
723
+ path: cross_genre_4/train-*
724
+ - split: test
725
+ path: cross_genre_4/test-*
726
+ - split: validation
727
+ path: cross_genre_4/validation-*
728
+ - config_name: cross_topic_1
729
+ data_files:
730
+ - split: train
731
+ path: cross_topic_1/train-*
732
+ - split: test
733
+ path: cross_topic_1/test-*
734
+ - split: validation
735
+ path: cross_topic_1/validation-*
736
+ - config_name: cross_topic_10
737
+ data_files:
738
+ - split: train
739
+ path: cross_topic_10/train-*
740
+ - split: test
741
+ path: cross_topic_10/test-*
742
+ - split: validation
743
+ path: cross_topic_10/validation-*
744
+ - config_name: cross_topic_11
745
+ data_files:
746
+ - split: train
747
+ path: cross_topic_11/train-*
748
+ - split: test
749
+ path: cross_topic_11/test-*
750
+ - split: validation
751
+ path: cross_topic_11/validation-*
752
+ - config_name: cross_topic_12
753
+ data_files:
754
+ - split: train
755
+ path: cross_topic_12/train-*
756
+ - split: test
757
+ path: cross_topic_12/test-*
758
+ - split: validation
759
+ path: cross_topic_12/validation-*
760
+ - config_name: cross_topic_2
761
+ data_files:
762
+ - split: train
763
+ path: cross_topic_2/train-*
764
+ - split: test
765
+ path: cross_topic_2/test-*
766
+ - split: validation
767
+ path: cross_topic_2/validation-*
768
+ - config_name: cross_topic_3
769
+ data_files:
770
+ - split: train
771
+ path: cross_topic_3/train-*
772
+ - split: test
773
+ path: cross_topic_3/test-*
774
+ - split: validation
775
+ path: cross_topic_3/validation-*
776
+ - config_name: cross_topic_4
777
+ data_files:
778
+ - split: train
779
+ path: cross_topic_4/train-*
780
+ - split: test
781
+ path: cross_topic_4/test-*
782
+ - split: validation
783
+ path: cross_topic_4/validation-*
784
+ - config_name: cross_topic_5
785
+ data_files:
786
+ - split: train
787
+ path: cross_topic_5/train-*
788
+ - split: test
789
+ path: cross_topic_5/test-*
790
+ - split: validation
791
+ path: cross_topic_5/validation-*
792
+ - config_name: cross_topic_6
793
+ data_files:
794
+ - split: train
795
+ path: cross_topic_6/train-*
796
+ - split: test
797
+ path: cross_topic_6/test-*
798
+ - split: validation
799
+ path: cross_topic_6/validation-*
800
+ - config_name: cross_topic_7
801
+ data_files:
802
+ - split: train
803
+ path: cross_topic_7/train-*
804
+ - split: test
805
+ path: cross_topic_7/test-*
806
+ - split: validation
807
+ path: cross_topic_7/validation-*
808
+ - config_name: cross_topic_8
809
+ data_files:
810
+ - split: train
811
+ path: cross_topic_8/train-*
812
+ - split: test
813
+ path: cross_topic_8/test-*
814
+ - split: validation
815
+ path: cross_topic_8/validation-*
816
+ - config_name: cross_topic_9
817
+ data_files:
818
+ - split: train
819
+ path: cross_topic_9/train-*
820
+ - split: test
821
+ path: cross_topic_9/test-*
822
+ - split: validation
823
+ path: cross_topic_9/validation-*
824
  ---
825
 
826
  # Dataset Card for "guardian_authorship"
cross_genre_1/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:00a4c9f690349a02f43f6cce6ceeff4833c3aefe3b53cc3287fbe3cb940f7f9f
3
+ size 1067589
cross_genre_1/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9abd1ef759794089923fe95a32788fdfedca18fdc6f97ca16fa26a943dacc469
3
+ size 268303
cross_genre_1/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d531aaef51bbb2cea7fdcc108c001d419f1e53cfe0c0b43c16ac59ab99855538
3
+ size 440984
cross_genre_2/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ed203476e8fb525ec544a737041cb2f0be5b325280d6e2a7c80427e1308678c
3
+ size 1269040
cross_genre_2/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9abd1ef759794089923fe95a32788fdfedca18fdc6f97ca16fa26a943dacc469
3
+ size 268303
cross_genre_2/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c02851e8657ae9b565a1b2ec05d0d5772295373271438935396fb2f131294071
3
+ size 239884
cross_genre_3/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:30a313c2af388833969546bcea0dcdf9aa99edcb8c02cc28dab9857642c8ff94
3
+ size 1150062
cross_genre_3/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9abd1ef759794089923fe95a32788fdfedca18fdc6f97ca16fa26a943dacc469
3
+ size 268303
cross_genre_3/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d8015763174def2de8436af55639bd22fc2d161dbfe380487d04a7a6c86088e
3
+ size 362849
cross_genre_4/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13f6f0d9af2d26681b583de64785baa55d6b7b66eaaec3ce6f1f76d4c91b9f48
3
+ size 1034440
cross_genre_4/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9abd1ef759794089923fe95a32788fdfedca18fdc6f97ca16fa26a943dacc469
3
+ size 268303
cross_genre_4/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dfa70c981e76fd37a9020053ac12a078f43badf70c0c393159e0bfcf5666d37
3
+ size 474100
cross_topic_1/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b78d0fc51aa800be1ebdc23b843236e40fdc4ea1e2631ae9e7d642a738bf16f
3
+ size 835343
cross_topic_1/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d531aaef51bbb2cea7fdcc108c001d419f1e53cfe0c0b43c16ac59ab99855538
3
+ size 440984
cross_topic_1/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c02851e8657ae9b565a1b2ec05d0d5772295373271438935396fb2f131294071
3
+ size 239884
cross_topic_10/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3555e8540e8dccb8accf4210d2f1c8f0871faa0af22583bcaac95ab8023976ba
3
+ size 596280
cross_topic_10/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dfa70c981e76fd37a9020053ac12a078f43badf70c0c393159e0bfcf5666d37
3
+ size 474100
cross_topic_10/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d531aaef51bbb2cea7fdcc108c001d419f1e53cfe0c0b43c16ac59ab99855538
3
+ size 440984
cross_topic_11/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9b4e12b379878cd4e85d44a9458bd4e392c76300dee5051dd089fa152fb5194
3
+ size 797368
cross_topic_11/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dfa70c981e76fd37a9020053ac12a078f43badf70c0c393159e0bfcf5666d37
3
+ size 474100
cross_topic_11/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c02851e8657ae9b565a1b2ec05d0d5772295373271438935396fb2f131294071
3
+ size 239884
cross_topic_12/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdd4931260e2a4119e72f35779f7a41f374b751503b657564a51245a7a1c0560
3
+ size 678005
cross_topic_12/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dfa70c981e76fd37a9020053ac12a078f43badf70c0c393159e0bfcf5666d37
3
+ size 474100
cross_topic_12/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d8015763174def2de8436af55639bd22fc2d161dbfe380487d04a7a6c86088e
3
+ size 362849
cross_topic_2/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa9ad805fd4b16a7418c52675bde1e03d408be023f00d7039981cb687fcbb145
3
+ size 712440
cross_topic_2/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d531aaef51bbb2cea7fdcc108c001d419f1e53cfe0c0b43c16ac59ab99855538
3
+ size 440984
cross_topic_2/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d8015763174def2de8436af55639bd22fc2d161dbfe380487d04a7a6c86088e
3
+ size 362849
cross_topic_3/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3555e8540e8dccb8accf4210d2f1c8f0871faa0af22583bcaac95ab8023976ba
3
+ size 596280
cross_topic_3/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d531aaef51bbb2cea7fdcc108c001d419f1e53cfe0c0b43c16ac59ab99855538
3
+ size 440984
cross_topic_3/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dfa70c981e76fd37a9020053ac12a078f43badf70c0c393159e0bfcf5666d37
3
+ size 474100
cross_topic_4/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b78d0fc51aa800be1ebdc23b843236e40fdc4ea1e2631ae9e7d642a738bf16f
3
+ size 835343
cross_topic_4/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c02851e8657ae9b565a1b2ec05d0d5772295373271438935396fb2f131294071
3
+ size 239884
cross_topic_4/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d531aaef51bbb2cea7fdcc108c001d419f1e53cfe0c0b43c16ac59ab99855538
3
+ size 440984
cross_topic_5/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e70e8f3d67c0ba7c77211d26317910fecfebec8417d9c17231a8103fcc8ee909
3
+ size 913047
cross_topic_5/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c02851e8657ae9b565a1b2ec05d0d5772295373271438935396fb2f131294071
3
+ size 239884
cross_topic_5/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d8015763174def2de8436af55639bd22fc2d161dbfe380487d04a7a6c86088e
3
+ size 362849
cross_topic_6/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9b4e12b379878cd4e85d44a9458bd4e392c76300dee5051dd089fa152fb5194
3
+ size 797368
cross_topic_6/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c02851e8657ae9b565a1b2ec05d0d5772295373271438935396fb2f131294071
3
+ size 239884
cross_topic_6/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dfa70c981e76fd37a9020053ac12a078f43badf70c0c393159e0bfcf5666d37
3
+ size 474100
cross_topic_7/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa9ad805fd4b16a7418c52675bde1e03d408be023f00d7039981cb687fcbb145
3
+ size 712440
cross_topic_7/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d8015763174def2de8436af55639bd22fc2d161dbfe380487d04a7a6c86088e
3
+ size 362849
cross_topic_7/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d531aaef51bbb2cea7fdcc108c001d419f1e53cfe0c0b43c16ac59ab99855538
3
+ size 440984
cross_topic_8/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e70e8f3d67c0ba7c77211d26317910fecfebec8417d9c17231a8103fcc8ee909
3
+ size 913047
cross_topic_8/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d8015763174def2de8436af55639bd22fc2d161dbfe380487d04a7a6c86088e
3
+ size 362849
cross_topic_8/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c02851e8657ae9b565a1b2ec05d0d5772295373271438935396fb2f131294071
3
+ size 239884
cross_topic_9/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdd4931260e2a4119e72f35779f7a41f374b751503b657564a51245a7a1c0560
3
+ size 678005
cross_topic_9/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d8015763174def2de8436af55639bd22fc2d161dbfe380487d04a7a6c86088e
3
+ size 362849
cross_topic_9/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dfa70c981e76fd37a9020053ac12a078f43badf70c0c393159e0bfcf5666d37
3
+ size 474100
guardian_authorship.py DELETED
@@ -1,325 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """This is an authorship attribution dataset based on the work of Stamatatos 2013. """
16
-
17
-
18
- import os
19
-
20
- import datasets
21
-
22
-
23
- _CITATION = """\
24
- @article{article,
25
- author = {Stamatatos, Efstathios},
26
- year = {2013},
27
- month = {01},
28
- pages = {421-439},
29
- title = {On the robustness of authorship attribution based on character n-gram features},
30
- volume = {21},
31
- journal = {Journal of Law and Policy}
32
- }
33
-
34
- @inproceedings{stamatatos2017authorship,
35
- title={Authorship attribution using text distortion},
36
- author={Stamatatos, Efstathios},
37
- booktitle={Proc. of the 15th Conf. of the European Chapter of the Association for Computational Linguistics},
38
- volume={1}
39
- pages={1138--1149},
40
- year={2017}
41
- }
42
- """
43
-
44
- _DESCRIPTION = """\
45
- A dataset cross-topic authorship attribution. The dataset is provided by Stamatatos 2013.
46
- 1- The cross-topic scenarios are based on Table-4 in Stamatatos 2017 (Ex. cross_topic_1 => row 1:P S U&W ).
47
- 2- The cross-genre scenarios are based on Table-5 in the same paper. (Ex. cross_genre_1 => row 1:B P S&U&W).
48
-
49
- 3- The same-topic/genre scenario is created by grouping all the datasts as follows.
50
- For ex., to use same_topic and split the data 60-40 use:
51
- train_ds = load_dataset('guardian_authorship', name="cross_topic_<<#>>",
52
- split='train[:60%]+validation[:60%]+test[:60%]')
53
- tests_ds = load_dataset('guardian_authorship', name="cross_topic_<<#>>",
54
- split='train[-40%:]+validation[-40%:]+test[-40%:]')
55
-
56
- IMPORTANT: train+validation+test[:60%] will generate the wrong splits because the data is imbalanced
57
-
58
- * See https://huggingface.co/docs/datasets/splits.html for detailed/more examples
59
- """
60
-
61
- _URL = "https://www.dropbox.com/s/lc5mje0owl9shms/Guardian.zip?dl=1"
62
-
63
-
64
- # Using a specific configuration class is optional, you can also use the base class if you don't need
65
- # to add specific attributes.
66
- # here we give an example for three sub-set of the dataset with difference sizes.
67
- class GuardianAuthorshipConfig(datasets.BuilderConfig):
68
- """BuilderConfig for NewDataset"""
69
-
70
- def __init__(self, train_folder, valid_folder, test_folder, **kwargs):
71
- """
72
- Args:
73
- Train_folder: Topic/genre used for training
74
- valid_folder: ~ ~ for validation
75
- test_folder: ~ ~ for testing
76
-
77
- **kwargs: keyword arguments forwarded to super.
78
- """
79
- super(GuardianAuthorshipConfig, self).__init__(**kwargs)
80
- self.train_folder = train_folder
81
- self.valid_folder = valid_folder
82
- self.test_folder = test_folder
83
-
84
-
85
- class GuardianAuthorship(datasets.GeneratorBasedBuilder):
86
- """dataset for same- and cross-topic authorship attribution"""
87
-
88
- config_counter = 0
89
- BUILDER_CONFIG_CLASS = GuardianAuthorshipConfig
90
- BUILDER_CONFIGS = [
91
- # cross-topic
92
- GuardianAuthorshipConfig(
93
- name=f"cross_topic_{1}",
94
- version=datasets.Version(f"{1}.0.0", description=f"The Original DS with the cross-topic scenario no.{1}"),
95
- train_folder="Politics",
96
- valid_folder="Society",
97
- test_folder="UK,World",
98
- ),
99
- GuardianAuthorshipConfig(
100
- name=f"cross_topic_{2}",
101
- version=datasets.Version(f"{2}.0.0", description=f"The Original DS with the cross-topic scenario no.{2}"),
102
- train_folder="Politics",
103
- valid_folder="UK",
104
- test_folder="Society,World",
105
- ),
106
- GuardianAuthorshipConfig(
107
- name=f"cross_topic_{3}",
108
- version=datasets.Version(f"{3}.0.0", description=f"The Original DS with the cross-topic scenario no.{3}"),
109
- train_folder="Politics",
110
- valid_folder="World",
111
- test_folder="Society,UK",
112
- ),
113
- GuardianAuthorshipConfig(
114
- name=f"cross_topic_{4}",
115
- version=datasets.Version(f"{4}.0.0", description=f"The Original DS with the cross-topic scenario no.{4}"),
116
- train_folder="Society",
117
- valid_folder="Politics",
118
- test_folder="UK,World",
119
- ),
120
- GuardianAuthorshipConfig(
121
- name=f"cross_topic_{5}",
122
- version=datasets.Version(f"{5}.0.0", description=f"The Original DS with the cross-topic scenario no.{5}"),
123
- train_folder="Society",
124
- valid_folder="UK",
125
- test_folder="Politics,World",
126
- ),
127
- GuardianAuthorshipConfig(
128
- name=f"cross_topic_{6}",
129
- version=datasets.Version(f"{6}.0.0", description=f"The Original DS with the cross-topic scenario no.{6}"),
130
- train_folder="Society",
131
- valid_folder="World",
132
- test_folder="Politics,UK",
133
- ),
134
- GuardianAuthorshipConfig(
135
- name=f"cross_topic_{7}",
136
- version=datasets.Version(f"{7}.0.0", description=f"The Original DS with the cross-topic scenario no.{7}"),
137
- train_folder="UK",
138
- valid_folder="Politics",
139
- test_folder="Society,World",
140
- ),
141
- GuardianAuthorshipConfig(
142
- name=f"cross_topic_{8}",
143
- version=datasets.Version(f"{8}.0.0", description=f"The Original DS with the cross-topic scenario no.{8}"),
144
- train_folder="UK",
145
- valid_folder="Society",
146
- test_folder="Politics,World",
147
- ),
148
- GuardianAuthorshipConfig(
149
- name=f"cross_topic_{9}",
150
- version=datasets.Version(f"{9}.0.0", description=f"The Original DS with the cross-topic scenario no.{9}"),
151
- train_folder="UK",
152
- valid_folder="World",
153
- test_folder="Politics,Society",
154
- ),
155
- GuardianAuthorshipConfig(
156
- name=f"cross_topic_{10}",
157
- version=datasets.Version(
158
- f"{10}.0.0", description=f"The Original DS with the cross-topic scenario no.{10}"
159
- ),
160
- train_folder="World",
161
- valid_folder="Politics",
162
- test_folder="Society,UK",
163
- ),
164
- GuardianAuthorshipConfig(
165
- name=f"cross_topic_{11}",
166
- version=datasets.Version(
167
- f"{11}.0.0", description=f"The Original DS with the cross-topic scenario no.{11}"
168
- ),
169
- train_folder="World",
170
- valid_folder="Society",
171
- test_folder="Politics,UK",
172
- ),
173
- GuardianAuthorshipConfig(
174
- name=f"cross_topic_{12}",
175
- version=datasets.Version(
176
- f"{12}.0.0", description=f"The Original DS with the cross-topic scenario no.{12}"
177
- ),
178
- train_folder="World",
179
- valid_folder="UK",
180
- test_folder="Politics,Society",
181
- ),
182
- # # cross-genre
183
- GuardianAuthorshipConfig(
184
- name=f"cross_genre_{1}",
185
- version=datasets.Version(f"{1}.0.0", description=f"The Original DS with the cross-genre scenario no.{1}"),
186
- train_folder="Books",
187
- valid_folder="Politics",
188
- test_folder="Society,UK,World",
189
- ),
190
- GuardianAuthorshipConfig(
191
- name=f"cross_genre_{2}",
192
- version=datasets.Version(f"{2}.0.0", description=f"The Original DS with the cross-genre scenario no.{2}"),
193
- train_folder="Books",
194
- valid_folder="Society",
195
- test_folder="Politics,UK,World",
196
- ),
197
- GuardianAuthorshipConfig(
198
- name=f"cross_genre_{3}",
199
- version=datasets.Version(f"{3}.0.0", description=f"The Original DS with the cross-genre scenario no.{3}"),
200
- train_folder="Books",
201
- valid_folder="UK",
202
- test_folder="Politics,Society,World",
203
- ),
204
- GuardianAuthorshipConfig(
205
- name=f"cross_genre_{4}",
206
- version=datasets.Version(f"{4}.0.0", description=f"The Original DS with the cross-genre scenario no.{4}"),
207
- train_folder="Books",
208
- valid_folder="World",
209
- test_folder="Politics,Society,UK",
210
- ),
211
- ]
212
-
213
- def _info(self):
214
- # Specifies the datasets.DatasetInfo object
215
- return datasets.DatasetInfo(
216
- # This is the description that will appear on the datasets page.
217
- description=_DESCRIPTION,
218
- features=datasets.Features(
219
- {
220
- # These are the features of your dataset like images, labels ...
221
- # There are 13 authors in this dataset
222
- "author": datasets.features.ClassLabel(
223
- names=[
224
- "catherinebennett",
225
- "georgemonbiot",
226
- "hugoyoung",
227
- "jonathanfreedland",
228
- "martinkettle",
229
- "maryriddell",
230
- "nickcohen",
231
- "peterpreston",
232
- "pollytoynbee",
233
- "royhattersley",
234
- "simonhoggart",
235
- "willhutton",
236
- "zoewilliams",
237
- ]
238
- ),
239
- # There are book reviews, and articles on the following four topics
240
- "topic": datasets.features.ClassLabel(names=["Politics", "Society", "UK", "World", "Books"]),
241
- "article": datasets.Value("string"),
242
- }
243
- ),
244
- # If there's a common (input, target) tuple from the features,
245
- # specify them here. They'll be used if as_supervised=True in
246
- # builder.as_dataset.
247
- supervised_keys=[("article", "author")],
248
- # Homepage of the dataset for documentation
249
- homepage="http://www.icsd.aegean.gr/lecturers/stamatatos/papers/JLP2013.pdf",
250
- citation=_CITATION,
251
- )
252
-
253
- def _split_generators(self, dl_manager):
254
- """Returns SplitGenerators."""
255
- # dl_manager is a datasets.download.DownloadManager that can be used to
256
- # download and extract URLs
257
- dl_dir = dl_manager.download_and_extract(_URL)
258
-
259
- # This folder contains the orginal/2013 dataset
260
- data_dir = os.path.join(dl_dir, "Guardian", "Guardian_original")
261
-
262
- return [
263
- datasets.SplitGenerator(
264
- name=datasets.Split.TRAIN,
265
- # These kwargs will be passed to _generate_examples
266
- gen_kwargs={"data_dir": data_dir, "samples_folders": self.config.train_folder, "split": "train"},
267
- ),
268
- datasets.SplitGenerator(
269
- name=datasets.Split.TEST,
270
- # These kwargs will be passed to _generate_examples
271
- gen_kwargs={"data_dir": data_dir, "samples_folders": self.config.test_folder, "split": "test"},
272
- ),
273
- datasets.SplitGenerator(
274
- name=datasets.Split.VALIDATION,
275
- # These kwargs will be passed to _generate_examples
276
- gen_kwargs={"data_dir": data_dir, "samples_folders": self.config.valid_folder, "split": "valid"},
277
- ),
278
- ]
279
-
280
- def _generate_examples(self, data_dir, samples_folders, split):
281
- """Yields examples."""
282
- # Yields (key, example) tuples from the dataset
283
-
284
- # Training and validation are on 1 topic/genre, while testing is on multiple topics
285
- # We convert the sample folders into list (from string)
286
- if samples_folders.count(",") == 0:
287
- samples_folders = [samples_folders]
288
- else:
289
- samples_folders = samples_folders.split(",")
290
-
291
- # the dataset is structured as:
292
- # |-Topic1
293
- # |---author 1
294
- # |------- article-1
295
- # |------- article-2
296
- # |---author 2
297
- # |------- article-1
298
- # |------- article-2
299
- # |-Topic2
300
- # ...
301
-
302
- for topic in samples_folders:
303
- full_path = os.path.join(data_dir, topic)
304
-
305
- for author in os.listdir(full_path):
306
-
307
- list_articles = os.listdir(os.path.join(full_path, author))
308
- if len(list_articles) == 0:
309
- # Some authors have no articles on certain topics
310
- continue
311
-
312
- for id_, article in enumerate(list_articles):
313
- path_2_author = os.path.join(full_path, author)
314
- path_2_article = os.path.join(path_2_author, article)
315
-
316
- with open(path_2_article, "r", encoding="utf8", errors="ignore") as f:
317
- art = f.readlines()
318
-
319
- # The whole article is stored as one line. We access the 1st element of the list
320
- # to store it as string, not as a list
321
- yield f"{topic}_{author}_{id_}", {
322
- "article": art[0],
323
- "author": author,
324
- "topic": topic,
325
- }