# coding=utf-8
"""COSUJU: The Court Summaries and Judgements Dataset."""

from __future__ import absolute_import, division, print_function

import json
import re

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@InProceedings{huggingface:dataset,
    title = {CoSuJu: 500+ Court Judgements and Summaries for Machine Text Summarization},
    authors = {Busani Ndlovu, Luke Jordan},
    year = {2021}
}
"""

_DESCRIPTION = """\
Court Summaries and Judgements (CoSuJu) Dataset: 500+ court judgements paired
with their summaries, for machine text summarization.
"""

# Redundant for now, but may be useful in future.
_URL = "https://frtnx.github.io/cosuju-extractor/dataset/"

_URLS = {
    # 'train': _URL + 'train-v1.1.json',
    'train': _URL + 'train-v1.0.json'
}
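
# Expected layout of train-v1.0.json, inferred from _generate_examples below
# (a sketch of the shape this loader relies on, not an official schema):
#
#     {
#       "data": [
#         {
#           "id": "...", "title": "...", "url": "...",
#           "year": "...", "update_date": "...",
#           "summary_document": {"filename": "...", "file_url": "...", "file_content": "..."},
#           "judgement_document": {"filename": "...", "file_url": "...", "file_content": "..."}
#         }
#       ]
#     }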


class CosujuConfig(datasets.BuilderConfig):
    """BuilderConfig for COSUJU."""

    def __init__(self, **kwargs):
        """BuilderConfig for COSUJU.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(CosujuConfig, self).__init__(**kwargs)


class Cosuju(datasets.GeneratorBasedBuilder):
    """COSUJU: The Court Summaries and Judgements Dataset. Version 1.1.0"""

    VERSION = datasets.Version("1.1.0")

    # Allows a configuration to be chosen at run time, e.g.:
    # data = datasets.load_dataset('FRTNX/cosuju', 'include_no_summary')
    # data = datasets.load_dataset('FRTNX/cosuju', 'exclude_no_summary')
    BUILDER_CONFIGS = [
        CosujuConfig(name='include_no_summary', version=VERSION, description='Includes rows with no summary documents'),
        CosujuConfig(name='exclude_no_summary', version=VERSION, description='Excludes rows with no summary documents'),
    ]

    DEFAULT_CONFIG_NAME = 'exclude_no_summary'

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    'id': datasets.Value('string'),
                    'title': datasets.Value('string'),
                    'url': datasets.Value('string'),
                    'year': datasets.Value('string'),
                    'update_date': datasets.Value('string'),
                    'summary_document': {
                        'filename': datasets.Value('string'),
                        'file_url': datasets.Value('string'),
                        'file_content': datasets.Value('string')
                    },
                    'judgement_document': {
                        'filename': datasets.Value('string'),
                        'file_url': datasets.Value('string'),
                        'file_content': datasets.Value('string')
                    },
                    'judgement_paragraphs': [datasets.Value('string')],
                    'max_length_judgement_paras': datasets.Value('int64')
                }
            ),
            supervised_keys=None,
            homepage='https://huggingface.co/datasets/FRTNX/cosuju',
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': downloaded_files['train']})
        ]
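
    # Only a train split is published. If a held-out split is needed, it can be
    # carved out downstream with the standard datasets API (a usage sketch, not
    # part of this loader):
    #
    #     data = datasets.load_dataset('FRTNX/cosuju', 'exclude_no_summary')
    #     splits = data['train'].train_test_split(test_size=0.1)
    #     train_set, test_set = splits['train'], splits['test']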

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info('generating examples from = %s', filepath)
        with open(filepath, encoding='utf-8') as f:
            cosuju = json.load(f)
            for row in cosuju['data']:
                # Exclude cases with no summaries when the configuration demands it.
                if self.config.name == 'exclude_no_summary':
                    if not row['summary_document']:
                        continue
                    # Edge-case sanitization: skip rows whose summary document
                    # exists but has empty content.
                    if isinstance(row['summary_document'], dict) and \
                            row['summary_document']['file_content'] == '':
                        continue
                if 'judgement_document' not in row:
                    continue
                if not row['judgement_document']:
                    continue
                judgement_text = row['judgement_document']['file_content']
                if not judgement_text:
                    continue
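                # Judgement paragraphs are numbered in the source text, e.g.
                # "...end of previous paragraph.\n\n[1] The applicant seeks..."
                # (the sample text is illustrative); splitting on that pattern
                # yields one string per numbered paragraph.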
                judgement_paragraphs = re.split(r'\n\n\[\d+\]', judgement_text)
                # Flatten newlines, trim, and collapse runs of spaces.
                clean_paragraphs = [paragraph.replace('\n', ' ').strip() for paragraph in judgement_paragraphs]
                squeaky_clean_paragraphs = [re.sub(' +', ' ', paragraph) for paragraph in clean_paragraphs]
                # Drop form-feed artifacts and normalize non-breaking spaces.
                ultimate_paragraphs = [paragraph.replace('\x0c2', '').replace('\xa0', ' ') for paragraph in squeaky_clean_paragraphs]
                row['judgement_paragraphs'] = ultimate_paragraphs
                # Word count of the longest cleaned paragraph.
                row['max_length_judgement_paras'] = max(len(paragraph.split(' ')) for paragraph in ultimate_paragraphs)
                id_ = row['id']
                result = {
                    'id': row['id'],
                    'title': row['title'],
                    'url': row['url'],
                    'year': row['year'],
                    'update_date': row['update_date'],
                    'judgement_paragraphs': row['judgement_paragraphs'],
                    'max_length_judgement_paras': row['max_length_judgement_paras']
                }
                # Keep the output feature-compliant even when a document is missing.
                for prop in ['summary_document', 'judgement_document']:
                    if row[prop]:
                        result[prop] = row[prop]
                    else:
                        result[prop] = {'filename': '', 'file_url': '', 'file_content': ''}
                yield id_, result
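

# Minimal usage sketch, assuming this script is the published loader for the
# 'FRTNX/cosuju' Hub id named in the homepage above:
#
#     from datasets import load_dataset
#
#     data = load_dataset('FRTNX/cosuju', 'exclude_no_summary')
#     example = data['train'][0]
#     print(example['title'], example['max_length_judgement_paras'])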