Joelito committed
Commit 6f3c226 · 1 Parent(s): c1e32d7

Create MultiLegalPile_Chunks_500.py

Files changed (1)
  1. MultiLegalPile_Chunks_500.py +121 -0
MultiLegalPile_Chunks_500.py ADDED
@@ -0,0 +1,121 @@
+ """MultiLegalPile Chunks 500"""
+
+ import json
+
+ import datasets
+ from huggingface_hub.file_download import hf_hub_url
+
+ try:
+     import lzma as xz
+ except ImportError:
+     import pylzma as xz
+
+ datasets.logging.set_verbosity_info()
+ logger = datasets.logging.get_logger(__name__)
+
+ _CITATION = """
+ """
+
+ _DESCRIPTION = """
+ A chunked version of the MultiLegalPile dataset.
+ """
+
+ _URL = "https://huggingface.co/datasets/joelito/MultiLegalPile_Chunks_500"
+
+ _LANGUAGES = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr",
+               "hu", "it", "lt", "lv", "mt", "nl", "pl", "pt", "ro", "sk", "sl", "sv"]
+
+ _TYPES = ["caselaw", "contracts", "legislation", "other"]
+
+ _JURISDICTONS = ["Austria", "Belgium", "Bulgaria", "Croatia", "Czechia", "Denmark", "Estonia", "Finland",
+                  "France", "Germany", "Greece", "Hungary", "Ireland", "Italy", "Latvia", "Lithuania", "Luxembourg",
+                  "Malta", "Netherlands", "Poland", "Portugal", "Romania", "Slovakia", "Slovenia", "Spain", "Sweden",
+                  "EU", "Switzerland", "UK", "US", "Canada", "N/A"]
+
+ # IMPORTANT: Increase this once larger datasets are available (en_caselaw has 4 at the moment)
+ _HIGHEST_NUMBER_OF_SHARDS = 4
+
+
+ class MultiLegalPileChunks500Config(datasets.BuilderConfig):
+     """BuilderConfig for MultiLegalPileChunks500."""
+
+     def __init__(self, name: str, **kwargs):
+         """BuilderConfig for MultiLegalPileChunks500.
+         Args:
+             name: combination of language and type with _
+             language: One of bg,cs,da,de,el,en,es,et,fi,fr,ga,hr,hu,it,lt,lv,mt,nl,pl,pt,ro,sk,sl,sv or all
+             type: One of caselaw,contracts,legislation,other or all
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(MultiLegalPileChunks500Config, self).__init__(**kwargs)
+         self.name = name
+         self.language = name.split("_")[0]
+         self.type = name.split("_")[1]
+
+
+ class MultiLegalPileChunks500(datasets.GeneratorBasedBuilder):
+     """
+     MultiLegalPileChunks500:
+     A filtered dataset of multilingual legal data in the EU languages
+     """
+     BUILDER_CONFIG_CLASS = MultiLegalPileChunks500Config
+
+     BUILDER_CONFIGS = [MultiLegalPileChunks500Config(f"{language}_{type}")
+                        for type in _TYPES + ["all"]
+                        for language in _LANGUAGES + ["all"]]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "language": datasets.Value("string"),  # one of _LANGUAGES
+                     "type": datasets.Value("string"),  # one of _TYPES
+                     "jurisdiction": datasets.Value("string"),  # one of _JURISDICTONS
+                     "text": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_URL,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         def download_url(file_name):
+             url = hf_hub_url(repo_id="joelito/MultiLegalPile_Chunks_500",
+                              filename=f"data/{file_name}.jsonl.xz", repo_type="dataset")
+             return dl_manager.download(url)
+
+         languages = _LANGUAGES if self.config.language == "all" else [self.config.language]
+         types = _TYPES if self.config.type == "all" else [self.config.type]
+
+         split_generators = []
+         for split in [datasets.Split.TRAIN, datasets.Split.VALIDATION]:
+             filepaths = []
+             for language in languages:
+                 for type in types:
+                     for shard in range(_HIGHEST_NUMBER_OF_SHARDS):
+                         try:
+                             filepaths.append(download_url(f"{language}_{type}_{split}_{shard}"))
+                         except Exception:
+                             break  # we found the last shard
+             split_generators.append(
+                 datasets.SplitGenerator(name=split, gen_kwargs={"filepaths": filepaths})
+             )
+         return split_generators
+
+     def _generate_examples(self, filepaths):
+         """This function returns the examples in the raw (text) form by iterating on all the files."""
+         id_ = 0
+         for filepath in filepaths:
+             logger.info("Generating examples from = %s", filepath)
+             try:
+                 with xz.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
+                     for line in f:
+                         if line:
+                             example = json.loads(line)
+                             if example is not None and isinstance(example, dict):
+                                 yield id_, example
+                                 id_ += 1
+             except Exception:
+                 logger.exception("Error while processing file %s", filepath)
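
For reference, a minimal usage sketch for this loading script (not part of the commit): it assumes the script and the data/*.jsonl.xz shards are hosted in the joelito/MultiLegalPile_Chunks_500 dataset repo as above, and that configuration names follow the {language}_{type} pattern built in BUILDER_CONFIGS (e.g. de_caselaw, all_legislation, all_all). Recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets.

import datasets

# Load one configuration; "all_all" would combine every language and document type.
dataset = datasets.load_dataset("joelito/MultiLegalPile_Chunks_500", "de_caselaw", split="train")

# Each example exposes the features declared in _info(): language, type, jurisdiction, text.
example = dataset[0]
print(example["language"], example["type"], example["jurisdiction"])
print(example["text"][:200])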