---
dataset_info:
  features:
  - name: id
    dtype: int64
  - name: url
    dtype: string
  - name: title
    dtype: string
  - name: content
    dtype: string
  splits:
  - name: train
    num_bytes: 127910
    num_examples: 100
  download_size: 67600
  dataset_size: 127910
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
license: apache-2.0
language:
- es
tags:
- legal
size_categories:
- n<1K
---
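
The script below builds the dataset. As a quick usage sketch, the published chunks can be loaded back with the `datasets` library (assuming the repository pushed at the end of the script is public):

```python
from datasets import load_dataset

# Repo id taken from the push_to_hub call in the build script below
ds = load_dataset("dariolopez/justicio-BOE-A-1978-31229-constitucion-100-chunks", split="train")

print(ds)                      # expected: 100 rows with id, url, title, content
print(ds[0]["content"][:200])  # opening characters of the first chunk
```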


```python
# -*- coding: utf-8 -*-
"""
Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1iAhLoc8FxHXijhyljdKhrIJbn342bhPD
"""

# Commented out IPython magic to ensure Python compatibility.
# %pip install --upgrade langchain langchain-community datasets beautifulsoup4 lxml

import requests
from bs4 import BeautifulSoup

CONFIG = {
    'title': 'Constitución Española',
    'url': "https://www.boe.es/diario_boe/xml.php?id=BOE-A-1978-31229",
    'chunk_size': 1300,
    'chunk_overlap': 150,
}

"""# Downloading BOE document"""

response = requests.get(CONFIG['url'])
response.raise_for_status()
# Parse the raw bytes so BeautifulSoup can detect the XML encoding declaration
soup = BeautifulSoup(response.content, "lxml")

filename = "constitucion.txt"
with open(filename, 'w', encoding='utf-8') as fn:
    # Extract the plain text of the law from the BOE XML payload
    text = soup.select_one("documento > texto").get_text()
    fn.write(text)
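
# Illustrative sanity check: print the opening characters of the extracted
# text (assumes the BOE XML keeps the body under <documento><texto>).
print(text[:200])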

"""# Splitting by chunks the document"""

from langchain_community.document_loaders import TextLoader

loader = TextLoader(filename, encoding="utf-8")
document = loader.load()

from langchain.text_splitter import RecursiveCharacterTextSplitter

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=CONFIG["chunk_size"],
    chunk_overlap=CONFIG["chunk_overlap"],
)

docs_chunks = text_splitter.split_documents(document)

print(len(docs_chunks))

docs_chunks  # displays the chunk list when run as a notebook cell
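
# Peek at the first chunk (illustrative). With chunk_size=1300 and
# chunk_overlap=150, consecutive chunks share up to 150 trailing/leading
# characters.
print(docs_chunks[0].page_content[:200])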

"""# Loading chunks in a dataset"""

from datasets import Dataset

data_dict = {
    'id': [],
    'url': [],
    'title': [],
    'content': []
}

# Build one row per chunk, all pointing at the same source document
for idx, chunk in enumerate(docs_chunks):
    data_dict['id'].append(idx)
    data_dict['url'].append(CONFIG['url'])
    data_dict['title'].append(CONFIG['title'])
    data_dict['content'].append(chunk.page_content)

dataset = Dataset.from_dict(data_dict)
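
# Inspect the dataset before pushing (illustrative): expect 100 rows with
# columns id, url, title and content, matching the card metadata above.
print(dataset)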

"""# Loading to HuggingFace"""

# Authenticate with a write token before pushing:
# !huggingface-cli login

dataset.push_to_hub("dariolopez/justicio-BOE-A-1978-31229-constitucion-100-chunks")
```