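# Scrape the web sources cited by MegaWika 2 articles: for each article in a JSON-lines input file,
# download every cited URL, extract its text with trafilatura, and write the rehydrated articles
# back out as JSON lines (see `main` below for the command-line interface).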
from collections import defaultdict
from collections.abc import Iterable
import copy
import datetime
from functools import lru_cache
from io import TextIOBase
import json
from typing import Any, Optional

import click
import ftfy  # type: ignore
import requests
from tqdm import tqdm
import trafilatura  # type: ignore
from humanfriendly import format_number

# Download parameters
DOWNLOAD_TIMEOUT = 10  # Timeout in seconds of each HTTP request
DOWNLOAD_BUFFER_SIZE = 65_536  # Size of each HTTP request chunk in bytes
CODE_MAX_NUM_CHARS = 1_000_000  # Downloaded source code character threshold (applied after decoding to text)

# Extraction parameters
INCLUDE_COMMENTS = False  # Whether to include WP user comments in extracted text
INCLUDE_TABLES = True  # Whether to include markdown for HTML tables in extracted text
INCLUDE_LINKS = True  # Whether to include markdown for HTML links in extracted text
TEXT_MIN_NUM_WORDS = 100  # Extracted text word (whitespace-delimited token) threshold; None to disable
TEXT_MIN_NUM_CHARS = None  # Extracted text character threshold; None to disable


class DownloadTooLarge(Exception):
    pass


def download_url(url: str) -> tuple[Optional[str], dict[str, Any]]:
    """
    Fetch content at URL, returning downloaded source code and metadata.
    Metadata is represented as a dictionary containing citation keys and values.
    """
    source_code: Optional[str] = None
    content_type: Optional[str] = None
    num_chars: Optional[int] = None
    error: Optional[str] = None
    download_date = datetime.datetime.now().astimezone().isoformat()
    try:
        with requests.get(url, stream=True, timeout=DOWNLOAD_TIMEOUT) as response:
            content_type = response.headers.get('Content-Type')
            try:
                content = ''
                for chunk in response.iter_content(chunk_size=DOWNLOAD_BUFFER_SIZE, decode_unicode=True):
                    content += chunk
                    if len(content) > CODE_MAX_NUM_CHARS:
                        raise DownloadTooLarge()
                source_code = content
                num_chars = len(content)
            except DownloadTooLarge:
                source_code = None
                error = 'Download is too large'
    except Exception as ex:
        source_code = None
        error = f'{type(ex).__name__}: {ex}'
    if not source_code and error is None:
        # Only report emptiness if no more specific error was recorded above
        source_code = None
        error = 'Download is empty'
    metadata = dict(
        source_code_content_type=content_type,
        source_code_num_chars=num_chars,
        source_download_date=download_date,
        source_download_error=error,
    )
    return (source_code, metadata)
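
# Example (hypothetical URL; requires network access):
#   source_code, metadata = download_url('https://example.com/article')
#   # metadata keys: source_code_content_type, source_code_num_chars,
#   #                source_download_date, source_download_error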


def count_words(text: str) -> int:
    return len(text.split())


def has_sufficient_word_count(source_text: str) -> bool:
    return not TEXT_MIN_NUM_WORDS or count_words(source_text) > TEXT_MIN_NUM_WORDS


def has_sufficient_char_count(source_text: str) -> bool:
    return not TEXT_MIN_NUM_CHARS or len(source_text) > TEXT_MIN_NUM_CHARS
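
# With the defaults above, a candidate text passes the length filters only if it contains strictly
# more than TEXT_MIN_NUM_WORDS (100) whitespace-delimited words; TEXT_MIN_NUM_CHARS is None, so the
# character-count check is disabled.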


def extract_source_text(source_code: str) -> dict[str, Any]:
    """
    Given a source's HTML, extract and return textual content and error information
    as a dictionary containing citation keys and values.
    """
    text: Optional[str] = None
    error: Optional[str] = None
    try:
        text = trafilatura.extract(
            source_code,
            include_comments=INCLUDE_COMMENTS,
            include_tables=INCLUDE_TABLES,
            include_links=INCLUDE_LINKS,
        )
        if text is not None:
            text = ftfy.fix_text(text)
    except Exception as ex:
        error = f'{type(ex).__name__}: {ex}'
        text = None
    else:
        if not text:
            error = 'Text is empty'
            text = None
        elif not has_sufficient_word_count(text):
            error = f'Text is too short ({format_number(count_words(text))} words)'
            text = None
        elif not has_sufficient_char_count(text):
            error = f'Text is too short ({format_number(len(text))} characters)'
            text = None
    return dict(
        source_text=text,
        source_extract_error=error,
    )
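
# Sketch of the returned shape: {'source_text': <extracted text>, 'source_extract_error': None} on
# success, or {'source_text': None, 'source_extract_error': <reason>} when trafilatura fails, the
# text is empty, or the text falls below the thresholds above.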


@lru_cache(maxsize=None)  # Cache results per URL (assumption: functools.lru_cache, imported above, is the intended caching mechanism)
def scrape_source_fields(url: str) -> dict[str, Any]:
    """
    Download source code at URL and extract text from the downloaded source code,
    returning a dictionary of fields to add to the corresponding citation.
    This abstraction is a little awkward, but it facilitates caching.
    """
    download_fields: dict[str, Any] = {}
    extraction_fields: dict[str, Any] = {}
    if url:
        (source_code, download_fields) = download_url(url)
        if source_code:
            extraction_fields = extract_source_text(source_code)
    return download_fields | extraction_fields
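
# The merged dictionary carries the download metadata (source_code_content_type,
# source_code_num_chars, source_download_date, source_download_error) plus, when extraction ran,
# the extraction fields (source_text, source_extract_error).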


def scrape_source(citation: dict[str, Any]) -> dict[str, Any]:
    """
    Scrape sources for citation, storing scraped content in citation *in-place.*
    """
    if citation['url']:
        citation['dehydrated_citation'] = copy.deepcopy(citation)
        citation.update(scrape_source_fields(citation['url']))
    return citation
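
# Example (hypothetical citation; only the 'url' key is required by this function):
#   citation = {'content': 'Example source', 'url': 'https://example.com/page'}
#   scrape_source(citation)
#   # citation now also holds the scraped fields described above plus 'dehydrated_citation',
#   # a deep copy of the citation as it was before scraping.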


def update_citations_in_excerpts_with_citations(
    excerpts_with_citations: list[dict[str, Any]],
    new_citations: Iterable[dict[str, Any]],
) -> list[dict[str, Any]]:
    """
    Replace citations in `excerpts_with_citations` with the corresponding citations from `new_citations`,
    EXCEPT for the `char_index` field (leaving it as-is).
    Citations are aligned/matched by their content field.
    If a citation content string appears multiple times in `new_citations`, matching citations in
    `excerpts_with_citations` will be replaced with the final match in `new_citations`.
    `excerpts_with_citations` is modified *in-place,* and the updated list is returned.
    """
    ewc_citation_indices_by_key: dict[str, set[tuple[int, int]]] = defaultdict(set)
    for (i, ewc) in enumerate(excerpts_with_citations):
        for (j, citation) in enumerate(ewc['citations']):
            key = citation['content']
            ewc_citation_indices_by_key[key].add((i, j))
    for new_citation in new_citations:
        key = new_citation['content']
        for (i, j) in ewc_citation_indices_by_key[key]:
            excerpts_with_citations[i]['citations'][j] |= {
                k: v
                for (k, v) in new_citation.items()
                if k != 'char_index'
            }
    return excerpts_with_citations  # was modified in-place
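
# Worked example (hypothetical data): given an excerpt citation
#   {'content': 'Some source', 'char_index': 42}
# and a scraped citation
#   {'content': 'Some source', 'char_index': 0, 'source_text': '...'},
# the excerpt citation becomes
#   {'content': 'Some source', 'char_index': 42, 'source_text': '...'}
# (everything except `char_index` is taken from the scraped citation).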


def scrape_article_sources(article: dict[str, Any]) -> dict[str, Any]:
    """
    Scrape sources for all web citations in article, storing scraped content in article citation
    objects *in-place.* Return modified article for convenience.
    """
    all_citations = []
    for element in article['elements']:
        if element['type'] == 'paragraph':
            for sentence in element['sentences']:
                for citation in sentence['citations']:
                    scrape_source(citation)  # modifies in-place
                    all_citations.append(citation)
        elif element['type'] == 'heading':
            for citation in element['citations']:
                scrape_source(citation)  # modifies in-place
                all_citations.append(citation)
    # modifies in-place
    update_citations_in_excerpts_with_citations(article['excerpts_with_citations'], all_citations)
    return article
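
# Article layout assumed by this function: article['elements'] is a list in which 'paragraph'
# elements carry citations under element['sentences'][i]['citations'], 'heading' elements carry
# them under element['citations'], and article['excerpts_with_citations'] holds the excerpt-level
# copies that are synchronized afterwards.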


# The click decorators wire `main` to the command line so the bare `main()` call below works;
# the `--stream/--no-stream` option spelling and its default are assumptions.
@click.command()
@click.argument('input_file', type=click.File('r'))
@click.argument('output_file', type=click.File('w'))
@click.option('--stream/--no-stream', default=False,
              help='Process and write articles one at a time instead of reading them all into memory first.')
def main(input_file: TextIOBase, output_file: TextIOBase, stream: bool) -> None:
    """
    Scrape sources for all MegaWika 2 articles in JSON-lines (one JSON-encoded article per line) file INPUT_FILE,
    updating each web citation with source-scraping content and metadata and writing the articles with source content
    to JSON-lines file OUTPUT_FILE.

    Each web citation will be updated with `source_text` *and* source-scraping metadata fields; the original
    (dehydrated, metadata-only) web citation will be stored under the new citation field `dehydrated_citation`.
    """
    articles: Iterable[dict[str, Any]] = (json.loads(line) for line in input_file)
    if not stream:
        click.echo('Reading all articles into memory')
        articles = list(articles)
    for article in tqdm(articles, desc='Rehydrating', unit='article'):
        scrape_article_sources(article)
        output_file.write(json.dumps(article) + '\n')
    click.echo('Done')


if __name__ == '__main__':
    main()
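
# Usage sketch (the script filename is hypothetical and the option spelling is an assumption,
# per the note above `main`):
#   python scrape_sources.py articles.jsonl articles_with_sources.jsonl --stream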