|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""MEDLINE/PubMed data - Modified for full abstract text extraction.""" |
|
|
|
|
|
import copy

import gzip

import random

import xml.etree.ElementTree as ET



import datasets
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
|
|
|
|
_CITATION = """\ |
|
Courtesy of the U.S. National Library of Medicine. |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
NLM produces a baseline set of MEDLINE/PubMed citation records in XML format for download on an annual basis. |
|
The annual baseline is released in December of each year. Each day, NLM produces update files that include |
|
new, revised and deleted citations. See our documentation page for more information. |
|
This version is modified to extract the full text from structured abstracts. |
|
""" |
|
|
|
_HOMEPAGE = "https://www.nlm.nih.gov/databases/download/pubmed_medline.html" |
|
|
|
_LICENSE = "" |
|
|
|
|
|
# Stratified sampling of the 2025 annual baseline, which ships 1274 files
# (pubmed25n0001 .. pubmed25n1274): the files are split into num_bins bins,
# one candidate file is drawn per bin, and total_urls of those candidates are
# kept. Integer bin sizing leaves the last total_files % num_bins files out
# of the candidate pool.
total_files = 1274
num_bins = 50
total_urls = 20

# Fix the seed so the sampled subset (and hence the built dataset) is
# reproducible across runs; any constant works.
random.seed(0)

bin_size = total_files // num_bins

selected_indices = []
for b in range(num_bins):
    start = b * bin_size + 1
    end = min((b + 1) * bin_size + 1, total_files + 1)
    if start < end:
        selected_indices.append(random.randint(start, end - 1))

selected_indices = sorted(random.sample(selected_indices, total_urls))
|
|
|
|
|
_URLs = [f"https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed25n{i:04d}.xml.gz" for i in range(1200, 1274)] |
|
|
|
|
|
|
|
def deepupdate(target, src): |
|
"""Deep update target dict with src |
|
For each k,v in src: if k doesn't exist in target, it is deep copied from |
|
src to target. Otherwise, if v is a list, target[k] is extended with |
|
    src[k]. If v is a set, target[k] is updated with v. If v is a dict, it is

    recursively deep-updated.
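
    Illustrative example:

        >>> target = {"a": 1, "b": [1], "c": {"x": 1}}
        >>> deepupdate(target, {"b": [2], "c": {"y": 2}, "d": 3})
        >>> target
        {'a': 1, 'b': [1, 2], 'c': {'x': 1, 'y': 2}, 'd': 3}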
|
""" |
|
for k, v in src.items(): |
|
|
|
if k in target: |
|
target_type = type(target[k]) |
|
v_type = type(v) |
|
|
|
|
|
if target_type == int and v_type == str: |
|
try: |
|
v_int = int(v) |
|
v = v_int |
|
v_type = int |
|
except (ValueError, TypeError): |
|
logger.warning(f"Field '{k}': Could not convert string '{v}' to expected type {target_type}. Skipping update.") |
|
continue |
|
|
|
|
|
if target_type != v_type and not (isinstance(target[k], list) and isinstance(v, list)) \ |
|
and not (isinstance(target[k], dict) and isinstance(v, dict)) \ |
|
and not (isinstance(target[k], set) and isinstance(v, set)): |
|
|
|
if isinstance(target[k], list) and not isinstance(v, list): |
|
logger.warning(f"Field '{k}': Trying to update a list with a non-list ({v_type}). Wrapping source value in a list.") |
|
v = [v] |
|
|
|
elif isinstance(target[k], dict) and not isinstance(v, dict): |
|
logger.warning(f"Field '{k}': Trying to update a dict with a non-dict ({v_type}). Skipping update.") |
|
continue |
|
|
|
else: |
|
logger.warning(f"Field '{k}': Type mismatch. Target is {target_type}, Source is {v_type}. Skipping update.") |
|
continue |
|
|
|
|
|
|
|
if isinstance(v, list): |
|
if k not in target: |
|
target[k] = copy.deepcopy(v) |
|
elif isinstance(target[k], list): |
|
target[k].extend(v) |
|
|
|
|
|
else: |
|
logger.warning(f"Field '{k}': Trying to extend a non-list ({type(target[k])}) with a list. Replacing value.") |
|
target[k] = copy.deepcopy(v) |
|
|
|
elif isinstance(v, dict): |
|
if k not in target: |
|
target[k] = copy.deepcopy(v) |
|
elif isinstance(target[k], dict): |
|
deepupdate(target[k], v) |
|
else: |
|
logger.warning(f"Field '{k}': Trying to update a non-dict ({type(target[k])}) with a dict. Replacing value.") |
|
target[k] = copy.deepcopy(v) |
|
|
|
elif isinstance(v, set): |
|
if k not in target: |
|
target[k] = v.copy() |
|
elif isinstance(target[k], set): |
|
target[k].update(v.copy()) |
|
else: |
|
logger.warning(f"Field '{k}': Trying to update a non-set ({type(target[k])}) with a set. Replacing value.") |
|
target[k] = v.copy() |
|
|
|
|
|
else: |
|
|
|
if k in target and isinstance(target[k], (list, tuple, dict, set)): |
|
logger.warning(f"Field '{k}': Trying to overwrite a structured type ({type(target[k])}) with a primitive ({type(v)}). Skipping update.") |
|
continue |
|
target[k] = copy.copy(v) |
|
|
|
|
|
def default_date(): |
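    """Return a default date record; 0 marks a missing Year/Month/Day."""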
|
|
|
|
|
return {"Year": 0, "Month": 0, "Day": 0} |
|
|
|
|
|
def default_inline_article(): |
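    """Return default values for the Article sub-record, mirroring the Article features."""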
|
|
|
return { |
|
|
|
"Abstract": {"AbstractText": ""}, |
|
"ArticleTitle": "", |
|
|
|
"AuthorList": {"Author": []}, |
|
"Language": "", |
|
"GrantList": { |
|
"Grant": [], |
|
}, |
|
"PublicationTypeList": {"PublicationType": []}, |
|
} |
|
|
|
|
|
def default_article(): |
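    """Return a schema-complete article skeleton that parsed records are merged into via deepupdate()."""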
|
|
|
return { |
|
"MedlineCitation": { |
|
"PMID": 0, |
|
"DateCompleted": default_date(), |
|
"NumberOfReferences": 0, |
|
"DateRevised": default_date(), |
|
"Article": default_inline_article(), |
|
"MedlineJournalInfo": {"Country": ""}, |
|
"ChemicalList": {"Chemical": []}, |
|
"CitationSubset": "", |
|
"MeshHeadingList": {"MeshHeading": []}, |
|
}, |
|
"PubmedData": { |
|
"ArticleIdList": [], |
|
"PublicationStatus": "", |
|
"History": {"PubMedPubDate": []}, |
|
"ReferenceList": [], |
|
}, |
|
} |
|
|
|
|
|
class Pubmed(datasets.GeneratorBasedBuilder): |
|
"""Pubmed citations records - Modified for full abstract text""" |
|
|
|
|
|
|
|
BUILDER_CONFIGS = [ |
|
datasets.BuilderConfig(name="2025", description="Subset of 2025 annual record with full abstract parsing", version=datasets.Version("6.0.0")), |
|
] |
|
|
|
|
|
    # Populated from the features in _info(); these sets steer
    # xml_to_dictionary(): keys in LIST_KEYS always become lists, keys in
    # SIMPLE_KEYS stay scalar, and keys in IGNORE_KEYS are dropped.
    SIMPLE_KEYS = set()

    LIST_KEYS = set()

    IGNORE_KEYS = set()
|
|
|
def fill_keys_from_features(self, features): |
|
"""Recursively populates SIMPLE_KEYS and LIST_KEYS based on the dataset features.""" |
|
if isinstance(features, dict): |
|
for key, value in features.items(): |
|
if isinstance(value, datasets.Sequence): |
|
self.LIST_KEYS.add(key) |
|
|
|
self.fill_keys_from_features(value.feature) |
|
elif isinstance(value, datasets.Value): |
|
|
|
self.SIMPLE_KEYS.add(key) |
|
elif isinstance(value, dict): |
|
self.SIMPLE_KEYS.add(key) |
|
self.fill_keys_from_features(value) |
|
|
|
elif isinstance(features, datasets.Sequence): |
|
self.fill_keys_from_features(features.feature) |
|
|
|
def get_full_abstract_text(self, abstract_element): |
|
""" |
|
Extracts and concatenates text from all AbstractText elements |
|
within the given Abstract element. Handles structured abstracts and inline markup. |
|
Uses ElementTree's itertext for robustness. |
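
        Illustrative example: a structured abstract such as

            <Abstract>
              <AbstractText Label="BACKGROUND">Text A.</AbstractText>
              <AbstractText Label="METHODS">Text B.</AbstractText>
            </Abstract>

        is flattened to "BACKGROUND: Text A.\nMETHODS: Text B.".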
|
""" |
|
if abstract_element is None: |
|
return "" |
|
|
|
|
|
|
|
|
|
abstract_text_elements = abstract_element.findall('./AbstractText') |
|
|
|
full_text_parts = [] |
|
if not abstract_text_elements: |
|
|
|
|
|
abstract_text_elements = abstract_element.findall('.//AbstractText') |
|
if not abstract_text_elements: |
|
|
|
all_text = [text.strip() for text in abstract_element.itertext() if text and text.strip()] |
|
if all_text: |
|
logger.debug(f"Found abstract text directly within <Abstract> or nested tags (no <AbstractText>): {' '.join(all_text)}") |
|
return "\n".join(all_text) |
|
else: |
|
return "" |
|
|
|
|
|
for text_element in abstract_text_elements: |
|
|
|
label = text_element.get('Label') |
|
nlm_category = text_element.get('NlmCategory') |
|
|
|
|
|
|
|
|
|
|
|
text_content = " ".join(text.strip() for text in text_element.itertext() if text and text.strip()) |
|
|
|
|
|
current_part = "" |
|
if label: |
|
current_part += f"{label}: " |
|
|
|
|
|
|
|
current_part += text_content |
|
|
|
if current_part: |
|
full_text_parts.append(current_part) |
|
|
|
|
|
return "\n".join(full_text_parts) |
|
|
|
|
|
    def xml_to_dictionary(self, parentElement):
|
""" |
|
Recursively converts an XML element and its children into a dictionary, |
|
guided by SIMPLE_KEYS and LIST_KEYS derived from the dataset schema. |
|
Includes specific handling for Abstract and ArticleTitle. |
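
        Illustrative example: <Language>eng</Language> becomes
        {"Language": "eng"}; repeated sibling tags declared as sequences in
        the features are merged into lists via LIST_KEYS.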
|
""" |
|
data = {} |
|
|
|
|
|
|
|
if parentElement.tag == "ArticleTitle": |
|
|
|
|
|
inner_xml_parts = [ET.tostring(e, encoding='unicode', method='xml') for e in parentElement] |
|
full_content = (parentElement.text or "").strip() + "".join(inner_xml_parts) + (parentElement.tail or "").strip() |
|
|
|
full_content = ' '.join(full_content.split()) |
|
|
|
return {parentElement.tag: full_content.strip()} |
|
|
|
|
|
children = list(parentElement) |
|
for child in children: |
|
key = child.tag |
|
value = None |
|
|
|
|
|
if key == "Abstract": |
|
full_abstract = self.get_full_abstract_text(child) |
|
|
|
value = {"AbstractText": full_abstract} |
|
|
|
elif key == "AbstractText": |
|
|
|
continue |
|
|
|
else: |
|
|
|
|
|
child_text = child.text if child.text is not None else "" |
|
|
|
if len(child) == 0: |
|
value = child_text.strip() |
|
else: |
|
                    recursive_result = self.xml_to_dictionary(child)
|
|
|
if isinstance(recursive_result, dict) and key in recursive_result: |
|
value = recursive_result[key] |
|
else: |
|
|
|
logger.warning(f"Unexpected recursive result for <{key}>: {recursive_result}. Using direct text if available.") |
|
value = child_text.strip() |
|
|
|
|
|
if value is None or value == "": |
|
|
|
|
|
continue |
|
|
|
if key in data: |
|
if isinstance(data[key], list): |
|
data[key].append(value) |
|
else: |
|
data[key] = [data[key], value] |
|
elif key in self.LIST_KEYS: |
|
|
|
if isinstance(value, list): |
|
data[key] = value |
|
else: |
|
data[key] = [value] |
|
elif key in self.SIMPLE_KEYS: |
|
data[key] = value |
|
elif key in self.IGNORE_KEYS: |
|
continue |
|
else: |
|
|
|
|
|
|
|
if key not in ["PublicationStatus", "CitationSubset"]: |
|
logger.info(f"Ignoring unexpected key '{key}' found under <{parentElement.tag}>. Content: {value}. Add to Features or IGNORE_KEYS if needed.") |
|
self.IGNORE_KEYS.add(key) |
|
else: |
|
if key in self.LIST_KEYS: |
|
data[key] = [value] |
|
else: |
|
data[key] = value |
|
|
|
|
|
|
|
if parentElement.tag == "MeshHeading": |
|
if "QualifierName" not in data: data["QualifierName"] = "" |
|
elif parentElement.tag == "Author": |
|
if "LastName" not in data: data["LastName"] = "" |
|
if "ForeName" not in data: data["ForeName"] = "" |
|
if "Initials" not in data: data["Initials"] = "" |
|
if "CollectiveName" not in data: data["CollectiveName"] = "" |
|
elif parentElement.tag == "JournalIssue": |
|
if "Volume" not in data: data["Volume"] = "" |
|
if "Issue" not in data: data["Issue"] = "" |
|
|
|
elif parentElement.tag == "Grant": |
|
if "GrantID" not in data: data["GrantID"] = "" |
|
|
|
|
|
|
|
|
|
|
|
if data or parentElement.text: |
|
|
|
if not data and parentElement.text and parentElement.text.strip(): |
|
return {parentElement.tag: parentElement.text.strip()} |
|
else: |
|
|
|
if parentElement.attrib: |
|
|
|
attr_dict = {f"@{k}": v for k, v in parentElement.attrib.items() if k not in data} |
|
data.update(attr_dict) |
|
return {parentElement.tag: data} |
|
else: |
|
|
|
return {parentElement.tag: {}} |
|
|
|
|
|
def _info(self): |
|
"""Defines the dataset schema.""" |
|
|
|
Date = { |
|
"Year": datasets.Value("int32"), |
|
"Month": datasets.Value("int32"), |
|
"Day": datasets.Value("int32"), |
|
} |
|
|
|
MeshHeading = {"DescriptorName": datasets.Value("string"), "QualifierName": datasets.Value("string")} |
|
|
|
MedlineJournalInfo = { |
|
"Country": datasets.Value("string"), |
|
|
|
|
|
|
|
|
|
} |
|
Chemical = { |
|
"RegistryNumber": datasets.Value("string"), |
|
"NameOfSubstance": datasets.Value("string"), |
|
} |
|
|
|
Author = { |
|
|
|
"LastName": datasets.Value("string"), |
|
"ForeName": datasets.Value("string"), |
|
"Initials": datasets.Value("string"), |
|
"CollectiveName": datasets.Value("string"), |
|
|
|
} |
|
Grant = { |
|
|
|
"GrantID": datasets.Value("string"), |
|
"Agency": datasets.Value("string"), |
|
"Country": datasets.Value("string"), |
|
} |
|
|
|
|
|
Article = { |
|
|
|
"Abstract": {"AbstractText": datasets.Value("string")}, |
|
"ArticleTitle": datasets.Value("string"), |
|
|
|
"AuthorList": {"Author": datasets.Sequence(Author)}, |
|
"Language": datasets.Value("string"), |
|
"GrantList": { |
|
|
|
"Grant": datasets.Sequence(Grant), |
|
}, |
|
"PublicationTypeList": {"PublicationType": datasets.Sequence(datasets.Value("string"))}, |
|
} |
|
|
|
|
|
features = datasets.Features( |
|
{ |
|
"MedlineCitation": { |
|
"PMID": datasets.Value("int32"), |
|
"DateCompleted": Date, |
|
"NumberOfReferences": datasets.Value("int32"), |
|
"DateRevised": Date, |
|
"Article": Article, |
|
"MedlineJournalInfo": MedlineJournalInfo, |
|
"ChemicalList": {"Chemical": datasets.Sequence(Chemical)}, |
|
|
|
"CitationSubset": datasets.Value("string"), |
|
"MeshHeadingList": { |
|
"MeshHeading": datasets.Sequence(MeshHeading), |
|
}, |
|
|
|
}, |
|
"PubmedData": { |
|
|
|
"ArticleIdList": datasets.Sequence({ |
|
"ArticleId": datasets.Sequence(datasets.Value("string")), |
|
|
|
}), |
|
"PublicationStatus": datasets.Value("string"), |
|
"History": {"PubMedPubDate": datasets.Sequence(Date)}, |
|
|
|
"ReferenceList": datasets.Sequence({ |
|
"Citation": datasets.Value("string"), |
|
"CitationId": datasets.Value("int32"), |
|
|
|
}), |
|
}, |
|
} |
|
) |
|
|
|
|
|
self.fill_keys_from_features(features) |
|
|
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=features, |
|
homepage=_HOMEPAGE, |
|
license=_LICENSE, |
|
citation=_CITATION, |
|
) |
|
|
|
def _split_generators(self, dl_manager): |
|
"""Downloads data and defines splits.""" |
|
|
|
|
|
|
|
|
|
dl_dir = dl_manager.download(_URLs) |
|
return [ |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TRAIN, |
|
gen_kwargs={"filenames": dl_dir if isinstance(dl_dir, list) else list(dl_dir.values())}, |
|
), |
|
] |
|
|
|
def update_citation(self, article): |
|
""" |
|
Flattens the complex ReferenceList structure into a simpler list of {Citation, CitationId}. |
|
Modifies the 'article' dictionary in-place. |
|
Needs careful error handling for potentially missing keys. |
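
        Illustrative example: a parsed reference container such as

            {"Reference": [{"Citation": "Some paper.",
                            "ArticleIdList": [{"ArticleId": ["12345"]}]}]}

        yields [{"Citation": "Some paper.", "CitationId": 12345}].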
|
""" |
|
citations = [] |
|
try: |
|
|
|
            reference_list_container = article.get("PubmedData", {}).get("ReferenceList")

            if not reference_list_container:

                # setdefault avoids a KeyError when "PubmedData" is missing.
                article.setdefault("PubmedData", {})["ReferenceList"] = []

                return
|
|
|
|
|
for ref_container in reference_list_container: |
|
references = ref_container.get("Reference") |
|
if not references: continue |
|
|
|
|
|
for ref in references: |
|
citation_text = ref.get("Citation") |
|
if not citation_text: continue |
|
|
|
citation_id = None |
|
|
|
article_id_list_container = ref.get("ArticleIdList") |
|
if article_id_list_container: |
|
|
|
for id_container in article_id_list_container: |
|
article_ids = id_container.get("ArticleId") |
|
if article_ids: |
|
|
|
for art_id in article_ids: |
|
try: |
|
|
|
                                    # Guard against non-string IDs: .isdigit()
                                    # on a non-str would raise AttributeError,
                                    # which the except below does not catch.
                                    if isinstance(art_id, str) and art_id.isdigit():
|
citation_id = int(art_id) |
|
|
|
|
|
|
|
break |
|
except (ValueError, TypeError): |
|
continue |
|
                        if citation_id is not None:
                            break
|
|
|
if citation_id is not None: |
|
citations.append({"Citation": citation_text, "CitationId": citation_id}) |
|
|
|
|
|
|
|
|
|
if "PubmedData" in article: |
|
article["PubmedData"]["ReferenceList"] = citations |
|
else: |
|
|
|
article["PubmedData"] = {"ReferenceList": citations} |
|
|
|
|
|
except Exception as e: |
|
logger.error(f"Error during citation update for article: {e}") |
|
|
|
if "PubmedData" not in article: |
|
article["PubmedData"] = {} |
|
article["PubmedData"]["ReferenceList"] = [] |
|
|
|
|
|
def _generate_examples(self, filenames): |
|
"""Yields examples parsing XML files using iterparse for memory efficiency, skipping duplicate PMIDs.""" |
|
|
|
yielded_pmids = set() |
|
|
|
for filename in filenames: |
|
logger.info(f"Processing file: {filename}") |
|
try: |
|
with gzip.open(filename, "rb") as f: |
|
                    # Ask for "start" events too: the first event then hands us
                    # the root element, which is kept so it can be cleared as we
                    # go. With "end" events only, the first event would be the
                    # first fully-parsed leaf element, not the root.
                    context = ET.iterparse(f, events=("start", "end"))

                    event, root = next(context)
|
|
|
for event, elem in context: |
|
if event == "end" and elem.tag == "PubmedArticle": |
|
article_dict_wrapper = None |
|
pmid = "UNKNOWN_PMID" |
|
try: |
|
                                article_dict_wrapper = self.xml_to_dictionary(elem)
|
|
|
if not article_dict_wrapper or 'PubmedArticle' not in article_dict_wrapper: |
|
logger.warning(f"Parser returned empty or invalid structure for a PubmedArticle element in {filename}") |
|
elem.clear() |
|
continue |
|
|
|
article = article_dict_wrapper.get('PubmedArticle') |
|
if not article or not isinstance(article, dict): |
|
logger.warning(f"Parsed empty or invalid article data from element in {filename}") |
|
elem.clear() |
|
continue |
|
|
|
|
|
pmid_val = article.get("MedlineCitation", {}).get("PMID", 0) |
|
try: |
|
pmid = int(pmid_val) |
|
if pmid <= 0: raise ValueError("PMID must be positive") |
|
except (ValueError, TypeError): |
|
logger.warning(f"Skipping article due to invalid or missing PMID: '{pmid_val}' in {filename}") |
|
elem.clear() |
|
continue |
|
|
|
|
|
if pmid in yielded_pmids: |
|
logger.warning(f"Skipping duplicate PMID {pmid} found in {filename}.") |
|
elem.clear() |
|
continue |
|
|
|
|
|
|
|
self.update_citation(article) |
|
new_article = default_article() |
|
deepupdate(new_article, article) |
|
|
|
|
|
final_pmid_check = new_article.get("MedlineCitation", {}).get("PMID", 0) |
|
if final_pmid_check != pmid: |
|
logger.error(f"PMID mismatch after processing! Expected {pmid}, got {final_pmid_check}. Skipping article.") |
|
elem.clear() |
|
continue |
|
|
|
|
|
                                # Validate the record against the declared
                                # features; this raises if it does not conform
                                # (the encoded value itself is not needed here).
                                self.info.features.encode_example(new_article)
|
|
|
|
|
yield pmid, new_article |
|
yielded_pmids.add(pmid) |
|
|
|
except Exception as e: |
|
logger.error(f"Failed to process article PMID {pmid} in {filename}: {e}", exc_info=False) |
|
|
|
|
|
|
|
finally: |
|
elem.clear() |
|
|
|
                                if root is not None:

                                    # Also clear the root so already-processed
                                    # articles do not accumulate in memory.
                                    root.clear()
|
|
|
except ET.ParseError as e: |
|
logger.error(f"XML ParseError in file {filename}: {e}") |
|
continue |
|
except gzip.BadGzipFile: |
|
logger.error(f"Bad Gzip File error for {filename}. It might be corrupted or incomplete.") |
|
continue |
|
except FileNotFoundError: |
|
logger.error(f"File not found: {filename}") |
|
continue |
|
except Exception as e: |
|
logger.error(f"An unexpected error occurred processing file {filename}: {e}", exc_info=True) |
|
continue |
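

# Minimal usage sketch (assumptions: this script is saved locally as
# pubmed.py, and the installed `datasets` version still supports loading
# scripts with trust_remote_code):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("path/to/pubmed.py", "2025", split="train",
#                     trust_remote_code=True)
#   print(ds[0]["MedlineCitation"]["Article"]["Abstract"]["AbstractText"])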