---
dataset_info:
  features:
    - name: image
      dtype: image
    - name: id
      dtype: int64
    - name: canonical
      dtype: string
    - name: selfies
      dtype: string
    - name: caption
      dtype: string
  splits:
    - name: train
      num_bytes: 2269140389.488
      num_examples: 126864
    - name: validation
      num_bytes: 597161224.016
      num_examples: 33696
  download_size: 2757974974
  dataset_size: 2866301613.5039997
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/train-*
      - split: validation
        path: data/validation-*
---
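
Below is a minimal loading sketch using the 🤗 `datasets` library. The repository id `ndhieunguyen/LPM-24-image` is assumed from this card and should be verified against the Hub page before use.

```python
# Minimal sketch: load the dataset and inspect one example.
# Assumes the repository id "ndhieunguyen/LPM-24-image" (verify on the Hub).
from datasets import load_dataset

# Downloads both splits described in the metadata above
# (train: 126,864 examples, validation: 33,696 examples).
dataset = load_dataset("ndhieunguyen/LPM-24-image")

# Each example exposes the five features listed in the card:
# a molecule image plus its id, canonical string, SELFIES string, and caption.
example = dataset["train"][0]
print(example["id"], example["canonical"])
print(example["selfies"])
print(example["caption"])
example["image"].show()  # PIL.Image; opens in the default image viewer
```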

Thank you for your interest in using this dataset. I kindly ask you to cite the following research papers when using it in your work:

@inproceedings{edwards2024_LPM24,
    title = "{L}+{M}-24: Building a Dataset for {L}anguage+{M}olecules @ {ACL} 2024",
    author = "Edwards, Carl  and
      Wang, Qingyun  and
      Zhao, Lawrence  and
      Ji, Heng",
    booktitle = "Proceedings of the 1st Workshop on Language + Molecules (L+M 2024)",
    month = aug,
    year = "2024",
    address = "Bangkok, Thailand",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.langmol-1.1",
    doi = "10.18653/v1/2024.langmol-1.1",
    pages = "1--9",
}

@inproceedings{edwards-etal-2022-translation,
    title = "Translation between Molecules and Natural Language",
    author = "Edwards, Carl  and
      Lai, Tuan  and
      Ros, Kevin  and
      Honke, Garrett  and
      Cho, Kyunghyun  and
      Ji, Heng",
    booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.emnlp-main.26",
    pages = "375--413",
}

@inproceedings{tran-etal-2024-mol2lang,
    title = "{M}ol2{L}ang-{VLM}: Vision- and Text-Guided Generative Pre-trained Language Models for Advancing Molecule Captioning through Multimodal Fusion",
    author = "Tran, Duong  and
      Pham, Nhat Truong  and
      Nguyen, Nguyen  and
      Manavalan, Balachandran",
    editor = "Edwards, Carl  and
      Wang, Qingyun  and
      Li, Manling  and
      Zhao, Lawrence  and
      Hope, Tom  and
      Ji, Heng",
    booktitle = "Proceedings of the 1st Workshop on Language + Molecules (L+M 2024)",
    month = aug,
    year = "2024",
    address = "Bangkok, Thailand",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.langmol-1.12/",
    doi = "10.18653/v1/2024.langmol-1.12",
    pages = "97--102",
    abstract = "This paper introduces Mol2Lang-VLM, an enhanced method for refining generative pre-trained language models for molecule captioning using multimodal features to achieve more accurate caption generation. Our approach leverages the encoder and decoder blocks of the Transformer-based architecture by introducing third sub-layers into both. Specifically, we insert sub-layers in the encoder to fuse features from SELFIES strings and molecular images, while the decoder fuses features from SMILES strings and their corresponding descriptions. Moreover, cross multi-head attention is employed instead of common multi-head attention to enable the decoder to attend to the encoder's output, thereby integrating the encoded contextual information for better and more accurate caption generation. Performance evaluation on the CheBI-20 and L+M-24 benchmark datasets demonstrates Mol2Lang-VLM's superiority, achieving higher accuracy and quality in caption generation compared to existing methods. Our code and pre-processed data are available at https://github.com/nhattruongpham/mol-lang-bridge/tree/mol2lang/."
}

@inproceedings{nguyen-etal-2024-lang2mol,
    title = "{L}ang2{M}ol-Diff: A Diffusion-Based Generative Model for Language-to-Molecule Translation Leveraging {SELFIES} Representation",
    author = "Nguyen, Nguyen  and
      Pham, Nhat Truong  and
      Tran, Duong  and
      Manavalan, Balachandran",
    editor = "Edwards, Carl  and
      Wang, Qingyun  and
      Li, Manling  and
      Zhao, Lawrence  and
      Hope, Tom  and
      Ji, Heng",
    booktitle = "Proceedings of the 1st Workshop on Language + Molecules (L+M 2024)",
    month = aug,
    year = "2024",
    address = "Bangkok, Thailand",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.langmol-1.15/",
    doi = "10.18653/v1/2024.langmol-1.15",
    pages = "128--134",
    abstract = "Generating de novo molecules from textual descriptions is challenging due to potential issues with molecule validity in SMILES representation and limitations of autoregressive models. This work introduces Lang2Mol-Diff, a diffusion-based language-to-molecule generative model using the SELFIES representation. Specifically, Lang2Mol-Diff leverages the strengths of two state-of-the-art molecular generative models: BioT5 and TGM-DLM. By employing BioT5 to tokenize the SELFIES representation, Lang2Mol-Diff addresses the validity issues associated with SMILES strings. Additionally, it incorporates a text diffusion mechanism from TGM-DLM to overcome the limitations of autoregressive models in this domain. To the best of our knowledge, this is the first study to leverage the diffusion mechanism for text-based de novo molecule generation using the SELFIES molecular string representation. Performance evaluation on the L+M-24 benchmark dataset shows that Lang2Mol-Diff outperforms all existing methods for molecule generation in terms of validity. Our code and pre-processed data are available at https://github.com/nhattruongpham/mol-lang-bridge/tree/lang2mol/."
}