atgctg commited on
Commit
5b8e87d
·
unverified ·
1 Parent(s): 5a4f8cd

feat: proof of concept dataset_script

Browse files
Files changed (1) hide show
  1. pubchem_selfies.py +165 -0
pubchem_selfies.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # TODO: Address all TODOs and remove all explanatory comments
15
+ """TODO: File Description"""
16
+
17
+
18
import csv
import json
import os
import random

import datasets
23
+
24
+ # Using https://www.bibtex.com/c/doi-to-bibtex-converter/ (doi: 10.1093/nar/gkv951)
25
+ _CITATION = """\
26
+ @ARTICLE{Kim2016-sz,
27
+ title = "{PubChem} Substance and Compound databases",
28
+ author = "Kim, Sunghwan and Thiessen, Paul A and Bolton, Evan E and Chen,
29
+ Jie and Fu, Gang and Gindulyte, Asta and Han, Lianyi and He,
30
+ Jane and He, Siqian and Shoemaker, Benjamin A and Wang, Jiyao
31
+ and Yu, Bo and Zhang, Jian and Bryant, Stephen H",
32
+ abstract = "PubChem (https://pubchem.ncbi.nlm.nih.gov) is a public
33
+ repository for information on chemical substances and their
34
+ biological activities, launched in 2004 as a component of the
35
+ Molecular Libraries Roadmap Initiatives of the US National
36
+ Institutes of Health (NIH). For the past 11 years, PubChem has
37
+ grown to a sizable system, serving as a chemical information
38
+ resource for the scientific research community. PubChem consists
39
+ of three inter-linked databases, Substance, Compound and
40
+ BioAssay. The Substance database contains chemical information
41
+ deposited by individual data contributors to PubChem, and the
42
+ Compound database stores unique chemical structures extracted
43
+ from the Substance database. Biological activity data of
44
+ chemical substances tested in assay experiments are contained in
45
+ the BioAssay database. This paper provides an overview of the
46
+ PubChem Substance and Compound databases, including data sources
47
+ and contents, data organization, data submission using PubChem
48
+ Upload, chemical structure standardization, web-based interfaces
49
+ for textual and non-textual searches, and programmatic access.
50
+ It also gives a brief description of PubChem3D, a resource
51
+ derived from theoretical three-dimensional structures of
52
+ compounds in PubChem, as well as PubChemRDF, Resource
53
+ Description Framework (RDF)-formatted PubChem data for data
54
+ sharing, analysis and integration with information contained in
55
+ other databases.",
56
+ journal = "Nucleic Acids Res.",
57
+ publisher = "Oxford University Press (OUP)",
58
+ volume = 44,
59
+ number = "D1",
60
+ pages = "D1202--13",
61
+ month = jan,
62
+ year = 2016,
63
+ language = "en"
64
+ }
65
+ """
66
+
67
+ # TODO: Add description of the dataset here
68
+ # You can copy an official description
69
+ _DESCRIPTION = """\
70
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
71
+ """
72
+
73
+ _HOMEPAGE = "https://pubchem.ncbi.nlm.nih.gov/"
74
+
75
+ # TODO: Add the licence for the dataset here if you can find it
76
+ # _LICENSE = ""
77
+
78
+ # _BASE_URL = "https://huggingface.co/datasets/zpn/pubchem-selfies/resolve/refs%2Fpr%2F2"
79
+ _URLS = [] # TODO: define urls
80
+
81
+
82
class PubchemSelfies(datasets.GeneratorBasedBuilder):
    """PubChem compounds paired with their canonical SELFIES representation.

    Each example carries the PubChem compound identifier (CID) and the
    SELFIES string stored alongside it in the source JSONL shards.
    """

    VERSION = datasets.Version("1.1.0")

    # The `datasets` library reads the plural `BUILDER_CONFIGS` (a list of
    # configs); a singular `BUILDER_CONFIG` attribute is silently ignored,
    # so the config defined by the original code never took effect.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            version=VERSION,
            description="",  # TODO: Short description of my dataset.
        )
    ]

    def _info(self):
        """Return dataset metadata: description, schema, homepage, citation."""
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # Two string columns: the PubChem compound id and its SELFIES.
            features=datasets.Features(
                {
                    "PUBCHEM_COMPOUND_CID": datasets.Value("string"),
                    "CAN_SELFIES": datasets.Value("string"),
                }
            ),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # license=_LICENSE,  # TODO: enable once the license is known
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the archives and carve the shards into 80/10/10 splits.

        The extracted directory is assumed to contain one subdirectory per
        shard (TODO confirm once `_URLS` is defined). Shards are shuffled
        with a fixed seed so the split assignment is reproducible.
        """
        # dl_manager mirrors the structure of _URLS, replacing each url with
        # a path to the locally downloaded (and extracted) archive.
        data_dir = dl_manager.download_and_extract(_URLS)
        # Keep full paths so _generate_examples can open files directly;
        # bare os.listdir() names are relative to data_dir, not the cwd.
        subdirs = [os.path.join(data_dir, name) for name in os.listdir(data_dir)]

        # Seeded shuffle: the original unseeded random.shuffle produced a
        # different train/valid/test assignment on every run.
        random.Random(42).shuffle(subdirs)

        train_len = int(len(subdirs) * 0.8)
        valid_len = int(len(subdirs) * 0.1)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "subdirs": subdirs[:train_len],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "subdirs": subdirs[train_len : train_len + valid_len],
                    "split": "valid",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # Take everything after train+valid: the original
                # `subdirs[-valid_len:]` could overlap the validation slice
                # or drop shards when the 80/10 fractions truncate unevenly.
                gen_kwargs={
                    "subdirs": subdirs[train_len + valid_len :],
                    "split": "test",
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, subdirs, split):
        """Yield (key, example) pairs from each shard's `<name>_SELFIES.jsonl`.

        Keys must be unique across the whole split (a tfds-era requirement
        of `datasets`), so one running counter spans all files instead of
        restarting enumerate() at 0 per shard as the original did.
        """
        key = 0
        for subdir in subdirs:
            # os.path.join fixes the original bare string concatenation,
            # which dropped the separator between directory and file name.
            filepath = os.path.join(
                subdir, f"{os.path.basename(subdir)}_SELFIES.jsonl"
            )
            with open(filepath, encoding="utf-8") as f:
                for row in f:
                    data = json.loads(row)
                    # Each JSONL row holds a list of molecules; only the
                    # first molecule's properties are used — TODO confirm
                    # shards never contain more than one molecule per row.
                    properties = data["molecules"][0]["properties"]
                    yield key, {
                        "PUBCHEM_COMPOUND_CID": properties["PUBCHEM_COMPOUND_CID"],
                        "CAN_SELFIES": properties["CAN_SELFIES"],
                    }
                    key += 1