Witold Wydmański
committed
Commit · 18b1614
1 Parent(s): f93d5a7
feat: forgot to include script lol
biodataome.py +77 -0
biodataome.py
ADDED
@@ -0,0 +1,77 @@
+#%%
+import datasets
+from datasets.tasks import TaskTemplate
+from sklearn.model_selection import train_test_split
+import pandas as pd
+import csv
+import os
+
+_ORIGIN = "http://dataome.mensxmachina.org/"
+_CITATION = """ """
+
+class BioDataome(datasets.GeneratorBasedBuilder):
+    METADATA = pd.read_csv(f"biodataome_data.csv")
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(name=i,
+                               version=datasets.Version("1.0.0"),
+                               description=d)
+        for i, d in zip(
+            METADATA["GSE"],
+            METADATA["Disease"],
+        )
+    ]
+
+    print(BUILDER_CONFIGS)
+
+    def _info(self) -> datasets.DatasetInfo:
+        print(BioDataome.BUILDER_CONFIGS)
+        return datasets.DatasetInfo(
+            description="",
+            citation=_CITATION,
+            homepage=_ORIGIN,
+            license="",
+        )
+
+    def _split_generators(self, dl_manager):
+        gse = self.config.name
+        url = self.METADATA[self.METADATA["GSE"] == gse]["Datapath"].values[0]
+        metadata_url = self.METADATA[self.METADATA["GSE"] == gse]["DataAnnot"].values[0]
+        data: datasets.download.DownloadManager = dl_manager.download(url)
+        metadata: datasets.download.DownloadManager = dl_manager.download(metadata_url)
+
+        new_name = os.path.dirname(data) + "/" + os.path.basename(data).split(".")[0] + "_processed.csv"
+
+        df = pd.read_csv(data, index_col=0)
+        df = df.T
+        df.to_csv(new_name, index=False)
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": new_name, "metadata": metadata}),
+        ]
+
+    def _generate_examples(self, filepath, metadata):
+        print(filepath)
+        with open(filepath, "r") as f:
+            f_header = f.readline()
+            with open(metadata, "r") as m:
+                m_header = m.readline()
+                for key, (row, meta) in enumerate(zip(f, m)):
+                    metadata = csv.reader([meta], quotechar='"').__next__()
+                    row = row.split(",")
+                    yield key, {
+                        "data":
+                            {
+                                i.strip(): j for i, j in zip(f_header.split(","), row)
+                            },
+                        "metadata":
+                            {
+                                i.strip(): j for i, j in zip(m_header.split(","), metadata)
+                            }
+                    }
+
+#%%
+if __name__ == "__main__":
+    ds = datasets.load_dataset("./load_script.py", "GSE17933")
+    ds['train'][0]
+
+# %%
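
For anyone trying the script as committed: the builder registers one configuration per GSE accession in biodataome_data.csv, while the __main__ block loads it under the name ./load_script.py. A minimal usage sketch under the committed filename, assuming biodataome_data.csv sits next to the script with its GSE, Disease, Datapath and DataAnnot columns and lists GSE17933, might look like:

import datasets

# Depending on the installed datasets version, loading a local builder script
# may additionally require passing trust_remote_code=True.
ds = datasets.load_dataset("./biodataome.py", "GSE17933")

# Each example carries two dicts: "data" (one value per column of the transposed
# expression matrix) and "metadata" (one value per column of the annotation CSV).
example = ds["train"][0]
print(list(example["data"].items())[:5])
print(example["metadata"])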