# Path Configuration
from tools.preprocess import *

# Processing context
trait = "Allergies"
cohort = "GSE203409"

# Input paths
in_trait_dir = "../DATA/GEO/Allergies"
in_cohort_dir = "../DATA/GEO/Allergies/GSE203409"

# Output paths
out_data_file = "./output/preprocess/1/Allergies/GSE203409.csv"
out_gene_data_file = "./output/preprocess/1/Allergies/gene_data/GSE203409.csv"
out_clinical_data_file = "./output/preprocess/1/Allergies/clinical_data/GSE203409.csv"
json_path = "./output/preprocess/1/Allergies/cohort_info.json"
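
# Defensive sketch (assumption: these output directories may not be pre-created by the
# pipeline). Creating them up front avoids write errors in later steps; harmless if
# they already exist.
import os
for _path in (out_data_file, out_gene_data_file, out_clinical_data_file, json_path):
    os.makedirs(os.path.dirname(_path), exist_ok=True)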

# STEP 1: Initial Data Loading

from tools.preprocess import *

# 1. Identify the paths to the SOFT file and the matrix file
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)

# 2. Read the matrix file to obtain background information and sample characteristics data
background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
background_info, clinical_data = get_background_and_clinical_data(
    matrix_file, 
    background_prefixes, 
    clinical_prefixes
)

# 3. Obtain the sample characteristics dictionary from the clinical dataframe
sample_characteristics_dict = get_unique_values_by_row(clinical_data)

# 4. Explicitly print out all the background information and the sample characteristics dictionary
print("Background Information:")
print(background_info)
print("\nSample Characteristics Dictionary:")
print(sample_characteristics_dict)

# STEP 2: Dataset Analysis and Clinical Feature Extraction

# 1. Gene Expression Data Availability
# Based on the series title and summary ("Gene expression profiling..."),
# we conclude that gene expression data is indeed available.
is_gene_available = True

# 2. Variable Availability and Data Type Conversion

# From the sample characteristics dictionary, we see:
#   0 -> cell line info
#   1 -> knockdown info
#   2 -> treatment info
#   3 -> treatment compound concentration
# This dataset is an in vitro study using a HaCaT cell line.
# There is no human-level "Allergies" status, no age, and no gender data.
# Hence, for each variable (trait, age, gender), data is NOT available.

trait_row = None
age_row = None
gender_row = None

# Even though data is not available, we must define conversion functions.
# If called, they would handle extraction and conversion logic. Here, they return None.

def convert_trait(value: str):
    # Placeholder implementation.
    # Usually, we'd parse 'value' after the colon, e.g. value.split(':')[-1].strip().
    # But since data is not available, always return None.
    return None

def convert_age(value: str):
    # Placeholder implementation.
    return None

def convert_gender(value: str):
    # Placeholder implementation.
    return None
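
# For reference only (hypothetical sketch, not used for this cohort): had a trait row
# such as "disease state: allergic rhinitis" vs. "disease state: healthy control" been
# present, a binary conversion function might look like this.
def _example_convert_trait(value: str):
    parsed = value.split(':')[-1].strip().lower()
    if 'allerg' in parsed:
        return 1  # case
    if 'healthy' in parsed or 'control' in parsed:
        return 0  # control
    return None  # unrecognized values are treated as missing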

# 3. Save Metadata
# We do an initial validation using 'validate_and_save_cohort_info'.
# Trait data availability is determined by (trait_row is not None).
is_trait_available = (trait_row is not None)

is_usable = validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort, 
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)
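
# Note: the exact schema of cohort_info.json is defined by the tools library; it is
# assumed here to record per-cohort availability flags (gene data, trait data) that
# later steps consult when deciding whether this cohort enters downstream analysis.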

# 4. Clinical Feature Extraction
# Since trait_row is None, we skip the clinical extraction step.
# (No substep needed as there is no clinical data to extract.)
# STEP 3: Gene Data Extraction
# 1. Use the get_genetic_data function from the library to get the gene_data from the matrix_file previously defined.
gene_data = get_genetic_data(matrix_file)

# 2. Print the first 20 row IDs (gene or probe identifiers) for future observation.
print(gene_data.index[:20])
# STEP 4: Gene Identifier Review
# Based on inspection, the identifiers "ILMN_xxxxxx" appear to be Illumina probe IDs, not standard human gene symbols.
# Therefore, gene symbol mapping is required.
print("requires_gene_mapping = True")
# STEP 5: Gene Annotation
# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.
gene_annotation = get_gene_annotation(soft_file)

# 2. Use the 'preview_df' function from the library to preview the data and print out the results.
print("Gene annotation preview:")
print(preview_df(gene_annotation))
# STEP 6: Gene Identifier Mapping

# 1) From the preview, the "ID" column in 'gene_annotation' matches the probe IDs in 'gene_data' (both have "ILMN_xxxxx" format),
#    and the "Symbol" column holds the gene symbol information.
# 2) Create a mapping dataframe.
mapping_df = get_gene_mapping(gene_annotation, prob_col='ID', gene_col='Symbol')

# 3) Convert probe-level measurements to gene-level by applying the mapping.
gene_data = apply_gene_mapping(gene_data, mapping_df)

# For confirmation, print out the shape and a small preview of the mapped gene_data.
print("Gene data shape after mapping:", gene_data.shape)
print(gene_data.head())
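
# For intuition only: a minimal, self-contained sketch of how probe-level values can be
# collapsed to gene level with pandas (toy data; the library's apply_gene_mapping may
# resolve many-to-many probe/gene relationships differently).
import pandas as pd
_toy_expr = pd.DataFrame(
    {"S1": [1.0, 2.0, 3.0], "S2": [4.0, 5.0, 6.0]},
    index=["ILMN_0000001", "ILMN_0000002", "ILMN_0000003"],
)
_toy_map = pd.Series(
    ["A1BG", "A1BG", "A2M"],
    index=["ILMN_0000001", "ILMN_0000002", "ILMN_0000003"],
    name="Symbol",
)
# Probes mapping to the same symbol are averaged: A1BG -> (1.5, 4.5), A2M -> (3.0, 6.0).
_collapsed = _toy_expr.join(_toy_map).groupby("Symbol").mean()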

# STEP 7: Data Normalization and Linking

import pandas as pd

# 1. Normalize gene symbols in the obtained gene expression data
normalized_gene_data = normalize_gene_symbols_in_index(gene_data)
normalized_gene_data.to_csv(out_gene_data_file, index=True)
print(f"Saved normalized gene data to {out_gene_data_file}")

# Since in earlier steps trait_row was None, we have no clinical data to link.
# Hence, there's no trait column to process. We'll skip linking and further steps
# that require the trait. However, we must still perform a final validation.

# Prepare a dummy DataFrame for the final validation
dummy_df = pd.DataFrame()

# We must provide is_biased and df to the final validation.
# Because trait data is not available, this dataset won't be usable.
is_biased = False  # Arbitrarily set; since trait is unavailable, "is_usable" will be False anyway.

is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,    # gene data is available
    is_trait_available=is_trait_available,  # trait data is not available
    is_biased=is_biased,
    df=dummy_df,
    note="No trait data available; skipping linking."
)

# 6. Because trait data is unavailable, this cohort is not usable for trait-gene
#    association, so no linked data file is written. If validation ever marks it
#    usable despite that, warn instead of saving.
if is_usable:
    print("Data is unexpectedly marked usable, but trait is unavailable. Skipping save.")