# Path Configuration
from tools.preprocess import *

# Processing context
trait = "COVID-19"
cohort = "GSE185658"

# Input paths
in_trait_dir = "../DATA/GEO/COVID-19"
in_cohort_dir = "../DATA/GEO/COVID-19/GSE185658"

# Output paths
out_data_file = "./output/preprocess/1/COVID-19/GSE185658.csv"
out_gene_data_file = "./output/preprocess/1/COVID-19/gene_data/GSE185658.csv"
out_clinical_data_file = "./output/preprocess/1/COVID-19/clinical_data/GSE185658.csv"
json_path = "./output/preprocess/1/COVID-19/cohort_info.json"

# STEP1
from tools.preprocess import *

# 1. Attempt to identify the paths to the SOFT file and the matrix file
try:
    soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
except AssertionError:
    print("[WARNING] Could not find the expected '.soft' or '.matrix' files in the directory.")
    soft_file, matrix_file = None, None

if soft_file is None or matrix_file is None:
    print("[ERROR] Required GEO files are missing. Please check file names in the cohort directory.")
else:
    # 2. Read the matrix file to obtain background information and sample characteristics data
    background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
    clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
    background_info, clinical_data = get_background_and_clinical_data(matrix_file,
                                                                      background_prefixes,
                                                                      clinical_prefixes)

    # 3. Obtain the sample characteristics dictionary from the clinical dataframe
    sample_characteristics_dict = get_unique_values_by_row(clinical_data)

    # 4. Explicitly print out all the background information and the sample characteristics dictionary
    print("Background Information:")
    print(background_info)
    print("\nSample Characteristics Dictionary:")
    print(sample_characteristics_dict)
# 1) Determine if gene expression data is available
is_gene_available = True  # From the background info (Affymetrix microarrays)

# 2) Identify data availability for trait, age, and gender.
#    Based on the sample characteristics dictionary, none of these variables (COVID-19 status, age, gender)
#    appear to be present, so we set row indices to None.
trait_row = None
age_row = None
gender_row = None

#    Define the conversion functions for these variables.
#    Although this cohort provides no usable data for them, the stubs must still be defined.
#    The typical pattern is to parse the text after the colon (':') and return the converted value,
#    or None when the value does not match any expected pattern.

def convert_trait(value: str):
    # No COVID-19 data is present, so we simply return None.
    return None

def convert_age(value: str):
    # No age data is present, so return None.
    return None

def convert_gender(value: str):
    # No gender data is present, so return None.
    return None
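
# Illustrative sketch only (not used for this cohort): if a characteristics row such as
# "infection status: COVID-19" / "infection status: healthy" were present, a converter
# following the colon-parsing pattern described above might look like the hypothetical
# example below. The field name and labels are assumptions, not values observed in GSE185658.
def _example_convert_trait(value: str):
    # Keep only the text after the first colon and normalize case.
    parts = value.split(':', 1)
    if len(parts) != 2:
        return None
    val = parts[1].strip().lower()
    if 'covid' in val:
        return 1  # case
    if 'healthy' in val or 'control' in val:
        return 0  # control
    return None  # unrecognized value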

# 3) Conduct initial filtering with validate_and_save_cohort_info
#    Trait data availability depends on trait_row; here trait_row is None => trait data not available.
is_trait_available = (trait_row is not None)

is_usable = validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)

# 4) Since trait_row is None, we skip clinical feature extraction.
# STEP3
# 1. Use the get_genetic_data function from the library to get the gene_data from the matrix_file previously defined.
gene_data = get_genetic_data(matrix_file)

# 2. Print the first 20 row IDs (gene or probe identifiers) for future observation.
print(gene_data.index[:20])
print("Based on the given gene identifiers, they appear to be numeric platform IDs, not standard human gene symbols.")
print("requires_gene_mapping = True")
# STEP5
# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.
gene_annotation = get_gene_annotation(soft_file)

# 2. Use the 'preview_df' function from the library to preview the data and print out the results.
print("Gene annotation preview:")
print(preview_df(gene_annotation))
# STEP: Gene Identifier Mapping

# 1. Identify columns in 'gene_annotation' for mapping:
#    - The probe identifier column matches the numeric IDs in 'gene_data' ("ID").
#    - The gene symbol information is contained in "gene_assignment".
mapping_df = get_gene_mapping(
    annotation=gene_annotation,
    prob_col='ID',
    gene_col='gene_assignment'
)
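# Note (an assumption about the platform annotation, for orientation only): 'gene_assignment'
# entries on Affymetrix Gene ST arrays are typically '//'-delimited strings containing one or
# more gene symbols alongside accession numbers and descriptions; the mapping step is expected
# to extract the symbol portion from these strings.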

# 2. Convert probe-level measurements to gene-level by applying the mapping.
gene_data = apply_gene_mapping(gene_data, mapping_df)

# For verification, print a brief preview of the resulting mapped gene data.
print("Mapped gene_data shape:", gene_data.shape)
print(gene_data.head(5))
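
# Optional sanity check (illustrative assumption, not part of the pipeline): a few widely
# used human housekeeping gene symbols should normally appear in the mapped index if the
# probe-to-symbol mapping succeeded.
for _symbol in ['GAPDH', 'ACTB', 'B2M']:
    print(f"{_symbol} present after mapping: {_symbol in gene_data.index}")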
import os
import pandas as pd

# STEP7: Data Normalization and Linking

# 1) Normalize the gene symbols in the previously obtained gene_data
normalized_gene_data = normalize_gene_symbols_in_index(gene_data)
os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)  # make sure the output directory exists
normalized_gene_data.to_csv(out_gene_data_file)

# 2) Load clinical data only if it exists and is non-empty.
#    In this cohort trait_row is None, so no clinical file was saved and the else branch
#    below is expected to run; the if branch is kept for generality.
if os.path.exists(out_clinical_data_file) and os.path.getsize(out_clinical_data_file) > 0:
    # Read the file
    clinical_temp = pd.read_csv(out_clinical_data_file)

    # Adjust row index to label the trait, age, and gender properly
    if clinical_temp.shape[0] == 3:
        clinical_temp.index = [trait, "Age", "Gender"]
    elif clinical_temp.shape[0] == 2:
        clinical_temp.index = [trait, "Gender"]
    elif clinical_temp.shape[0] == 1:
        clinical_temp.index = [trait]

    # 3) Link the clinical and normalized genetic data
    linked_data = geo_link_clinical_genetic_data(clinical_temp, normalized_gene_data)

    # 4) Handle missing values
    linked_data = handle_missing_values(linked_data, trait)

    # 5) Check for severe bias in the trait; remove biased demographic features if present
    trait_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)

    # 6) Final quality validation and save metadata
    is_usable = validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=True,
        is_trait_available=True,
        is_biased=trait_biased,
        df=linked_data,
        note=f"Final check on {cohort} with {trait}."
    )

    # 7) If the linked data is usable, save it
    if is_usable:
        os.makedirs(os.path.dirname(out_data_file), exist_ok=True)  # make sure the output directory exists
        linked_data.to_csv(out_data_file)
else:
    # If no valid clinical data file is found, finalize metadata indicating trait unavailability
    is_usable = validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=True,
        is_trait_available=False,
        is_biased=True,  # Force a fallback so that it's flagged as unusable
        df=pd.DataFrame(),
        note=f"No trait data found for {cohort}, final metadata recorded."
    )
    # Per instructions, do not save a final linked data file when trait data is absent.