# Path Configuration
import json
import os

import pandas as pd

from tools.preprocess import *

# Processing context
trait = "Schizophrenia"
cohort = "GSE145554"

# Input paths
in_trait_dir = "../DATA/GEO/Schizophrenia"
in_cohort_dir = "../DATA/GEO/Schizophrenia/GSE145554"

# Output paths
out_data_file = "./output/preprocess/3/Schizophrenia/GSE145554.csv"
out_gene_data_file = "./output/preprocess/3/Schizophrenia/gene_data/GSE145554.csv"
out_clinical_data_file = "./output/preprocess/3/Schizophrenia/clinical_data/GSE145554.csv"
json_path = "./output/preprocess/3/Schizophrenia/cohort_info.json"

# Get file paths
soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)

# Get background info and clinical data
background_info, clinical_data = get_background_and_clinical_data(matrix_file_path)
print("Background Information:")
print(background_info)
print("\nSample Characteristics:")

# Get dictionary of unique values per row 
unique_values_dict = get_unique_values_by_row(clinical_data)
for row, values in unique_values_dict.items():
    print(f"\n{row}:")
    print(values)
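
# Optional cross-check (a sketch, assuming get_unique_values_by_row returns a
# {row_index: list_of_characteristic_strings} mapping): flag candidate rows by
# keyword so the hard-coded trait/gender/age row indices chosen below can be
# verified against the printout above.
for feature, keyword in {"trait": "disease", "gender": "sex", "age": "age"}.items():
    candidates = [row for row, values in unique_values_dict.items()
                  if any(keyword in str(v).lower() for v in values)]
    print(f"\nCandidate rows for {feature}: {candidates}")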
# 1. Gene Expression Data Availability
# Based on the background info, this is a microarray study of mRNA, so gene expression data should be available
is_gene_available = True

# 2. Variable Availability and Data Type Conversion
# 2.1 Data Availability (row indices refer to the sample characteristics
# dictionary printed in the previous step)
trait_row = 0   # Disease state is in row 0
gender_row = 1  # Sex is in row 1
age_row = 3     # Age is in row 3

# 2.2 Data Type Conversion Functions
def convert_trait(x):
    if not isinstance(x, str):
        return None
    value = x.split(': ')[-1].lower()
    if 'schizophrenia' in value:
        return 1
    elif 'control' in value:
        return 0
    return None

def convert_gender(x):
    if not isinstance(x, str):
        return None
    value = x.split(': ')[-1].lower()
    if 'female' in value:
        return 0
    elif 'male' in value:
        return 1
    return None

def convert_age(x):
    if not isinstance(x, str):
        return None
    try:
        return int(x.split(': ')[-1])
    except ValueError:
        return None
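
# Quick sanity checks for the converters above (a sketch; the example strings
# are hypothetical "field: value" formats typical of GEO sample
# characteristics, not values confirmed for this cohort).
assert convert_trait("disease state: Schizophrenia") == 1
assert convert_trait("disease state: healthy control") == 0
assert convert_gender("Sex: Female") == 0
assert convert_gender("Sex: male") == 1
assert convert_age("age: 42") == 42
assert convert_age("age: unknown") is None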

# 3. Save metadata about data availability
validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=trait_row is not None
)

# 4. Clinical Feature Extraction
if trait_row is not None:
    clinical_features = geo_select_clinical_features(
        clinical_df=clinical_data,
        trait=trait,
        trait_row=trait_row,
        convert_trait=convert_trait,
        age_row=age_row,
        convert_age=convert_age,
        gender_row=gender_row,
        convert_gender=convert_gender
    )
    
    # Preview the extracted features
    print("Preview of clinical features:")
    print(preview_df(clinical_features))
    
    # Save clinical features to CSV
    clinical_features.to_csv(out_clinical_data_file)
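
    # A quick look at the trait distribution (a sketch, assuming the extracted
    # table has one row per clinical variable, named after `trait`, with
    # samples as columns); an early sanity check before linking.
    if trait in clinical_features.index:
        print(f"\n{trait} value counts:")
        print(clinical_features.loc[trait].value_counts(dropna=False))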
# Get gene expression data from matrix file
genetic_data = get_genetic_data(matrix_file_path)

# Examine data structure
print("Data structure and head:")
print(genetic_data.head())

print("\nShape:", genetic_data.shape)

print("\nFirst 20 row IDs (gene/probe identifiers):")
print(list(genetic_data.index)[:20])

# Get a few column names to verify sample IDs
print("\nFirst 5 column names:")
print(list(genetic_data.columns)[:5])
# The gene identifiers (7892501, 7892502, etc.) appear to be probe IDs from a microarray platform 
# rather than standard human gene symbols. They need to be mapped to gene symbols
requires_gene_mapping = True
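
# Optional heuristic check (a sketch, not required by the pipeline): purely
# numeric identifiers strongly suggest platform probe IDs rather than HGNC
# gene symbols.
import re
numeric_count = sum(bool(re.fullmatch(r"\d+", str(idx))) for idx in genetic_data.index[:100])
print(f"\n{numeric_count} of the first 100 row IDs are purely numeric")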
# Extract gene annotation data
gene_annotation = get_gene_annotation(soft_file_path)

# Preview column names and values from annotation dataframe
print("Gene annotation DataFrame preview:")
print(preview_df(gene_annotation))
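
# Peek at one raw annotation entry to see the delimited format the mapping
# step below has to parse (assumes get_gene_annotation returns a pandas
# DataFrame with a 'gene_assignment' column, per the preview above).
if 'gene_assignment' in gene_annotation.columns:
    non_null = gene_annotation['gene_assignment'].dropna()
    if not non_null.empty:
        print("\nExample gene_assignment entry:")
        print(non_null.iloc[0])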
# Extract probe-gene mapping
# 'ID' contains the same type of identifiers as in gene expression data
# 'gene_assignment' contains gene symbols and various annotations
mapping_data = get_gene_mapping(gene_annotation, 'ID', 'gene_assignment')

# Apply the mapping to convert probe-level data to gene-level data
gene_data = apply_gene_mapping(genetic_data, mapping_data)

# Normalize gene symbols to standard format, using NCBI gene synonym information
gene_data = normalize_gene_symbols_in_index(gene_data)

# Save gene expression data
gene_data.to_csv(out_gene_data_file)
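
# Quick report of how the probe-to-gene mapping changed the matrix dimensions
# (probe rows collapse into gene rows; sample columns should be unchanged).
print(f"\nProbe-level shape: {genetic_data.shape} -> gene-level shape: {gene_data.shape}")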
# 1. Re-apply gene symbol normalization (idempotent here, since the index was
# already normalized above) and verify the synonym dictionary behind it
print("\nSample gene symbols before re-normalization:", list(gene_data.index)[:5])

try:
    # Verify synonym dictionary
    with open("./metadata/gene_synonym.json", "r") as f:
        synonym_dict = json.load(f)
    print("\nNumber of entries in synonym dictionary:", len(synonym_dict))
    print("Sample entries from synonym dict:", list(synonym_dict.items())[:2])
    
    genetic_data = normalize_gene_symbols_in_index(gene_data)
    print("\nGene data shape after normalization:", genetic_data.shape)
    
    if genetic_data.shape[0] == 0:
        raise ValueError("Gene symbol normalization resulted in empty dataset")
        
    os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True) 
    genetic_data.to_csv(out_gene_data_file)
    
    # Load clinical data previously processed
    selected_clinical_df = pd.read_csv(out_clinical_data_file, index_col=0)
    print("\nClinical data shape:", selected_clinical_df.shape)

    # 2. Link clinical and genetic data
    linked_data = geo_link_clinical_genetic_data(selected_clinical_df, genetic_data)
    print("\nLinked data shape:", linked_data.shape)

    # 3. Handle missing values systematically  
    if trait in linked_data.columns:
        linked_data = handle_missing_values(linked_data, trait)

        # 4. Check for bias in trait and demographic features
        trait_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)
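
        # Show the trait distribution after missing-value handling, so the
        # bias judgement above can be eyeballed against the raw counts
        # (assumes the trait column is binary-coded 0/1 as converted earlier).
        print(f"\n{trait} distribution in linked data:")
        print(linked_data[trait].value_counts(dropna=False))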

        # 5. Final validation and information saving
        note = "This dataset studies alcohol dependence in brain tissue samples, containing gene expression data from the prefrontal cortex."
        is_usable = validate_and_save_cohort_info(
            is_final=True,
            cohort=cohort, 
            info_path=json_path,
            is_gene_available=True,
            is_trait_available=True,
            is_biased=trait_biased,
            df=linked_data,
            note=note
        )

        # 6. Save linked data only if usable and not biased
        if is_usable and not trait_biased:
            os.makedirs(os.path.dirname(out_data_file), exist_ok=True)
            linked_data.to_csv(out_data_file)
            
except Exception as e:
    print(f"\nError during preprocessing: {str(e)}")
    # Record failure; the try block spans normalization through linking, so
    # report the failing step via the exception message rather than assuming
    # it was normalization.
    note = f"Preprocessing failed: {str(e)}"
    validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=True,  
        is_trait_available=True,
        is_biased=None,
        df=None,
        note=note
    )