# Path Configuration
from tools.preprocess import *

# Processing context
trait = "Bipolar_disorder"
cohort = "GSE120342"

# Input paths
in_trait_dir = "../DATA/GEO/Bipolar_disorder"
in_cohort_dir = "../DATA/GEO/Bipolar_disorder/GSE120342"

# Output paths
out_data_file = "./output/preprocess/1/Bipolar_disorder/GSE120342.csv"
out_gene_data_file = "./output/preprocess/1/Bipolar_disorder/gene_data/GSE120342.csv"
out_clinical_data_file = "./output/preprocess/1/Bipolar_disorder/clinical_data/GSE120342.csv"
json_path = "./output/preprocess/1/Bipolar_disorder/cohort_info.json"
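
# Ensure the output directories exist before any files are written
# (assumption: they are not necessarily created elsewhere in the pipeline).
import os
for _out_path in [out_data_file, out_gene_data_file, out_clinical_data_file, json_path]:
    os.makedirs(os.path.dirname(_out_path), exist_ok=True)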

# STEP1
from tools.preprocess import *
# 1. Identify the paths to the SOFT file and the matrix file
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)

# 2. Read the matrix file to obtain background information and sample characteristics data
background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)

# 3. Obtain the sample characteristics dictionary from the clinical dataframe
sample_characteristics_dict = get_unique_values_by_row(clinical_data)

# 4. Explicitly print out all the background information and the sample characteristics dictionary
print("Background Information:")
print(background_info)
print("Sample Characteristics Dictionary:")
print(sample_characteristics_dict)
# 1) Determine if this dataset likely contains suitable gene expression data
is_gene_available = True  # The background information mentions "Aberrant transcriptomes...", suggesting transcriptomic (gene expression) data is present

# 2) Identify data availability and create conversion functions

# From the sample characteristics:
# {
#   0: ['disease state: control', 'disease state: SCZ', 'disease state: BD(-)', 'disease state: BD(+)'],
#   1: ['laterality: left', 'laterality: right']
# }
# Only disease state and laterality are recorded; there is no explicit age or gender metadata.
# For the trait: row 0 can be used, since it distinguishes BD(-), BD(+), SCZ, and control samples.
# Age and gender: not available, so their rows are set to None.

trait_row = 0
age_row = None
gender_row = None

def convert_trait(value: str):
    """Convert a 'disease state: ...' entry to a binary trait: 1 for bipolar disorder, 0 for control/SCZ."""
    parts = value.split(":", 1)
    val = parts[1].strip() if len(parts) > 1 else value.strip()
    # Convert BD to 1, others (SCZ, control) to 0
    if val in ["control", "SCZ"]:
        return 0
    elif val in ["BD(-)", "BD(+)"]:
        return 1
    else:
        return None
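
# Illustrative sanity check of the trait conversion on the values observed in the
# sample characteristics above (informational only).
for _example in ['disease state: control', 'disease state: SCZ',
                 'disease state: BD(-)', 'disease state: BD(+)']:
    print(_example, "->", convert_trait(_example))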

# Since age and gender rows are not available, we set their convert functions to None
convert_age = None
convert_gender = None

# 3) Conduct initial filtering and save metadata
is_trait_available = (trait_row is not None)
is_usable = validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)

# 4) Clinical feature extraction if trait data is available
if trait_row is not None:
    df_clinical = geo_select_clinical_features(
        clinical_df=clinical_data,
        trait=trait,
        trait_row=trait_row,
        convert_trait=convert_trait,
        age_row=age_row,
        convert_age=convert_age,
        gender_row=gender_row,
        convert_gender=convert_gender
    )
    print(preview_df(df_clinical, n=5, max_items=200))
    df_clinical.to_csv(out_clinical_data_file, index=False)
# STEP3
# 1. Use the get_genetic_data function from the library to get the gene_data from the matrix_file previously defined.
gene_data = get_genetic_data(matrix_file)

# 2. Print the first 20 row IDs (gene or probe identifiers) for future observation.
print(gene_data.index[:20])
# The listed identifiers (e.g., "cg00000292") appear to be CpG probe IDs rather than standard human gene symbols.
# Therefore, mapping is needed to associate each probe with corresponding gene information.
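# Quick illustrative check (assumption: Illumina methylation-style probe IDs start with "cg"):
print("First 100 IDs all look like CpG probes:",
      all(str(idx).startswith("cg") for idx in gene_data.index[:100]))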

requires_gene_mapping = True
# STEP5
# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.
gene_annotation = get_gene_annotation(soft_file)

# 2. Use the 'preview_df' function from the library to preview the data and print out the results.
print("Gene annotation preview:")
print(preview_df(gene_annotation))
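
# Before choosing mapping columns in the next step, confirm which annotation columns exist.
# (The column names used below are assumptions until verified against this output.)
print("Annotation columns:", list(gene_annotation.columns))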
# STEP6: Gene Identifier Mapping

# 1. Decide on the columns to use as probe ID and gene symbol
probe_col = "ID"      # Matches the gene expression dataframe index (e.g., 'cg00000292')
symbol_col = "Symbol" # Contains the gene symbols from the annotation

# 2. Get a gene mapping dataframe
gene_mapping_df = get_gene_mapping(gene_annotation, probe_col, symbol_col)

# 3. Convert probe-level measurements to gene expression data
gene_data = apply_gene_mapping(gene_data, gene_mapping_df)
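
# Informational sanity check: the mapped dataframe should now be indexed by gene symbols
# rather than probe IDs (exact contents depend on the annotation).
print("Mapped gene data shape:", gene_data.shape)
print("First mapped gene symbols:", list(gene_data.index[:10]))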
# STEP7

# 1. Normalize the obtained gene data using the NCBI Gene synonym database
normalized_gene_data = normalize_gene_symbols_in_index(gene_data)
normalized_gene_data.to_csv(out_gene_data_file)
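# Informational: report how many genes remain after symbol normalization.
print("Normalized gene data shape:", normalized_gene_data.shape)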

# 2. Link the clinical and genetic data
linked_data = geo_link_clinical_genetic_data(df_clinical, normalized_gene_data)

# 3. Handle missing values systematically, keyed on the trait column (whose name is stored in the variable 'trait')
linked_data_processed = handle_missing_values(linked_data, trait_col=trait)
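# Informational: report the dimensions after missing-value handling.
print("Linked data shape after missing-value handling:", linked_data_processed.shape)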

# 4. Check for biased trait and remove any biased demographic features
trait_biased, linked_data_final = judge_and_remove_biased_features(linked_data_processed, trait)

# 5. Final quality validation and metadata saving
is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=True,
    is_trait_available=True,
    is_biased=trait_biased,
    df=linked_data_final,
    note="Dataset processed with GEO pipeline. Checked for missing values and bias."
)

# 6. If dataset is usable, save the final linked data
if is_usable:
    linked_data_final.to_csv(out_data_file)
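else:
    # Final validation deemed the dataset unusable; skip saving the linked data
    # (informational message only).
    print(f"Dataset {cohort} did not pass final validation; linked data not saved to {out_data_file}.")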