# Path Configuration
from tools.preprocess import *
from typing import Optional  # used by the convert_* type hints below

# Processing context
trait = "Atrial_Fibrillation"
cohort = "GSE143924"

# Input paths
in_trait_dir = "../DATA/GEO/Atrial_Fibrillation"
in_cohort_dir = "../DATA/GEO/Atrial_Fibrillation/GSE143924"

# Output paths
out_data_file = "./output/preprocess/1/Atrial_Fibrillation/GSE143924.csv"
out_gene_data_file = "./output/preprocess/1/Atrial_Fibrillation/gene_data/GSE143924.csv"
out_clinical_data_file = "./output/preprocess/1/Atrial_Fibrillation/clinical_data/GSE143924.csv"
json_path = "./output/preprocess/1/Atrial_Fibrillation/cohort_info.json"
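
# The output directories under ./output/preprocess/1/... may not exist yet; creating them up front
# avoids to_csv failures later. This is a small illustrative guard (an assumption about the runtime
# environment), not part of the original pipeline steps.
import os
for _path in (out_data_file, out_gene_data_file, out_clinical_data_file, json_path):
    os.makedirs(os.path.dirname(_path), exist_ok=True)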

# STEP1
# 1. Identify the paths to the SOFT file and the matrix file
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)

# 2. Read the matrix file to obtain background information and sample characteristics data
background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)

# 3. Obtain the sample characteristics dictionary from the clinical dataframe
sample_characteristics_dict = get_unique_values_by_row(clinical_data)

# 4. Explicitly print out all the background information and the sample characteristics dictionary
print("Background Information:")
print(background_info)
print("Sample Characteristics Dictionary:")
print(sample_characteristics_dict)
############################
# 1. Gene Expression Data Availability
############################
is_gene_available = True  # Based on "Whole-tissue gene expression patterns" in the series summary

############################
# 2. Variable Availability and Data Type Conversion
############################
# From the sample characteristics dictionary:
# {0: ['tissue: epicardial adipose tissue'],
#  1: ['patient diagnosis: sinus rhythm after surgery',
#      'patient diagnosis: postoperative atrial fibrillation after surgery (POAF)']}

# The trait "Atrial_Fibrillation" can be inferred from row 1 since it contains
# "sinus rhythm after surgery" vs. "postoperative atrial fibrillation after surgery (POAF)".
trait_row = 1

# There's no mention of age or gender information in the dictionary,
# thus they are considered not available.
age_row = None
gender_row = None

# Data Type Conversions
def convert_trait(value: str) -> Optional[int]:
    # Extract the value after the colon
    parts = value.split(':', 1)
    val_str = parts[1].strip() if len(parts) > 1 else value.strip()
    
    # Map recognized patterns to 0 or 1
    if val_str.lower() == 'sinus rhythm after surgery':
        return 0
    elif 'postoperative atrial fibrillation' in val_str.lower():
        return 1
    else:
        return None
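
# A quick illustrative sanity check of convert_trait on the two diagnosis strings observed in the
# sample characteristics dictionary above (purely diagnostic, not part of the pipeline output).
print(convert_trait('patient diagnosis: sinus rhythm after surgery'))  # expected: 0
print(convert_trait('patient diagnosis: postoperative atrial fibrillation after surgery (POAF)'))  # expected: 1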

def convert_age(value: str) -> Optional[float]:
    # No age information is recorded for this cohort; return None
    return None

def convert_gender(value: str) -> Optional[int]:
    # No gender information is recorded for this cohort; return None
    return None

############################
# 3. Save Metadata (Initial Filtering)
############################
is_trait_available = trait_row is not None
is_usable = validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)

############################
# 4. Clinical Feature Extraction
############################
if trait_row is not None:
    # clinical_data (loaded in STEP1) is the sample characteristics DataFrame
    selected_clinical = geo_select_clinical_features(
        clinical_df=clinical_data,
        trait=trait,
        trait_row=trait_row,
        convert_trait=convert_trait,
        age_row=age_row,
        convert_age=convert_age,
        gender_row=gender_row,
        convert_gender=convert_gender
    )
    
    # Preview the extracted clinical features
    preview = preview_df(selected_clinical, n=5)
    print("Selected Clinical Features Preview:", preview)
    
    # Save the selected clinical data
    selected_clinical.to_csv(out_clinical_data_file, index=False)
# STEP3
# 1. Extract the gene expression data from the previously identified matrix file using get_genetic_data.
gene_data = get_genetic_data(matrix_file)

# 2. Print the first 20 row IDs (gene or probe identifiers) for future observation.
print(gene_data.index[:20])
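
# Optional rough heuristic (an illustrative assumption: human gene symbols are mostly uppercase
# alphanumeric strings such as 'A1BG' or 'NPPA'); it supplements, not replaces, visual inspection
# of the identifiers printed above.
import re
_first_ids = [str(i) for i in gene_data.index[:100]]
_symbol_like = sum(bool(re.fullmatch(r'[A-Z0-9][A-Z0-9\-]*', i)) for i in _first_ids)
print(f"{_symbol_like} of the first {len(_first_ids)} identifiers look like gene symbols")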
# Based on biomedical knowledge, the listed identifiers appear to be recognized human gene symbols
# rather than platform probe IDs, so no additional probe-to-gene-symbol mapping is required.
requires_gene_mapping = False
print(f"requires_gene_mapping = {requires_gene_mapping}")
# STEP5

# 1. Normalize the obtained gene data using the NCBI Gene synonym database
normalized_gene_data = normalize_gene_symbols_in_index(gene_data)
normalized_gene_data.to_csv(out_gene_data_file)
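# A small diagnostic (not required by the pipeline): report the matrix size after symbol
# normalization to catch an empty or heavily collapsed result early.
print(f"Normalized gene data shape: {normalized_gene_data.shape}")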

# 2. Link the clinical and genetic data
linked_data = geo_link_clinical_genetic_data(selected_clinical, normalized_gene_data)

# 3. Handle missing values systematically
linked_data_processed = handle_missing_values(linked_data, trait_col=trait)

# 4. Check for biased trait and remove any biased demographic features
trait_biased, linked_data_final = judge_and_remove_biased_features(linked_data_processed, trait)

# 5. Final quality validation and metadata saving
is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=True,
    is_trait_available=True,
    is_biased=trait_biased,
    df=linked_data_final,
    note="Dataset processed with GEO pipeline. Checked for missing values and bias."
)

# 6. If dataset is usable, save the final linked data
if is_usable:
    linked_data_final.to_csv(out_data_file)