# Path Configuration
import pandas as pd  # needed for pd.read_csv when re-loading the saved clinical data

from tools.preprocess import *

# Processing context
trait = "Arrhythmia"
cohort = "GSE47727"

# Input paths
in_trait_dir = "../DATA/GEO/Arrhythmia"
in_cohort_dir = "../DATA/GEO/Arrhythmia/GSE47727"

# Output paths
out_data_file = "./output/preprocess/3/Arrhythmia/GSE47727.csv"
out_gene_data_file = "./output/preprocess/3/Arrhythmia/gene_data/GSE47727.csv"
out_clinical_data_file = "./output/preprocess/3/Arrhythmia/clinical_data/GSE47727.csv"
json_path = "./output/preprocess/3/Arrhythmia/cohort_info.json"

# Get file paths
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)

# Extract background info and clinical data 
background_info, clinical_data = get_background_and_clinical_data(matrix_file)

# Get unique values per clinical feature
sample_characteristics = get_unique_values_by_row(clinical_data)

# Print background info
print("Dataset Background Information:")
print(f"{background_info}\n")

# Print sample characteristics
print("Sample Characteristics:")
for feature, values in sample_characteristics.items():
    print(f"Feature: {feature}")
    print(f"Values: {values}\n")
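# Illustrative only: given the row indices used in the next step, the printed sample
# characteristics are expected to look roughly like this (hypothetical values, not
# copied from the real matrix file):
#
#   Feature: 0
#   Values: ['age (yrs): 55', 'age (yrs): 61', ...]
#   Feature: 1
#   Values: ['gender: female', 'gender: male']
#
# i.e. row 0 carries age and row 1 carries gender for this cohort.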
# 1. Gene Expression Data Availability
# Yes - the series title mentions HumanHT-12, which is a gene expression array platform
is_gene_available = True

# 2.1 Data Availability
trait_row = 0  # Placeholder row: all samples are known controls, so trait status can be assigned as a constant
age_row = 0    # Age data available in row 0
gender_row = 1 # Gender data available in row 1

# 2.2 Data Type Conversion Functions
def convert_trait(x):
    return 0  # All samples are controls (no arrhythmia) based on series summary

def convert_age(x):
    try:
        # Extract number after colon
        age = int(x.split(': ')[1])
        return age
    except (IndexError, ValueError, AttributeError):
        return None

def convert_gender(x):
    try:
        gender = x.split(': ')[1].lower()
        if gender == 'female':
            return 0
        elif gender == 'male':
            return 1
        return None
    except (IndexError, AttributeError):
        return None
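
# Quick sanity checks for the converters, using hypothetical strings in the
# "<label>: <value>" format assumed above (not actual sample values from this series)
assert convert_trait('status: control') == 0
assert convert_age('age (yrs): 61') == 61
assert convert_age('age (yrs): unknown') is None
assert convert_gender('gender: female') == 0
assert convert_gender('gender: male') == 1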

# 3. Save metadata
validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=True  # We can infer trait status for all samples
)

# 4. Clinical Feature Extraction
clinical_features = geo_select_clinical_features(
    clinical_df=clinical_data,
    trait=trait,
    trait_row=trait_row,
    convert_trait=convert_trait,
    age_row=age_row,
    convert_age=convert_age,
    gender_row=gender_row,
    convert_gender=convert_gender
)

# Preview the extracted features
preview = preview_df(clinical_features)
print("Preview of clinical features:")
print(preview)

# Save clinical features
clinical_features.to_csv(out_clinical_data_file)
# Get file paths
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)

# Extract gene expression data from matrix file
gene_data = get_genetic_data(matrix_file)

# Print the data shape and the first 20 row IDs to check the identifier format
print("Shape of gene expression data:", gene_data.shape)
print("\nFirst few rows of data:")
print(gene_data.head())
print("\nFirst 20 gene/probe identifiers:")
print(gene_data.index[:20])

# Inspect a snippet of raw file to verify identifier format
import gzip
with gzip.open(matrix_file, 'rt', encoding='utf-8') as f:
    lines = []
    for i, line in enumerate(f):
        if "!series_matrix_table_begin" in line:
            # Get the next 5 lines after the marker
            for _ in range(5):
                lines.append(next(f).strip())
            break
print("\nFirst few lines after matrix marker in raw file:")
for line in lines:
    print(line)
# The identifiers starting with 'ILMN_' indicate these are Illumina probe IDs
# These need to be mapped to standard human gene symbols
requires_gene_mapping = True
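
# Conceptual illustration of the probe-to-gene collapse with toy data (the _demo_*
# names are made up here; apply_gene_mapping used later may handle multi-probe and
# multi-gene cases differently):
_demo_expr = pd.DataFrame(
    {'S1': [1.0, 3.0, 5.0], 'S2': [2.0, 4.0, 6.0]},
    index=['ILMN_0001', 'ILMN_0002', 'ILMN_0003'],
)
_demo_map = pd.DataFrame({'ID': _demo_expr.index, 'Gene': ['GENE_A', 'GENE_A', 'GENE_B']})
# Probes targeting the same gene are combined (here: a simple mean) into one gene row
print("\nToy probe-to-gene collapse:")
print(_demo_expr.join(_demo_map.set_index('ID')).groupby('Gene').mean())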
# Get file paths
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)

# Extract gene annotation from SOFT file 
gene_annotation = get_gene_annotation(soft_file)

# Preview annotation dataframe structure
print("Gene Annotation Preview:")
print("Column names:", gene_annotation.columns.tolist())
print("\nFirst few rows as dictionary:")
print(preview_df(gene_annotation))
# Extract probe ID and gene symbol mapping from annotation data
mapping_data = get_gene_mapping(gene_annotation, prob_col='ID', gene_col='Symbol')

# Apply gene mapping to convert probe-level data to gene expression data
gene_data = apply_gene_mapping(gene_data, mapping_data)

# Preview mapped gene data
print("Shape of gene expression data after mapping:", gene_data.shape)
print("\nFirst few rows of mapped data:")
print(gene_data.head())
print("\nFirst 20 gene symbols:")
print(gene_data.index[:20].tolist())
# 1. Normalize gene symbols
gene_data = normalize_gene_symbols_in_index(gene_data)
gene_data.to_csv(out_gene_data_file)

# 2. Link clinical and genetic data 
clinical_data = pd.read_csv(out_clinical_data_file, index_col=0)
linked_data = geo_link_clinical_genetic_data(clinical_data, gene_data)
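
# For orientation: the linked table is assumed to be samples x features, with the
# clinical columns first and gene columns after, roughly like this hypothetical layout:
#
#                Arrhythmia   Age   Gender   <gene 1>   <gene 2>   ...
#   <sample 1>          0.0    61      0.0        7.1        9.3   ...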

# 3. Handle missing values
linked_data = handle_missing_values(linked_data, trait)

# 4. Evaluate bias
is_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)

# 5. Validate and save cohort info
is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=True,
    is_trait_available=True,
    is_biased=is_biased,
    df=linked_data
)

# 6. Save linked data if usable
if is_usable:
    linked_data.to_csv(out_data_file)