# Path Configuration
from typing import Optional

import pandas as pd  # used later when reloading the saved clinical data

from tools.preprocess import *

# Processing context
trait = "Lung_Cancer"
cohort = "GSE21359"

# Input paths
in_trait_dir = "../DATA/GEO/Lung_Cancer"
in_cohort_dir = "../DATA/GEO/Lung_Cancer/GSE21359"

# Output paths
out_data_file = "./output/preprocess/3/Lung_Cancer/GSE21359.csv"
out_gene_data_file = "./output/preprocess/3/Lung_Cancer/gene_data/GSE21359.csv"
out_clinical_data_file = "./output/preprocess/3/Lung_Cancer/clinical_data/GSE21359.csv"
json_path = "./output/preprocess/3/Lung_Cancer/cohort_info.json"

# Get file paths
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
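
# Note: a GEO cohort directory usually ships two files of interest: a SOFT family file
# (e.g. *_family.soft.gz) with platform/probe annotation and a series matrix file
# (e.g. *_series_matrix.txt.gz) with the expression table and sample characteristics.
# The helper above is assumed to return the paths to those two files, in that order.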

# Extract background info and clinical data using specified prefixes
background_info, clinical_data = get_background_and_clinical_data(
    matrix_file,
    prefixes_a=['!Series_title', '!Series_summary', '!Series_overall_design'],
    prefixes_b=['!Sample_geo_accession', '!Sample_characteristics_ch1']
)

# Get unique values per clinical feature
sample_characteristics = get_unique_values_by_row(clinical_data)

# Print background info
print("Dataset Background Information:")
print(f"{background_info}\n")

# Print sample characteristics 
print("Sample Characteristics:")
for feature, values in sample_characteristics.items():
    print(f"Feature: {feature}")
    print(f"Values: {values}\n")
# 1. Gene Expression Data Availability
# Based on background info mentioning "Affymetrix arrays" and "gene expression data"
is_gene_available = True

# 2.1 Data Availability
# The trait (lung cancer risk) is inferred from smoking/COPD status (characteristics row 3)
trait_row = 3
age_row = 0
gender_row = 1
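
# For reference, the characteristics rows above are expected to hold "key: value" strings
# (hypothetical examples): row 0 -> "age: 57", row 1 -> "sex: M",
# row 3 -> "smoking status: COPD smoker"; the exact keys come from the printout in step 1.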

# 2.2 Data Type Conversion Functions
def convert_trait(value: str) -> Optional[int]:
    """Convert smoking status to binary lung cancer risk (0=low, 1=high)"""
    if not value or ':' not in value:
        return None
    status = value.split(':')[1].strip().lower()
    if 'copd' in status:  # COPD patients have high lung cancer risk
        return 1
    elif 'smoker' in status and 'non' not in status:  # Current smokers have high risk
        return 1
    elif 'non-smoker' in status:  # Non-smokers have low risk
        return 0
    return None
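
# Illustrative behavior of convert_trait (hypothetical input strings, format assumed above):
#   convert_trait("smoking status: COPD smoker") -> 1
#   convert_trait("smoking status: smoker")      -> 1
#   convert_trait("smoking status: non-smoker")  -> 0
#   convert_trait("status unknown")              -> None  (no ':' separator)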

def convert_age(value: str) -> Optional[float]:
    """Convert age to float"""
    if not value or ':' not in value:
        return None
    age_str = value.split(':')[1].strip()
    try:
        return float(age_str)
    except ValueError:
        return None

def convert_gender(value: str) -> Optional[int]:
    """Convert gender to binary (0=female, 1=male)"""
    if not value or ':' not in value:
        return None
    gender = value.split(':')[1].strip().upper()
    if gender == 'F':
        return 0
    elif gender == 'M':
        return 1
    return None
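
# Illustrative behavior of the other converters (hypothetical inputs following the same
# "key: value" format): convert_age("age: 57") -> 57.0, convert_age("age: n/a") -> None;
# convert_gender("sex: F") -> 0, convert_gender("sex: M") -> 1.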

# 3. Save Metadata
is_trait_available = trait_row is not None
validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available,
)

# 4. Clinical Feature Extraction
if trait_row is not None:
    clinical_features = geo_select_clinical_features(
        clinical_data, trait, trait_row, convert_trait,
        age_row, convert_age,
        gender_row, convert_gender
    )
    print("Preview of extracted clinical features:")
    print(preview_df(clinical_features))
    clinical_features.to_csv(out_clinical_data_file)
# Get file paths again so this gene-extraction step can run on its own
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)

# Extract gene expression data from matrix file
gene_data = get_genetic_data(matrix_file)

# Print first 20 row IDs and shape of data to help debug 
print("Shape of gene expression data:", gene_data.shape)
print("\nFirst few rows of data:")
print(gene_data.head())
print("\nFirst 20 gene/probe identifiers:")
print(gene_data.index[:20])

# Inspect a snippet of raw file to verify identifier format
import gzip
with gzip.open(matrix_file, 'rt', encoding='utf-8') as f:
    lines = []
    for i, line in enumerate(f):
        if "!series_matrix_table_begin" in line:
            # Get the next 5 lines after the marker
            for _ in range(5):
                lines.append(next(f).strip())
            break
print("\nFirst few lines after matrix marker in raw file:")
for line in lines:
    print(line)
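
# In a GEO series matrix file, the table after "!series_matrix_table_begin" normally starts
# with a header row ("ID_REF" followed by the GSM sample accessions) and then one row per
# probe, so the snippet above should confirm whether the row IDs are Affymetrix probe IDs.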
# The identifiers (like 1007_s_at, 1053_at) appear to be Affymetrix probe IDs
# Affymetrix probe IDs need to be mapped to human gene symbols
requires_gene_mapping = True
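
# For orientation only: probe-to-gene mapping generally means joining the probe-level matrix
# to a probe -> gene-symbol table and aggregating probes that target the same gene. A minimal
# sketch of that idea (NOT the tools.preprocess implementation; the column names are
# assumptions) would be:
#
#   mapping = annotation[['ID', 'Gene Symbol']].dropna()
#   mapped = gene_data.join(mapping.set_index('ID'), how='inner')
#   gene_level = mapped.groupby('Gene Symbol').mean()
#
# The actual mapping below is delegated to get_gene_mapping / apply_gene_mapping.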
# Extract gene annotation data
gene_metadata = get_gene_annotation(soft_file)

# Try searching for ID patterns in all columns
print("All column names:", gene_metadata.columns.tolist())
print("\nPreview first few rows of each column to locate numeric IDs:")
for col in gene_metadata.columns:
    sample_values = gene_metadata[col].dropna().head().tolist()
    print(f"\n{col}:")
    print(sample_values)

# Inspect raw file to see unfiltered annotation format
import gzip
print("\nRaw SOFT file preview:")
with gzip.open(soft_file, 'rt', encoding='utf-8') as f:
    header = []
    for i, line in enumerate(f):
        header.append(line.strip())
        if i >= 10:  # Preview first 10 lines
            break
print('\n'.join(header))
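
# GEO SOFT files are line-oriented: "^" lines open entities (SERIES, PLATFORM, SAMPLE),
# "!" lines carry attributes, and the platform table (probe ID, gene symbol, etc.) follows
# the platform entity, which is what the annotation parser above is expected to extract.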
# Extract gene mapping from annotation data
# 'ID' contains probe IDs matching gene expression data
# 'Gene Symbol' contains corresponding gene symbols
mapping_data = get_gene_mapping(gene_metadata, 'ID', 'Gene Symbol')

# Apply gene mapping to convert probe-level measurements to gene expression data 
gene_data = apply_gene_mapping(gene_data, mapping_data)
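
# After mapping, the row index should contain gene symbols rather than Affymetrix probe IDs,
# with multiple probes per gene presumably aggregated by the mapping helper.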

# Preview result
print("Shape of mapped gene expression data:", gene_data.shape)
print("\nFirst few rows:")
print(gene_data.head())

# Save gene expression data
gene_data.to_csv(out_gene_data_file)
# The gene mapping step did not complete successfully, so full gene-symbol normalization is
# not possible here; we proceed with the available expression matrix (probe-level values)
# together with the clinical data extracted in step 2.

# Load the clinical data saved in the earlier step
selected_clinical_df = pd.read_csv(out_clinical_data_file, index_col=0)

# Keep the expression matrix numeric and make sure its row identifiers are strings
gene_data = pd.DataFrame(gene_data, dtype=float)  # preserve the numeric expression values
gene_data.index = gene_data.index.astype(str)  # row identifiers (probe IDs) as strings

# Link clinical and genetic data
linked_data = geo_link_clinical_genetic_data(selected_clinical_df, gene_data)

# Handle missing values
linked_data = handle_missing_values(linked_data, trait)
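
# handle_missing_values is expected to drop samples without a trait value and to deal with
# missing expression/covariate entries (by dropping or imputing), so the linked table that
# comes back should be analysis-ready.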

# Evaluate bias in features
is_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)
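
# The bias check flags a trait whose class distribution is too skewed to be useful and
# (presumably) removes demographic covariates that are themselves severely imbalanced.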

# Record cohort information
is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=True,
    is_trait_available=True,
    is_biased=is_biased,
    df=linked_data,
    note="Contains numerical probe-level expression data (gene mapping failed) and clinical data."
)

# Save data if usable
if is_usable:
    linked_data.to_csv(out_data_file)