# Path Configuration
from tools.preprocess import *

# Standard-library and pandas imports used directly in this script
import os
import io
import json
import gzip
import pandas as pd

# Processing context
trait = "Heart_rate"
cohort = "GSE18583"

# Input paths
in_trait_dir = "../DATA/GEO/Heart_rate"
in_cohort_dir = "../DATA/GEO/Heart_rate/GSE18583"

# Output paths
out_data_file = "./output/preprocess/3/Heart_rate/GSE18583.csv"
out_gene_data_file = "./output/preprocess/3/Heart_rate/gene_data/GSE18583.csv"
out_clinical_data_file = "./output/preprocess/3/Heart_rate/clinical_data/GSE18583.csv"
json_path = "./output/preprocess/3/Heart_rate/cohort_info.json"

# Get file paths
soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)

# Get background info and clinical data
background_info, clinical_data = get_background_and_clinical_data(matrix_file_path)

# Get unique values for each clinical feature 
unique_values_dict = get_unique_values_by_row(clinical_data)

# Print background information
print("Background Information:")
print(background_info)
print("\nSample Characteristics:")
print(json.dumps(unique_values_dict, indent=2))

# 1. Gene Expression Data Availability
is_gene_available = True  # Yes, based on background info this is gene expression data

# 2.1 Data Availability
trait_row = 2  # "heart rate (bpm)" data is in row 2
age_row = None  # Age is not available
gender_row = 0  # Gender info is in row 0 (all male)

# 2.2 Data Type Conversion Functions
def convert_trait(value):
    if pd.isna(value):
        return None
    try:
        # Extract the numeric value after the "heart rate (bpm): " prefix
        return float(value.split(": ")[1])
    except (IndexError, ValueError):
        return None

def convert_age(value):
    # Not used since age data is not available
    return None

def convert_gender(value):
    if pd.isna(value):
        return None
    # All samples are male based on background info
    return 1
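
# Optional sanity check of the converters. The example strings below only
# illustrate the "key: value" format GEO uses for sample characteristics;
# they are not values taken from this series matrix.
assert convert_trait("heart rate (bpm): 62") == 62.0
assert convert_trait("heart rate (bpm)") is None  # malformed entry -> None
assert convert_gender("gender: male") == 1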

# 3. Save Metadata
initial_validation = validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=(trait_row is not None)
)

# 4. Clinical Feature Extraction
if trait_row is not None:
    clinical_features = geo_select_clinical_features(
        clinical_data,
        trait=trait,
        trait_row=trait_row,
        convert_trait=convert_trait,
        age_row=age_row,
        convert_age=convert_age,
        gender_row=gender_row,
        convert_gender=convert_gender
    )
    
    # Preview the extracted features
    preview = preview_df(clinical_features)
    print("Preview of clinical features:")
    print(preview)
    
    # Save clinical data
    os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)
    clinical_features.to_csv(out_clinical_data_file)

# Extract gene expression data from the matrix file
genetic_data = get_genetic_data(matrix_file_path)

# Print the first 20 row IDs
print("First 20 row IDs (gene/probe identifiers):")
print(genetic_data.index[:20].tolist())

# The identifiers start with 'ENST', which indicates Ensembl transcript IDs
# They need to be mapped to human gene symbols for downstream analysis
requires_gene_mapping = True

# Examine the first lines of the SOFT file to understand the annotation structure
with gzip.open(soft_file_path, 'rt') as f:
    for i, line in enumerate(f):
        if i < 100:  # Look at first 100 lines
            print(line.strip())
        else:
            break
            
# After examining file content, extract gene annotation with appropriate prefixes
gene_metadata = get_gene_annotation(soft_file_path, prefixes=['#', '!', '^', '@'])

# Print all column names
print("\nAll annotation columns:")
print(list(gene_metadata.columns))

# Preview contents with larger max_items
preview = preview_df(gene_metadata, max_items=1000)
print("\nGene annotation preview:")
print(preview)

# 1. Re-examine the SOFT file and extract the platform annotation table
with gzip.open(soft_file_path, 'rt') as f:
    chip_annotation = ''
    reading = False
    for line in f:
        if '!platform_table_begin' in line:
            reading = True
            continue
        if reading:
            if '!platform_table_end' in line:
                break
            chip_annotation += line

# Read platform annotation into dataframe
annotation_df = pd.read_csv(io.StringIO(chip_annotation), sep='\t')
print("\nAvailable annotation columns:")
print(list(annotation_df.columns))

# Let's look at more rows to understand the annotation structure
print("\nFirst 10 rows of annotation:")
print(annotation_df.head(10))

# For this dataset, all we have are Ensembl transcript IDs
# As a workaround, we'll extract gene symbols from the transcript IDs themselves
def extract_gene_symbol(transcript_id):
    # Probe IDs on this platform embed the gene symbol between the Ensembl
    # transcript accession and the '_at' suffix, e.g. ENST00000230882_CDC42_at
    parts = transcript_id.split('_')
    if len(parts) > 2:  # Format ENSTxxxx_GENE_at: the middle part is the symbol
        return parts[1]
    return transcript_id  # No embedded gene symbol; keep the original ID
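
# Illustrative check of the helper on IDs that follow the format assumed above
# (example accessions, not taken from the actual matrix):
assert extract_gene_symbol("ENST00000230882_CDC42_at") == "CDC42"
assert extract_gene_symbol("ENST00000230882_at") == "ENST00000230882_at"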

# Create mapping dataframe
mapping_data = pd.DataFrame({
    'ID': genetic_data.index,
    'Gene': [extract_gene_symbol(id) for id in genetic_data.index]
})

# Convert probe-level data to gene expression data
gene_data = apply_gene_mapping(genetic_data, mapping_data)

# Preview mapped gene data
print("\nFirst few genes and their expression values:")
print(preview_df(gene_data))

# Save gene expression data
os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
gene_data.to_csv(out_gene_data_file)
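
# Quick shape report (illustrative; dimensions depend on the dataset)
print(f"Mapped gene expression data: {gene_data.shape[0]} genes x {gene_data.shape[1]} samples")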

# 1. Skip standard gene symbol normalization; most index entries are already gene
# symbols extracted above, so only strip the '_at' suffix left on unmapped IDs
gene_data.index = gene_data.index.str.replace('_at', '')
gene_data.to_csv(out_gene_data_file)

# 2. Link clinical and genetic data
clinical_data = pd.read_csv(out_clinical_data_file, index_col=0)

# Debug sample ID alignment
print("Clinical data samples:", clinical_data.columns.tolist()[:5])
print("Genetic data samples:", gene_data.columns.tolist()[:5])

# Ensure sample IDs match by removing 'GSM' prefix if present
clinical_data.columns = clinical_data.columns.str.replace('GSM', '')
gene_data.columns = gene_data.columns.str.replace('GSM', '')
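
# Hypothetical alignment check (not part of the original pipeline): confirm the
# two tables now share sample IDs before linking them.
shared_samples = set(clinical_data.columns) & set(gene_data.columns)
print(f"Samples shared by clinical and genetic data: {len(shared_samples)}")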

# Link the data
linked_data = geo_link_clinical_genetic_data(clinical_data, gene_data)

# Restore trait name which might have been lost during linking
linked_data = linked_data.rename(index={'Heart_rate': trait})

# 3. Handle missing values systematically 
linked_data = handle_missing_values(linked_data, trait)

# 4. Check bias in trait and demographic features
is_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)

# 5. Final validation and saving metadata
is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=(trait_row is not None),
    is_biased=is_biased,
    df=linked_data,
    note="This dataset contains gene expression from skeletal muscle tissue measured with Ensembl transcript IDs. Gene symbol mapping was not possible. Gender is constant (all male). Heart rate measurement is available for most but not all samples."
)

# 6. Save linked data if usable
if is_usable:
    os.makedirs(os.path.dirname(out_data_file), exist_ok=True)
    linked_data.to_csv(out_data_file)