# Path Configuration
from tools.preprocess import *

# Processing context
trait = "Arrhythmia"
cohort = "GSE41177"

# Input paths
in_trait_dir = "../DATA/GEO/Arrhythmia"
in_cohort_dir = "../DATA/GEO/Arrhythmia/GSE41177"

# Output paths
out_data_file = "./output/preprocess/1/Arrhythmia/GSE41177.csv"
out_gene_data_file = "./output/preprocess/1/Arrhythmia/gene_data/GSE41177.csv"
out_clinical_data_file = "./output/preprocess/1/Arrhythmia/clinical_data/GSE41177.csv"
json_path = "./output/preprocess/1/Arrhythmia/cohort_info.json"

# STEP 1

from tools.preprocess import *

# 1. Identify the paths to the SOFT file and the matrix file
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)

# 2. Read the matrix file to obtain background information and sample characteristics data
background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
background_info, clinical_data = get_background_and_clinical_data(
    matrix_file, 
    background_prefixes, 
    clinical_prefixes
)

# 3. Obtain the sample characteristics dictionary from the clinical dataframe
sample_characteristics_dict = get_unique_values_by_row(clinical_data)

# 4. Explicitly print out all the background information and the sample characteristics dictionary
print("Background Information:")
print(background_info)
print("\nSample Characteristics Dictionary:")
print(sample_characteristics_dict)
# 1. Gene Expression Data Availability
# Based on the background info ("microarray analysis..."), we consider that
# this dataset likely contains gene expression data.
is_gene_available = True

# 2.1 Data Availability
# The trait (arrhythmia) appears to be constant in all samples (all have AF),
# hence it's not useful for association studies.
trait_row = None
# The 'age' variable is found at key=2 with multiple distinct values.
age_row = 2
# The 'gender' variable is found at key=1 with multiple distinct values.
gender_row = 1

# 2.2 Data Type Conversion
def convert_trait(value: str):
    # The trait is not actually available (constant across all samples),
    # so we return None here.
    return None

def convert_age(value: str):
    # Example entry: "age: 62Y"
    # We parse the substring after ':' then remove 'Y' and convert to float.
    try:
        parts = value.split(':', 1)
        age_str = parts[1].replace('Y', '').strip() if len(parts) > 1 else ''
        return float(age_str)
    except (ValueError, AttributeError):
        return None
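
# Illustrative sanity check on the format cited above ("age: 62Y"); these
# literals are examples from the comments, not actual cohort values.
assert convert_age("age: 62Y") == 62.0
assert convert_age("age: unknown") is None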

def convert_gender(value: str):
    # Example entry: "gender: male"
    # Convert female -> 0, male -> 1
    parts = value.split(':', 1)
    g_str = parts[1].strip().lower() if len(parts) > 1 else ''
    if g_str == 'male':
        return 1
    elif g_str == 'female':
        return 0
    return None
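
# Illustrative sanity check using the example format cited above
# ("gender: male"): male -> 1, female -> 0, anything else -> None.
assert convert_gender("gender: male") == 1
assert convert_gender("gender: female") == 0
assert convert_gender("gender: unknown") is None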

# 3. Save Metadata (Initial Filtering)
# trait_row is None => trait data is not available
is_trait_available = (trait_row is not None)
is_usable = validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)
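
# Log the outcome of the initial filtering for traceability.
print(f"Initial filtering for {cohort}: is_usable={is_usable}")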

# 4. Clinical Feature Extraction
# Since trait_row is None, we skip the clinical feature extraction step.
# STEP 3
# 1. Use the get_genetic_data function from the library to get the gene_data from the matrix_file previously defined.
gene_data = get_genetic_data(matrix_file)

# 2. Print the first 20 row IDs (gene or probe identifiers) for future observation.
print(gene_data.index[:20])
# The row IDs (e.g., "1007_s_at", "1053_at") are Affymetrix probe set IDs,
# not human gene symbols, so mapping to gene symbols is required.

print("requires_gene_mapping = True")
# STEP 5
# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.
gene_annotation = get_gene_annotation(soft_file)

# 2. Use the 'preview_df' function from the library to preview the data and print out the results.
print("Gene annotation preview:")
print(preview_df(gene_annotation))
# STEP 6: Gene Identifier Mapping

# 1. Identify the columns corresponding to the probe identifiers and the gene symbols
#    From the annotation preview, the 'ID' column matches the probe identifiers in our gene_data,
#    and the 'Gene Symbol' column stores the gene symbols.

probe_col = "ID"
symbol_col = "Gene Symbol"
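
# Defensive check (illustrative; assumes gene_annotation is a pandas DataFrame,
# consistent with its use in preview_df above): fail early if the expected
# columns are absent from the annotation table.
missing_cols = [c for c in (probe_col, symbol_col) if c not in gene_annotation.columns]
if missing_cols:
    raise KeyError(f"Gene annotation is missing expected column(s): {missing_cols}")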

# 2. Extract the gene mapping DataFrame using the library function
gene_mapping_df = get_gene_mapping(gene_annotation, probe_col, symbol_col)

# 3. Convert probe-level measurements to gene-level expression data
gene_data = apply_gene_mapping(gene_data, gene_mapping_df)
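
# Brief summary of the mapping result (assumes gene_data is a pandas DataFrame
# indexed by gene symbol, consistent with how it is written to CSV below).
print(f"Gene-level expression data: {gene_data.shape[0]} genes x {gene_data.shape[1]} samples")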
# STEP 7: Data Normalization and Linking

import pandas as pd

# 1. Normalize gene symbols in the obtained gene expression data
normalized_gene_data = normalize_gene_symbols_in_index(gene_data)
normalized_gene_data.to_csv(out_gene_data_file)
print(f"Saved normalized gene data to {out_gene_data_file}")

# Since trait_row is None (trait is not available), we cannot link clinical data or perform trait-based analysis.
# We'll skip linking and bias evaluation of the trait.

# We'll still perform the final validation to record that this cohort lacks trait data.
placeholder_df = pd.DataFrame()  # Empty placeholder
is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=True,
    is_trait_available=False,  # trait not available
    is_biased=False,           # No trait → can't evaluate trait bias, set to False to proceed
    df=placeholder_df,         # Required argument, though empty
    note="No trait data available in the cohort."
)

# If the dataset is usable (unlikely since trait is missing), we would save final linked data. 
if is_usable:
    # Normally we would have a "linked_data" DataFrame to save; 
    # however, there's no trait, so no final data is produced.
    pass
else:
    print("Trait not available; skipping final data linkage and output.")