# Path Configuration
from tools.preprocess import *
# Processing context
trait = "Arrhythmia"
cohort = "GSE115574"
# Input paths
in_trait_dir = "../DATA/GEO/Arrhythmia"
in_cohort_dir = "../DATA/GEO/Arrhythmia/GSE115574"
# Output paths
out_data_file = "./output/preprocess/1/Arrhythmia/GSE115574.csv"
out_gene_data_file = "./output/preprocess/1/Arrhythmia/gene_data/GSE115574.csv"
out_clinical_data_file = "./output/preprocess/1/Arrhythmia/clinical_data/GSE115574.csv"
json_path = "./output/preprocess/1/Arrhythmia/cohort_info.json"
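# The output directories may not exist yet. The tools.preprocess helpers are
# assumed here not to create them, so create them defensively up front
# (harmless if they already exist).
import os
for _path in (out_data_file, out_gene_data_file, out_clinical_data_file, json_path):
    os.makedirs(os.path.dirname(_path), exist_ok=True)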
# STEP 1
from tools.preprocess import *
# 1. Identify the paths to the SOFT file and the matrix file
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# 2. Read the matrix file to obtain background information and sample characteristics data
background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
background_info, clinical_data = get_background_and_clinical_data(
    matrix_file,
    background_prefixes,
    clinical_prefixes
)
# 3. Obtain the sample characteristics dictionary from the clinical dataframe
sample_characteristics_dict = get_unique_values_by_row(clinical_data)
# 4. Explicitly print out all the background information and the sample characteristics dictionary
print("Background Information:")
print(background_info)
print("\nSample Characteristics Dictionary:")
print(sample_characteristics_dict)
# Step 1: Determine if gene expression data is available
is_gene_available = True # Based on the background info (Affymetrix human gene expression microarrays)
# Step 2: Identify data availability and define conversion functions
# After reviewing the sample characteristics dictionary:
# {0: ['disease state: atrial fibrillation patient with severe mitral regurgitation',
#      'disease state: sinus rhythm patient with severe mitral regurgitation'],
#  1: ['tissue: left atrium - heart',
#      'tissue: right atrium - heart']}
# The "trait" 'Arrhythmia' can be inferred from row 0, which distinguishes AFib vs. sinus rhythm.
trait_row = 0
# Age and gender data are not apparent in the dictionary, so set them to None.
age_row = None
gender_row = None
def convert_trait(value):
    """
    Convert the disease state string to a binary indicator for arrhythmia (AFib).
    Return 1 if the string indicates atrial fibrillation, 0 if sinus rhythm, else None.
    """
    try:
        after_colon = value.split(':', 1)[1].strip().lower()
    except IndexError:
        return None
    if 'atrial fibrillation' in after_colon:
        return 1
    elif 'sinus rhythm' in after_colon:
        return 0
    return None
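# Quick sanity check of the conversion logic against the two disease-state
# strings observed in the sample characteristics dictionary above (an added
# illustration, not part of the original pipeline).
assert convert_trait("disease state: atrial fibrillation patient with severe mitral regurgitation") == 1
assert convert_trait("disease state: sinus rhythm patient with severe mitral regurgitation") == 0
assert convert_trait("no colon here") is None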
def convert_age(value):
    """
    Age data is not available in this dataset. Return None.
    """
    return None
def convert_gender(value):
    """
    Gender data is not available in this dataset. Return None.
    """
    return None
# Step 3: Conduct initial filtering and save metadata
is_trait_available = (trait_row is not None)
is_usable = validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)
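# Added for visibility: validate_and_save_cohort_info already records this result
# in the JSON file at json_path, but an inline print makes the outcome obvious.
print(f"Initial filtering for {cohort}: usable = {is_usable}")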
# Step 4: If trait data is available, extract clinical features and save
if trait_row is not None:
    selected_clinical_df = geo_select_clinical_features(
        clinical_df=clinical_data,
        trait=trait,
        trait_row=trait_row,
        convert_trait=convert_trait,
        age_row=age_row,
        convert_age=convert_age,
        gender_row=gender_row,
        convert_gender=convert_gender
    )
    preview_result = preview_df(selected_clinical_df, n=5)
    selected_clinical_df.to_csv(out_clinical_data_file, index=False)
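    # Show the preview computed above so the extracted clinical features can be
    # inspected before linking (added; the original assigned preview_result
    # without displaying it).
    print("Clinical features preview:", preview_result)
    print(f"Saved clinical data to {out_clinical_data_file}")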
# STEP 3
# 1. Use the get_genetic_data function from the library to extract the gene expression data from the matrix file identified earlier.
gene_data = get_genetic_data(matrix_file)
# 2. Print the first 20 row IDs (gene or probe identifiers) for future observation.
print(gene_data.index[:20])
# These identifiers (e.g., '1007_s_at', '1053_at') are Affymetrix probe IDs, not standard gene symbols.
# Therefore, mapping to gene symbols is required.
requires_gene_mapping = True
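# Added sanity check: confirm the probe-level matrix orientation (rows = probes,
# columns = samples) before mapping probes to gene symbols.
print("Probe-level gene_data shape:", gene_data.shape)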
# STEP 5
# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.
gene_annotation = get_gene_annotation(soft_file)
# 2. Use the 'preview_df' function from the library to preview the data and print out the results.
print("Gene annotation preview:")
print(preview_df(gene_annotation))
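# Also list the annotation columns explicitly, so the column names used for the
# mapping below ("ID" and "Gene Symbol") can be verified against the SOFT file.
print("Annotation columns:", list(gene_annotation.columns))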
# Gene Identifier Mapping
# 1. Identify the columns in 'gene_annotation' that match the probe IDs in 'gene_data' and the gene symbols.
# From the preview, the probe ID is stored in column "ID", and gene symbols are stored in "Gene Symbol".
# 2. Extract the gene mapping dataframe.
mapping_df = get_gene_mapping(gene_annotation, prob_col="ID", gene_col="Gene Symbol")
# 3. Convert probe-level measurements to gene expression data.
gene_data = apply_gene_mapping(gene_data, mapping_df)
# For verification, let's print the new gene_data shape and a sample of its row indices.
print("Mapped gene_data shape:", gene_data.shape)
print("Sample gene symbols:", list(gene_data.index[:10]))
# STEP 7: Data Normalization and Linking
# 1. Normalize gene symbols in the obtained gene expression data
normalized_gene_data = normalize_gene_symbols_in_index(gene_data)
normalized_gene_data.to_csv(out_gene_data_file)
print(f"Saved normalized gene data to {out_gene_data_file}")
# 2. Link the clinical and genetic data on sample IDs
linked_data = geo_link_clinical_genetic_data(selected_clinical_df, normalized_gene_data)
# 3. Handle missing values in the linked data
linked_data = handle_missing_values(linked_data, trait_col=trait)
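# Added diagnostic: missing-value handling can drop both samples and features,
# so report the resulting shape before the bias check.
print("Linked data shape after missing-value handling:", linked_data.shape)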
# 4. Determine whether the trait/demographic features are severely biased
trait_biased, linked_data = judge_and_remove_biased_features(linked_data, trait=trait)
# 5. Conduct final quality validation and save metadata
is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=True,
    is_trait_available=True,
    is_biased=trait_biased,
    df=linked_data,
    note="Trait data and gene data successfully linked."
)
# 6. If the dataset is deemed usable, save the final linked data as a CSV file
if is_usable:
    linked_data.to_csv(out_data_file)
    print(f"Saved final linked data to {out_data_file}")
else:
    print("Dataset was not deemed usable; final linked data not saved.")