# Path Configuration
from tools.preprocess import *
# Processing context
trait = "Arrhythmia"
cohort = "GSE55231"
# Input paths
in_trait_dir = "../DATA/GEO/Arrhythmia"
in_cohort_dir = "../DATA/GEO/Arrhythmia/GSE55231"
# Output paths
out_data_file = "./output/preprocess/1/Arrhythmia/GSE55231.csv"
out_gene_data_file = "./output/preprocess/1/Arrhythmia/gene_data/GSE55231.csv"
out_clinical_data_file = "./output/preprocess/1/Arrhythmia/clinical_data/GSE55231.csv"
json_path = "./output/preprocess/1/Arrhythmia/cohort_info.json"
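# The CSV/JSON writes below assume the output directories already exist. If the
# library's save helpers do not create them, the following optional safeguard
# (a sketch, not part of the original pipeline) prevents FileNotFoundError:
import os
for _out in (out_data_file, out_gene_data_file, out_clinical_data_file, json_path):
    os.makedirs(os.path.dirname(_out), exist_ok=True)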
# STEP 1
from tools.preprocess import *
# 1. Identify the paths to the SOFT file and the matrix file
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
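# geo_get_relevant_filepaths is expected to return the GEO SOFT file (platform
# annotation) and the series matrix file (expression values plus sample
# metadata) located in the cohort directory, typically as gzip-compressed text.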
# 2. Read the matrix file to obtain background information and sample characteristics data
background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
background_info, clinical_data = get_background_and_clinical_data(
    matrix_file,
    background_prefixes,
    clinical_prefixes
)
# 3. Obtain the sample characteristics dictionary from the clinical dataframe
sample_characteristics_dict = get_unique_values_by_row(clinical_data)
# 4. Explicitly print out all the background information and the sample characteristics dictionary
print("Background Information:")
print(background_info)
print("\nSample Characteristics Dictionary:")
print(sample_characteristics_dict)
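# The dictionary maps each characteristics row index to its unique values, e.g.
# (hypothetical values for illustration; the real ones come from the printout):
# {0: ['gender: Male', 'gender: Female'], 1: ['tissue: ...'], 2: ['age: 57', 'age: 63', ...]}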
# 1. Determine if gene expression data is available
is_gene_available = True # Based on study description (eQTL analysis, transcription profiling)
# 2. Identify variable availability
# Trait "Arrhythmia" is not listed in the sample characteristics, so treat it as not available.
trait_row = None
# Age is provided under key 2
age_row = 2
# Gender is provided under key 0
gender_row = 0
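# The row indices above refer to the keys of the sample characteristics
# dictionary printed in the previous step.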
# 2.2 Define conversion functions
def convert_trait(value: str):
    # Trait data is not available. Return None for all inputs.
    return None

def convert_age(value: str):
    # Parse the string after the colon and convert to float if possible
    parts = value.split(':', 1)
    raw = parts[1].strip() if len(parts) > 1 else parts[0].strip()
    try:
        return float(raw)
    except ValueError:
        return None

def convert_gender(value: str):
    # Parse the string after the colon and convert to binary (female=0, male=1)
    parts = value.split(':', 1)
    raw = parts[1].strip().lower() if len(parts) > 1 else parts[0].strip().lower()
    if raw == 'female':
        return 0
    elif raw == 'male':
        return 1
    return None
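# Illustrative behaviour of the converters on typical "field: value" strings:
#   convert_age("age: 63")        -> 63.0
#   convert_age("age: unknown")   -> None
#   convert_gender("Sex: Male")   -> 1
#   convert_gender("Sex: Female") -> 0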
# 3. Initial usability filtering and metadata saving
is_trait_available = (trait_row is not None)
cohort_usable = validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)
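# validate_and_save_cohort_info with is_final=False is understood to record the
# initial gene/trait availability for this cohort in cohort_info.json and to
# return whether the cohort passes this first filtering stage.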
# 4. Since trait_row is None, skip clinical feature extraction.
# STEP 3
# 1. Use the get_genetic_data function from the library to get the gene_data from the matrix_file previously defined.
gene_data = get_genetic_data(matrix_file)
# 2. Print the first 20 row IDs (gene or probe identifiers) for future observation.
print(gene_data.index[:20])
# Based on observation, the "ILMN_" prefix indicates Illumina probe IDs, not standard human gene symbols.
# Therefore, they require mapping to gene symbols.
print("These identifiers are Illumina probe IDs.\nrequires_gene_mapping = True")
# STEP 5
# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.
gene_annotation = get_gene_annotation(soft_file)
# 2. Use the 'preview_df' function from the library to preview the data and print out the results.
print("Gene annotation preview:")
print(preview_df(gene_annotation))
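# The preview is expected to show, among other columns, the probe identifier
# column ('ID') and the gene symbol column ('Symbol') used for the mapping step
# below.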
# STEP: Gene Identifier Mapping
# 1. Identify the columns in gene_annotation that match the probe ID and gene symbol
probe_col = 'ID'
gene_symbol_col = 'Symbol'
# 2. Create the gene mapping dataframe
gene_mapping_df = get_gene_mapping(gene_annotation, prob_col=probe_col, gene_col=gene_symbol_col)
# 3. Convert probe-level measurements to gene-level expression data
gene_data = apply_gene_mapping(gene_data, gene_mapping_df)
# Just for a brief preview, let's check the resulting shape
print("Mapped gene_data shape:", gene_data.shape)
# STEP 7: Data Normalization and Linking
# 1. Normalize gene symbols in the obtained gene expression data
normalized_gene_data = normalize_gene_symbols_in_index(gene_data)
normalized_gene_data.to_csv(out_gene_data_file)
print(f"Saved normalized gene data to {out_gene_data_file}")
# 2. Check if we have a clinical dataframe called 'selected_clinical_df' (which only exists if trait_row was not None)
if 'selected_clinical_df' in globals():
    # We have trait data, so we can link and proceed with the final steps.
    selected_clinical = selected_clinical_df
    # 3. Link the clinical and genetic data on sample IDs
    linked_data = geo_link_clinical_genetic_data(selected_clinical, normalized_gene_data)
    # 4. Handle missing values, removing or imputing as instructed
    linked_data = handle_missing_values(linked_data, trait)
    # 5. Determine whether the trait is severely biased.
    trait_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)
    # 6. Conduct final quality validation and save metadata
    is_usable = validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=True,
        is_trait_available=True,
        is_biased=trait_biased,
        df=linked_data,
        note="Cohort data successfully processed with trait-based analysis."
    )
    # 7. If the dataset is usable, save the final linked data
    if is_usable:
        linked_data.to_csv(out_data_file, index=True)
        print(f"Saved final linked data to {out_data_file}")
    else:
        print("The dataset is not usable for trait-based association. Skipping final output.")
else:
    # Trait data was not extracted in Step 2 (trait_row was None), so no clinical linking or bias checks.
    print("No trait data found. Skipping linking, missing value handling, and trait bias analysis.")
    # Perform an initial metadata save (not final) since we lack a trait.
    is_usable = validate_and_save_cohort_info(
        is_final=False,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=True,
        is_trait_available=False
    )
    # Without trait data, this dataset won't move forward to final association analysis.
    print("No final output generated due to missing trait data.")