# Path Configuration
from tools.preprocess import *
# Processing context
trait = "Angelman_Syndrome"
cohort = "GSE43900"
# Input paths
in_trait_dir = "../DATA/GEO/Angelman_Syndrome"
in_cohort_dir = "../DATA/GEO/Angelman_Syndrome/GSE43900"
# Output paths
out_data_file = "./output/preprocess/1/Angelman_Syndrome/GSE43900.csv"
out_gene_data_file = "./output/preprocess/1/Angelman_Syndrome/gene_data/GSE43900.csv"
out_clinical_data_file = "./output/preprocess/1/Angelman_Syndrome/clinical_data/GSE43900.csv"
json_path = "./output/preprocess/1/Angelman_Syndrome/cohort_info.json"
# STEP 1: Initial Data Loading
# 1. Identify the paths to the SOFT file and the matrix file
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# 2. Read the matrix file to obtain background information and sample characteristics data
background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
background_info, clinical_data = get_background_and_clinical_data(
matrix_file,
background_prefixes,
clinical_prefixes
)
# 3. Obtain the sample characteristics dictionary from the clinical dataframe
sample_characteristics_dict = get_unique_values_by_row(clinical_data)
# 4. Explicitly print out all the background information and the sample characteristics dictionary
print("Background Information:")
print(background_info)
print("\nSample Characteristics Dictionary:")
print(sample_characteristics_dict)
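# For orientation, a hedged illustration of the structure this step prints: a dict keyed by
# characteristic-row index, mapping to the unique "key: value" strings observed across samples.
# The placeholders below only show the shape; they are not actual GSE43900 annotations.
_example_characteristics_shape = {
    0: ["treatment: <value>", "treatment: <value>"],  # treatment information
    1: ["cell type: <value>"],                        # cell type
    2: ["strain: <value>"],                           # strain
}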
# STEP 2: Dataset Analysis and Clinical Feature Extraction
# 1. Determine whether the dataset likely contains gene expression data.
# The background information describes a gene-regulation study, so we assume gene expression data is present.
is_gene_available = True
# 2. Variable availability and data type conversion.
# The sample characteristics dictionary only provides:
#   0: treatment information
#   1: cell type
#   2: strain
# None of these rows gives usable human trait, age, or gender information, so the row indices are set to None.
trait_row = None
age_row = None
gender_row = None
from typing import Optional
# Define the conversion functions. They are placeholders here because no usable clinical
# rows exist in this cohort, but they keep the interface expected by the library.
def convert_trait(x: str) -> Optional[int]:
    # No trait data available; always return None.
    return None
def convert_age(x: str) -> Optional[float]:
    # No age data available; always return None.
    return None
def convert_gender(x: str) -> Optional[int]:
    # No gender data available; always return None.
    return None
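# For reference only: a hedged sketch of what a real trait converter could look like in a
# cohort that did annotate disease status. The field name and labels below are hypothetical,
# not taken from GSE43900, and this function is not used anywhere in the pipeline.
def _example_convert_trait(x: str) -> Optional[int]:
    # GEO characteristics are usually "key: value" strings; keep only the value part.
    value = x.split(":", 1)[-1].strip().lower()
    if "angelman" in value:
        return 1  # affected
    if "control" in value or "healthy" in value:
        return 0  # unaffected
    return None   # unrecognized or missing label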
# 3. Save Metadata (initial filtering)
is_trait_available = (trait_row is not None)
validate_and_save_cohort_info(
is_final=False,
cohort=cohort,
info_path=json_path,
is_gene_available=is_gene_available,
is_trait_available=is_trait_available
)
# 4. Clinical Feature Extraction: skipped, because trait_row is None and there are no clinical features to extract.
# STEP 3: Gene Data Extraction
# 1. Use the get_genetic_data function from the library to get the gene_data from the matrix_file previously defined.
gene_data = get_genetic_data(matrix_file)
# 2. Print the first 20 row IDs (gene or probe identifiers) for future observation.
print(gene_data.index[:20])
# Based on observation, the identifiers are numeric probe IDs and do not appear to be standard human gene symbols.
# Therefore, gene mapping is required.
print("requires_gene_mapping = True")
# STEP 5: Gene Annotation
# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.
gene_annotation = get_gene_annotation(soft_file)
# 2. Use the 'preview_df' function from the library to preview the data and print out the results.
print("Gene annotation preview:")
print(preview_df(gene_annotation))
# STEP 6: Gene Identifier Mapping
# 1. Decide the columns for gene identifier (probe) and gene symbol based on the preview.
probe_col = "ID"
gene_symbol_col = "Gene Symbol"
# 2. Get the mapping dataframe from the annotation.
mapping_df = get_gene_mapping(gene_annotation, probe_col, gene_symbol_col)
# 3. Convert probe-level measurements to gene-level expression data.
gene_data = apply_gene_mapping(gene_data, mapping_df)
# Display the first few gene symbols to confirm the result.
print(gene_data.index[:20])
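# apply_gene_mapping is part of tools.preprocess, so its exact behavior is library-specific.
# Conceptually, probe-level rows mapped to the same gene symbol are combined (commonly by
# averaging). A minimal, self-contained pandas sketch of that idea, on made-up values:
import pandas as pd
_probe_values = pd.DataFrame(
    {"GSM_A": [1.0, 2.0, 3.0], "GSM_B": [4.0, 5.0, 6.0]},
    index=["1001_at", "1002_at", "1003_at"],
)
_probe_to_gene = pd.Series(["GENE1", "GENE1", "GENE2"], index=_probe_values.index)
_gene_level = _probe_values.groupby(_probe_to_gene).mean()  # one row per mapped gene symbol
print(_gene_level)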
# STEP 7: Data Normalization and Linking
# 1. Normalize gene symbols in the obtained gene expression data
normalized_gene_data = normalize_gene_symbols_in_index(gene_data)
normalized_gene_data.to_csv(out_gene_data_file)
print(f"Saved normalized gene data to {out_gene_data_file}")
# 2. Check if the trait is available in this dataset
if trait_row is None:
    # The trait is unavailable in this cohort, so we record non-final metadata only;
    # final validation would additionally require 'df' and 'is_biased'.
validate_and_save_cohort_info(
is_final=False,
cohort=cohort,
info_path=json_path,
is_gene_available=True, # Genetic data is present
is_trait_available=False # Trait data is not available
)
print("Trait data not available. Only gene expression data was processed. No final data to save.")
else:
# 3. Since trait is available, link the clinical and genetic data on sample IDs
selected_clinical_df = geo_select_clinical_features(
clinical_data,
trait,
trait_row,
convert_trait,
age_row,
convert_age,
gender_row,
convert_gender
)
linked_data = geo_link_clinical_genetic_data(selected_clinical_df, normalized_gene_data)
# 4. Handle missing values as instructed
linked_data = handle_missing_values(linked_data, trait)
# 5. Determine whether the trait is severely biased
trait_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)
# 6. Conduct final quality validation and save metadata
is_usable = validate_and_save_cohort_info(
is_final=True,
cohort=cohort,
info_path=json_path,
is_gene_available=True,
is_trait_available=True,
is_biased=trait_biased,
df=linked_data,
note="Cohort data successfully processed with trait-based analysis."
)
# 7. If the dataset is usable, save the final linked data
if is_usable:
linked_data.to_csv(out_data_file, index=True)
print(f"Saved final linked data to {out_data_file}")
else:
print("The dataset is not usable for trait-based association. Skipping final output.")