# Path Configuration
from tools.preprocess import *
import pandas as pd  # used below for DataFrame type hints and the final empty DataFrame
# Processing context
trait = "Chronic_obstructive_pulmonary_disease_(COPD)"
cohort = "GSE210272"
# Input paths
in_trait_dir = "../DATA/GEO/Chronic_obstructive_pulmonary_disease_(COPD)"
in_cohort_dir = "../DATA/GEO/Chronic_obstructive_pulmonary_disease_(COPD)/GSE210272"
# Output paths
out_data_file = "./output/preprocess/1/Chronic_obstructive_pulmonary_disease_(COPD)/GSE210272.csv"
out_gene_data_file = "./output/preprocess/1/Chronic_obstructive_pulmonary_disease_(COPD)/gene_data/GSE210272.csv"
out_clinical_data_file = "./output/preprocess/1/Chronic_obstructive_pulmonary_disease_(COPD)/clinical_data/GSE210272.csv"
json_path = "./output/preprocess/1/Chronic_obstructive_pulmonary_disease_(COPD)/cohort_info.json"
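# Make sure the output directories exist before anything is written
# (assumption: the preprocessing helpers do not create them automatically).
import os
for _path in [out_data_file, out_gene_data_file, out_clinical_data_file, json_path]:
    os.makedirs(os.path.dirname(_path), exist_ok=True)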
# STEP 1
from tools.preprocess import *
# 1. Identify the paths to the SOFT file and the matrix file
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# 2. Read the matrix file to obtain background information and sample characteristics data
background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)
# 3. Obtain the sample characteristics dictionary from the clinical dataframe
sample_characteristics_dict = get_unique_values_by_row(clinical_data)
# 4. Explicitly print out all the background information and the sample characteristics dictionary
print("Background Information:")
print(background_info)
print("Sample Characteristics Dictionary:")
print(sample_characteristics_dict)
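# For orientation: get_unique_values_by_row is expected to return a dict keyed by row
# index with the unique characteristic strings of that row, roughly like
# {1: ['Sex: Male', 'Sex: Female'], 2: ['age: 57.6', 'age: 61', 'age: 66.3', ...], ...}
# (illustrative sketch based on the values referenced in the next step, not actual output).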
# Step 1: Determine if gene expression data is available
# From the background info: "mRNA expression was profiled using Affymetrix Human Gene 1.0 ST Arrays."
# => This dataset indeed contains gene expression data (not purely miRNA or methylation).
is_gene_available = True
# Step 2: Check availability of variables (trait, age, gender) and define converters
# 2.1 - Identify rows for each variable and check if they're constant or missing
# The trait of interest is COPD status. The sample characteristics contain no explicit
# COPD row and no reliable way to infer case/control status from the dictionary.
# FEV1% predicted is clinically relevant but does not yield a clean "COPD vs. no COPD" label,
# and although the background mentions subjects "with and without COPD", it gives no
# per-sample classification. Hence, treat trait data as unavailable.
trait_row = None
# Age data looks available in row 2, with multiple distinct values (57.6, 61, 66.3, etc.).
age_row = 2
# Gender data is available in row 1, with two distinct values ("Male", "Female").
gender_row = 1
# 2.2 - Define data type and converters
def convert_trait(raw_value: str):
    # Not used since trait_row is None; defined only for completeness.
    return None
def convert_age(raw_value: str):
# Example: "age: 57.6" -> 57.6 as float, unknown -> None
parts = raw_value.split(":")
if len(parts) < 2:
return None
try:
return float(parts[1].strip())
except ValueError:
return None
def convert_gender(raw_value: str):
# Example: "Sex: Male" -> 1, "Sex: Female" -> 0, otherwise -> None
parts = raw_value.split(":")
if len(parts) < 2:
return None
val = parts[1].strip().lower()
if val == "male":
return 1
elif val == "female":
return 0
return None
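# Quick sanity checks on the converters, using the value formats cited above
# (illustrative inputs, not values read from the matrix file):
assert convert_age("age: 57.6") == 57.6
assert convert_age("age: unknown") is None
assert convert_gender("Sex: Male") == 1
assert convert_gender("Sex: Female") == 0
assert convert_gender("Sex: not reported") is None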
# Step 3: Save metadata with initial filtering
# Trait data availability is determined by whether trait_row is None
is_trait_available = (trait_row is not None)
is_usable = validate_and_save_cohort_info(
is_final=False,
cohort=cohort,
info_path=json_path,
is_gene_available=is_gene_available,
is_trait_available=is_trait_available
)
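# Report the outcome of the initial filtering (uses only variables defined above).
print(f"Initial filtering: is_usable={is_usable}, "
      f"is_gene_available={is_gene_available}, is_trait_available={is_trait_available}")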
# Step 4: Since trait_row is None, we SKIP the clinical feature extraction step.
# (No extraction or CSV export for the trait variable.)
# STEP 3
# 1. Use the get_genetic_data function from the library to get the gene_data from the matrix_file previously defined.
gene_data = get_genetic_data(matrix_file)
# 2. Print the first 20 row IDs (gene or probe identifiers) for future observation.
print(gene_data.index[:20])
print("These gene identifiers appear to be Ensembl transcript IDs, not standard human gene symbols.")
print("requires_gene_mapping = True")
# STEP 5
# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.
gene_annotation = get_gene_annotation(soft_file)
# 2. Use the 'preview_df' function from the library to preview the data and print out the results.
print("Gene annotation preview:")
print(preview_df(gene_annotation))
# STEP 6: Gene Identifier Mapping
# 1. Observe that the "ID" column in gene_annotation matches the expression DataFrame index
# (e.g., "ENSG00000000003_at"), and "ORF" holds the Ensembl gene IDs (e.g., "ENSG00000000003").
mapping_df = get_gene_mapping(gene_annotation, prob_col='ID', gene_col='ORF')
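# Optional: preview the probe-to-gene mapping before applying it (preview_df is the
# same library helper used for the annotation preview above).
print("Gene mapping preview:")
print(preview_df(mapping_df))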
# We'll define a custom function similar to the library's apply_gene_mapping,
# but preserving Ensembl IDs instead of extracting standard gene symbols:
def apply_ensembl_mapping(expression_df: pd.DataFrame, mapping_df: pd.DataFrame) -> pd.DataFrame:
    # Keep only the mapping rows whose probe ID appears in the expression matrix
    mapping_df = mapping_df[mapping_df['ID'].isin(expression_df.index)].copy()
    mapping_df.drop_duplicates(subset=['ID', 'Gene'], inplace=True)
    # Each annotation value is treated as a single Ensembl gene ID string
    # (no symbol extraction); drop rows without a usable ID
    mapping_df['Gene'] = mapping_df['Gene'].apply(
        lambda x: x.strip() if isinstance(x, str) and x.strip() else None
    )
    mapping_df.dropna(subset=['Gene'], inplace=True)
    # Count how many distinct genes each probe maps to, so a probe's signal can be
    # split evenly if it maps to more than one gene (with "ENSG..._at" probes this is 1)
    mapping_df['num_genes'] = mapping_df.groupby('ID')['Gene'].transform('size')
    # Merge the mapping with the expression data on the probe ID
    mapping_df.set_index('ID', inplace=True)
    merged_df = mapping_df.join(expression_df)
    # Distribute expression across genes, then sum the contributions per gene
    expr_cols = [col for col in merged_df.columns if col not in ['Gene', 'num_genes']]
    merged_df[expr_cols] = merged_df[expr_cols].div(merged_df['num_genes'], axis=0)
    gene_expression_df = merged_df.groupby('Gene')[expr_cols].sum()
    return gene_expression_df
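# Minimal self-check of apply_ensembl_mapping on toy data (hypothetical probe and
# sample names; illustrates the expected 1:1 "ENSG..._at" -> Ensembl gene ID mapping).
_toy_expr = pd.DataFrame({"S1": [1.0, 2.0]}, index=["ENSG0001_at", "ENSG0002_at"])
_toy_map = pd.DataFrame({"ID": ["ENSG0001_at", "ENSG0002_at"],
                         "Gene": ["ENSG0001", "ENSG0002"]})
assert list(apply_ensembl_mapping(_toy_expr, _toy_map).index) == ["ENSG0001", "ENSG0002"]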
# 2. Convert probe-level data into Ensembl gene-level data
gene_data = apply_ensembl_mapping(gene_data, mapping_df)
# Print out the final shape of the mapped gene_data
print(f"Mapped gene_data shape: {gene_data.shape}")
# STEP 7
import pandas as pd
# 1. Normalize the obtained gene data with the 'normalize_gene_symbols_in_index' function from the library.
normalized_gene_data = normalize_gene_symbols_in_index(gene_data)
normalized_gene_data.to_csv(out_gene_data_file)
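# Record the shape of the normalized expression matrix for the log.
print(f"Normalized gene_data shape: {normalized_gene_data.shape}; saved to {out_gene_data_file}")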
# Since the trait is unavailable (trait_row=None), we can't link clinical data or evaluate trait bias properly.
# However, the library function validate_and_save_cohort_info requires 'df' and 'is_biased' when is_final=True.
# We'll provide an empty DataFrame and set is_biased to True to mark the dataset as unusable.
empty_df = pd.DataFrame()
is_biased = True
# 5. Conduct final quality validation, indicating the dataset is not usable without trait data.
is_usable = validate_and_save_cohort_info(
is_final=True,
cohort=cohort,
info_path=json_path,
is_gene_available=True,
is_trait_available=False,
is_biased=is_biased,
df=empty_df,
note="No trait data available; final validation indicates dataset is not usable for trait analysis."
)
# 6. Because the dataset lacks trait data, it is not usable and no linked data file is saved.
if is_usable:
    pass  # Linked clinical + genetic data would be saved to out_data_file here if usable.