# Path Configuration
from tools.preprocess import *
# Processing context
trait = "Cardiovascular_Disease"
cohort = "GSE182600"
# Input paths
in_trait_dir = "../DATA/GEO/Cardiovascular_Disease"
in_cohort_dir = "../DATA/GEO/Cardiovascular_Disease/GSE182600"
# Output paths
out_data_file = "./output/preprocess/1/Cardiovascular_Disease/GSE182600.csv"
out_gene_data_file = "./output/preprocess/1/Cardiovascular_Disease/gene_data/GSE182600.csv"
out_clinical_data_file = "./output/preprocess/1/Cardiovascular_Disease/clinical_data/GSE182600.csv"
json_path = "./output/preprocess/1/Cardiovascular_Disease/cohort_info.json"
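# Defensive sketch (assumption, not part of the original pipeline): make sure the output
# directories exist before any file is written. The preprocessing library may already
# handle this; the loop is harmless if the directories are already present.
import os
for _path in [out_data_file, out_gene_data_file, out_clinical_data_file, json_path]:
    os.makedirs(os.path.dirname(_path), exist_ok=True)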
# STEP1
from tools.preprocess import *
# 1. Attempt to identify the paths to the SOFT file and the matrix file
try:
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
except AssertionError:
print("[WARNING] Could not find the expected '.soft' or '.matrix' files in the directory.")
soft_file, matrix_file = None, None
if soft_file is None or matrix_file is None:
print("[ERROR] Required GEO files are missing. Please check file names in the cohort directory.")
else:
# 2. Read the matrix file to obtain background information and sample characteristics data
background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
background_info, clinical_data = get_background_and_clinical_data(matrix_file,
background_prefixes,
clinical_prefixes)
# 3. Obtain the sample characteristics dictionary from the clinical dataframe
sample_characteristics_dict = get_unique_values_by_row(clinical_data)
# 4. Explicitly print out all the background information and the sample characteristics dictionary
print("Background Information:")
print(background_info)
print("\nSample Characteristics Dictionary:")
print(sample_characteristics_dict)
# Step 1: Determine if the dataset likely contains gene expression data
is_gene_available = True # Based on the background info "genome-wide gene expression"
# Step 2: Variable Availability and Data Type Conversion
# From the sample characteristics dictionary, all subjects have some form of cardiovascular disease.
# Therefore, there is no variation for the trait "Cardiovascular_Disease." We mark it as not available.
trait_row = None
# Age is found at key=1 with multiple distinct values
age_row = 1
# Gender is found at key=2 with both F and M
gender_row = 2
# Data type conversions
def convert_trait(value: str):
# Not used because trait_row = None
return None
def convert_age(value: str):
    # Example value: "age: 33.4"
    # Parse the substring after the colon and convert it to a float.
    try:
        val_str = value.split(":", 1)[1].strip()
        return float(val_str)
    except (AttributeError, IndexError, ValueError):
        return None
def convert_gender(value: str):
    # Example value: "gender: F"
    # Parse the substring after the colon and map "F" -> 0, "M" -> 1.
    try:
        val_str = value.split(":", 1)[1].strip().upper()
        if val_str.startswith("F"):
            return 0
        elif val_str.startswith("M"):
            return 1
        else:
            return None
    except (AttributeError, IndexError):
        return None
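# Illustrative sanity checks for the converters above, using the example strings quoted
# in their comments ("age: 33.4", "gender: F"). The literals are demonstration inputs,
# not values read from this cohort.
assert convert_age("age: 33.4") == 33.4
assert convert_gender("gender: F") == 0
assert convert_gender("gender: M") == 1
assert convert_gender("gender: unknown") is None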
# Step 3: Conduct initial filtering on dataset usability and save metadata
is_trait_available = (trait_row is not None)
is_usable = validate_and_save_cohort_info(
is_final=False,
cohort=cohort,
info_path=json_path,
is_gene_available=is_gene_available,
is_trait_available=is_trait_available
)
# Step 4: Since trait_row is None, we skip clinical feature extraction
# STEP3
# Attempt to read the gene expression data. If the library function returns an empty
# DataFrame, fall back to reading the matrix file manually: locate the
# '!series_matrix_table_begin' marker and parse everything after it, rather than
# discarding every line that starts with '!'.
gene_data = get_genetic_data(matrix_file)
if gene_data.empty:
print("[WARNING] The gene_data is empty. Attempting alternative loading without treating '!' as comments.")
import gzip
# Locate the marker line first
skip_rows = 0
with gzip.open(matrix_file, 'rt') as file:
for i, line in enumerate(file):
if "!series_matrix_table_begin" in line:
skip_rows = i + 1
break
    # Read the data again, this time without treating '!'-prefixed lines as comments
gene_data = pd.read_csv(
matrix_file,
compression="gzip",
skiprows=skip_rows,
delimiter="\t",
on_bad_lines="skip"
)
gene_data = gene_data.rename(columns={"ID_REF": "ID"}).astype({"ID": "str"})
gene_data.set_index("ID", inplace=True)
# Print the first 20 row IDs to confirm data structure
print(gene_data.index[:20])
# The gene identifiers shown (ILMN_XXXXXXX) appear to be Illumina probe IDs, not standard human gene symbols.
print("They appear to be Illumina probe IDs (ILMN identifiers), not standard gene symbols.\nrequires_gene_mapping = True")
# STEP5
# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.
gene_annotation = get_gene_annotation(soft_file)
# 2. Use the 'preview_df' function from the library to preview the data and print out the results.
print("Gene annotation preview:")
print(preview_df(gene_annotation))
# STEP: Gene Identifier Mapping
# 1. Identify the columns from the annotation that correspond to the probe IDs vs. the actual gene symbols.
# From the preview, the 'ID' column in 'gene_annotation' matches the same ILMN_xxxx IDs in gene_data,
# and the 'Symbol' column provides the gene symbol for each probe.
probe_col = "ID"
gene_col = "Symbol"
# 2. Get the gene mapping DataFrame by extracting the two relevant columns from the gene annotation
mapping_df = get_gene_mapping(gene_annotation, probe_col, gene_col)
# 3. Convert probe-level measurements to gene expression values using the mapping
gene_data = apply_gene_mapping(gene_data, mapping_df)
# Verify the shape or preview the first few rows if needed
print("Gene expression data after mapping:", gene_data.shape)
print(gene_data.head(5))
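# Defensive sketch (an added check, not from the original pipeline): warn if the mapping
# produced an empty matrix, which could indicate that the chosen annotation columns
# ("ID" / "Symbol") did not match the expression probes.
if gene_data.empty:
    print("[WARNING] Gene mapping produced an empty DataFrame; check the annotation columns.")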
import os
import pandas as pd
# STEP7: Data Normalization and Linking
# 1) Normalize the gene symbols in the previously obtained gene_data
normalized_gene_data = normalize_gene_symbols_in_index(gene_data)
normalized_gene_data.to_csv(out_gene_data_file)
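# Report the size of the normalized expression matrix that was just written; under the
# usual GEO layout assumption, rows are gene symbols and columns are samples.
print(f"Normalized gene data saved to {out_gene_data_file}: shape {normalized_gene_data.shape}")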
# Load the previously extracted clinical data, but only if the file exists and is non-empty
if os.path.exists(out_clinical_data_file) and os.path.getsize(out_clinical_data_file) > 0:
# Read the file
clinical_temp = pd.read_csv(out_clinical_data_file)
# Adjust row index to label the trait, age, and gender properly
if clinical_temp.shape[0] == 3:
clinical_temp.index = [trait, "Age", "Gender"]
elif clinical_temp.shape[0] == 2:
clinical_temp.index = [trait, "Gender"]
elif clinical_temp.shape[0] == 1:
clinical_temp.index = [trait]
# 2) Link the clinical and normalized genetic data
linked_data = geo_link_clinical_genetic_data(clinical_temp, normalized_gene_data)
# 3) Handle missing values
linked_data = handle_missing_values(linked_data, trait)
# 4) Check for severe bias in the trait; remove biased demographic features if present
trait_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)
# 5) Final quality validation and save metadata
is_usable = validate_and_save_cohort_info(
is_final=True,
cohort=cohort,
info_path=json_path,
is_gene_available=True,
is_trait_available=True,
is_biased=trait_biased,
df=linked_data,
note=f"Final check on {cohort} with {trait}."
)
# 6) If the linked data is usable, save it
if is_usable:
linked_data.to_csv(out_data_file)
else:
# If no valid clinical data file is found, finalize metadata indicating trait unavailability
is_usable = validate_and_save_cohort_info(
is_final=True,
cohort=cohort,
info_path=json_path,
is_gene_available=True,
is_trait_available=False,
is_biased=True, # Force a fallback so that it's flagged as unusable
df=pd.DataFrame(),
note=f"No trait data found for {cohort}, final metadata recorded."
)
# Per instructions, do not save a final linked data file when trait data is absent.