# NOTE(review): the following metadata is residue from the Hugging Face file
# viewer, preserved here as a comment so the script remains valid Python:
# uploader "Liu-Hy"; commit dc810d0 (verified) "Add files using
# upload-large-folder tool"; raw / history / blame links; file size 9.07 kB.
# Path Configuration
from tools.preprocess import *
# Processing context
# Trait under study and the GEO series (cohort) being preprocessed.
trait = "Schizophrenia"
cohort = "GSE193818"
# Input paths
# Raw GEO downloads: the trait-level directory and this cohort's subdirectory.
in_trait_dir = "../DATA/GEO/Schizophrenia"
in_cohort_dir = "../DATA/GEO/Schizophrenia/GSE193818"
# Output paths
# Destinations for the final linked dataset, the gene-only matrix, the
# clinical-only table, and the JSON metadata record for this cohort.
out_data_file = "./output/preprocess/1/Schizophrenia/GSE193818.csv"
out_gene_data_file = "./output/preprocess/1/Schizophrenia/gene_data/GSE193818.csv"
out_clinical_data_file = "./output/preprocess/1/Schizophrenia/clinical_data/GSE193818.csv"
json_path = "./output/preprocess/1/Schizophrenia/cohort_info.json"
# STEP1
from tools.preprocess import *
# 1. Identify the paths to the SOFT file and the matrix file
# (project helper; locates the two GEO files under the cohort directory)
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# 2. Read the matrix file to obtain background information and sample characteristics data
# Only lines beginning with these series-matrix prefixes are extracted.
background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)
# 3. Obtain the sample characteristics dictionary from the clinical dataframe
# (row index -> unique values seen in that characteristics row)
sample_characteristics_dict = get_unique_values_by_row(clinical_data)
# 4. Explicitly print out all the background information and the sample characteristics dictionary
# so the rows used for trait/age/gender in the next step can be chosen by inspection.
print("Background Information:")
print(background_info)
print("Sample Characteristics Dictionary:")
print(sample_characteristics_dict)
import re
# 1. Determine if gene expression data is available
is_gene_available = True # Based on the background text, this dataset contains gene expression data
# 2.1 Identify keys for trait, age, and gender
# From the sample characteristics dictionary, all subjects have the same trait (Schizophrenia), so it's constant.
# Therefore, we consider trait not available.
# (row values below are indices into the sample-characteristics table; None = unavailable)
trait_row = None
# For age, key=1
age_row = 1
# For gender, key=0
gender_row = 0
# 2.2 Define conversion functions
# Trait data is not available, but we still define a placeholder function.
def convert_trait(x: str):
    """Placeholder trait converter.

    The trait is constant across all samples in this cohort, so no usable
    trait value can be extracted; every input maps to None.
    """
    return None
def convert_age(x: str):
    """Parse an age from a GEO characteristic string such as "age: 30".

    Returns the age as a float, or None when the input is None, does not
    contain an "age:" field, is marked "NA", or is not numeric.
    """
    # Robustness fix: missing cells can arrive as None; the original crashed here.
    if x is None:
        return None
    # Capture everything after the (case-insensitive) "age:" label.
    match = re.search(r'age:\s*(.*)', x, flags=re.IGNORECASE)
    if match is None:
        return None
    val = match.group(1).strip()
    if val.upper() == "NA":
        return None
    try:
        return float(val)
    except ValueError:
        return None
def convert_gender(x: str):
    """Parse a gender from a GEO characteristic string such as "gender: Male".

    Returns 0 for female, 1 for male, and None when the input is None,
    lacks a "gender:" field, or holds any other value.
    """
    # Robustness fix: missing cells can arrive as None; the original crashed here.
    if x is None:
        return None
    # Capture everything after the (case-insensitive) "gender:" label.
    match = re.search(r'gender:\s*(.*)', x, flags=re.IGNORECASE)
    if match is None:
        return None
    val = match.group(1).strip().lower()
    # Binary encoding used throughout the pipeline: female=0, male=1.
    return {"female": 0, "male": 1}.get(val)
# 3. Conduct initial filtering and save metadata
# Trait availability follows directly from whether a trait row was identified.
is_trait_available = trait_row is not None
validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available,
)
# 4. Clinical feature extraction: skip if trait_row is None
if trait_row is not None:
    # Trait data would be extracted here, but trait_row is None for this
    # cohort, so this branch never runs.
    pass
# STEP3
import gzip
import pandas as pd

try:
    # 1. Preferred path: the library helper parses the series matrix directly.
    gene_data = get_genetic_data(matrix_file)
except KeyError:
    # Fallback: the expected "ID_REF" column may be absent, so manually parse
    # the file and normalize the probe-ID column name to "ID".
    marker = "!series_matrix_table_begin"
    skip_rows = None
    # Find where the matrix table begins; everything above it is metadata.
    with gzip.open(matrix_file, 'rt') as f:
        for i, line in enumerate(f):
            if marker in line:
                skip_rows = i + 1
                break
        else:
            # for-else: runs only when the marker was never found.
            raise ValueError(f"Marker '{marker}' not found in the file.")
    # Read the tab-separated matrix starting just after the marker line.
    gene_data = pd.read_csv(
        matrix_file,
        compression='gzip',
        skiprows=skip_rows,
        comment='!',
        delimiter='\t',
        on_bad_lines='skip',
    )
    # Normalize the probe-identifier column to 'ID' regardless of its name.
    if 'ID_REF' in gene_data.columns:
        gene_data.rename(columns={'ID_REF': 'ID'}, inplace=True)
    else:
        gene_data.rename(columns={gene_data.columns[0]: 'ID'}, inplace=True)
    gene_data['ID'] = gene_data['ID'].astype(str)
    gene_data.set_index('ID', inplace=True)

# 2. Print the first 20 row IDs (gene or probe identifiers) for future observation.
print(gene_data.index[:20])
# Observing the gene identifiers: They appear to be AFFX background control probes from an Affymetrix array.
# They do not appear to be standard human gene symbols.
# They require mapping to gene symbols.
# NOTE(review): this flag records the decision for the pipeline; it is not
# referenced again within this chunk.
requires_gene_mapping = True
# STEP5
# 1 & 2. Only extract and preview gene annotation data if the SOFT file exists, otherwise skip.
if soft_file is None:
    print("No SOFT file found. Skipping gene annotation extraction.")
    gene_annotation = pd.DataFrame()
else:
    try:
        # Attempt to extract gene annotation with the default method
        gene_annotation = get_gene_annotation(soft_file)
    except UnicodeDecodeError:
        # Fallback if UTF-8 decoding fails: re-read with a lenient encoding
        # and strip GEO metadata lines (those starting with ^, !, or #).
        import gzip
        with gzip.open(soft_file, 'rt', encoding='latin-1', errors='replace') as f:
            content = f.read()
        gene_annotation = filter_content_by_prefix(
            content,
            prefixes_a=['^', '!', '#'],
            unselect=True,
            source_type='string',
            return_df_a=True,
        )[0]
print("Gene annotation preview:")
print(preview_df(gene_annotation))
# STEP: Gene Identifier Mapping
# 1 & 2. Determine which columns to use for probe ID and gene symbol.
# From the preview, 'ID' matches the probe IDs in our gene_data index,
# and 'SPOT_ID.1' contains the gene symbol information.
probe_col = 'ID'
symbol_col = 'SPOT_ID.1'
# Create the mapping dataframe
# (project helper; builds a two-column probe -> gene-symbol table)
mapping_df = get_gene_mapping(gene_annotation, prob_col=probe_col, gene_col=symbol_col)
# 3. Convert probe-level to gene-level data
# (project helper; aggregates probe rows into gene-symbol rows)
gene_data = apply_gene_mapping(gene_data, mapping_df)
# Let's inspect the resulting gene_data shape and a small portion of its index
print("Mapped gene_data shape:", gene_data.shape)
print("First 20 gene symbols in the mapped gene_data index:")
print(gene_data.index[:20].tolist())
import os
import pandas as pd

# STEP 7: Data Normalization and Linking
# The clinical CSV is a prerequisite for trait-based linking; without it the
# cohort is recorded as unusable and processing stops.
if not os.path.exists(out_clinical_data_file):
    # No trait data file => dataset is not usable for trait analysis
    df_null = pd.DataFrame()
    is_biased = True  # Arbitrary boolean to satisfy function requirement
    validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=True,
        is_trait_available=False,
        is_biased=is_biased,
        df=df_null,
        note="No trait data file found; dataset not usable for trait analysis.",
    )
else:
    # 1. Normalize the mapped gene expression data using known gene symbol
    # synonyms, then save the gene-only matrix.
    normalized_gene_data = normalize_gene_symbols_in_index(gene_data)
    normalized_gene_data.to_csv(out_gene_data_file)
    # 2. Load the previously extracted clinical CSV and label row 0 with the
    # trait name (example usage for a single-row trait table).
    selected_clinical_df = pd.read_csv(out_clinical_data_file)
    selected_clinical_df = selected_clinical_df.rename(index={0: trait})
    # In this dataset the clinical table holds only trait info (if any).
    combined_clinical_df = selected_clinical_df
    # Link the clinical and genetic data by matching sample IDs in columns.
    linked_data = geo_link_clinical_genetic_data(combined_clinical_df, normalized_gene_data)
    # 3. Handle missing values in the linked data (drop incomplete rows/columns, then impute).
    processed_data = handle_missing_values(linked_data, trait)
    # 4. Check trait bias and remove any biased demographic features (if any).
    trait_biased, processed_data = judge_and_remove_biased_features(processed_data, trait)
    # 5. Final validation and metadata saving.
    is_usable = validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=True,
        is_trait_available=True,
        is_biased=trait_biased,
        df=processed_data,
        note="Completed trait-based preprocessing.",
    )
    # 6. If final dataset is usable, save. Otherwise, skip.
    if is_usable:
        processed_data.to_csv(out_data_file)