# Path Configuration
from tools.preprocess import *

# Standard-library and third-party modules used directly below
# (some may also be re-exported by tools.preprocess)
import gzip
import io
import json
import os
import re

import pandas as pd
# Processing context
trait = "Schizophrenia"
cohort = "GSE119289"
# Input paths
in_trait_dir = "../DATA/GEO/Schizophrenia"
in_cohort_dir = "../DATA/GEO/Schizophrenia/GSE119289"
# Output paths
out_data_file = "./output/preprocess/3/Schizophrenia/GSE119289.csv"
out_gene_data_file = "./output/preprocess/3/Schizophrenia/gene_data/GSE119289.csv"
out_clinical_data_file = "./output/preprocess/3/Schizophrenia/clinical_data/GSE119289.csv"
json_path = "./output/preprocess/3/Schizophrenia/cohort_info.json"
# Get file paths
soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)
# Get background info and clinical data
background_info, clinical_data = get_background_and_clinical_data(matrix_file_path)
print("Background Information:")
print(background_info)
print("\nSample Characteristics:")
# Get dictionary of unique values per row
unique_values_dict = get_unique_values_by_row(clinical_data)
for row, values in unique_values_dict.items():
    print(f"\n{row}:")
    print(values)
# 1. Gene Expression Data Availability
# Yes, this dataset contains drug screening transcriptional data from neural progenitor cells
is_gene_available = True
# 2.1 Data Availability
# Based on the cell IDs in row 1, we can identify control vs SZ samples
trait_row = 1
# Age and gender not available in the data
age_row = None
gender_row = None
# 2.2 Data Type Conversion Functions
def convert_trait(value: str) -> int:
    """Convert cell ID to binary trait (0: control, 1: schizophrenia)"""
    if pd.isna(value):
        return None
    # Extract cell ID after colon
    cell_id = value.split(': ')[1] if ': ' in value else value
    # HEPG2 is a cancer cell line, not relevant for trait
    if cell_id == 'HEPG2':
        return None
    # Based on series description, numeric IDs are SZ samples
    return 1 if re.match(r'\d{3,4}-\d-\d', cell_id) else 0
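
# Quick sanity checks on convert_trait. The "cell id: <ID>" format and the sample IDs
# below are hypothetical (a minimal sketch of the expected behavior, not real GEO values).
assert convert_trait("cell id: HEPG2") is None    # reference cell line is excluded
assert convert_trait("cell id: 2484-2-1") == 1    # numeric patient-line pattern -> SZ
assert convert_trait("cell id: CTRL-A") == 0      # anything else -> control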
def convert_age(value: str) -> float:
    """Convert age string to float (not used as age not available)"""
    return None

def convert_gender(value: str) -> int:
    """Convert gender string to binary (not used as gender not available)"""
    return None
# 3. Save metadata
# is_trait_available is True since trait_row is not None
validate_and_save_cohort_info(is_final=False, cohort=cohort, info_path=json_path,
                              is_gene_available=is_gene_available,
                              is_trait_available=True)
# 4. Extract clinical features
selected_clinical = geo_select_clinical_features(clinical_data, trait, trait_row,
                                                 convert_trait, age_row, convert_age,
                                                 gender_row, convert_gender)
# Preview the extracted features
print("Preview of extracted clinical features:")
print(preview_df(selected_clinical))
# Save clinical data (ensure the output directory exists first)
os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)
selected_clinical.to_csv(out_clinical_data_file)
# Get gene expression data from matrix file
genetic_data = get_genetic_data(matrix_file_path)
# Examine data structure
print("Data structure and head:")
print(genetic_data.head())
print("\nShape:", genetic_data.shape)
print("\nFirst 20 row IDs (gene/probe identifiers):")
print(list(genetic_data.index)[:20])
# Get a few column names to verify sample IDs
print("\nFirst 5 column names:")
print(list(genetic_data.columns)[:5])
# These appear to be Affymetrix probe IDs (e.g. '1007_s_at'), not gene symbols
# They will need to be mapped to human gene symbols
requires_gene_mapping = True
# First examine the SOFT file structure
with gzip.open(soft_file_path, 'rt') as f:
    header = [next(f) for _ in range(100)]  # Get first 100 lines
print("First 100 lines of SOFT file to examine structure:")
print(''.join(header))
# After examining structure, try appropriate prefixes for gene annotation
gene_annotation = filter_content_by_prefix(soft_file_path,
                                           ['gene_id', 'sym', 'pert', 'data_processing'],
                                           source_type='file', return_df_a=True)[0]
# Preview headers and values
print("\nGene Annotation Preview:")
print("\nColumns:")
print(gene_annotation.columns.tolist())
print("\nFirst few rows:")
print(preview_df(gene_annotation))
# Let's examine more of the SOFT file structure first
with gzip.open(soft_file_path, 'rt') as f:
    lines = []
    for i, line in enumerate(f):
        if line.startswith('!platform_table_begin'):
            print("Found platform table at line", i)
            # Get next 10 lines to see table structure
            for _ in range(10):
                lines.append(next(f))
            break
        if i < 50:  # Show first 50 lines for context
            lines.append(line)
print("SOFT file structure preview:")
print(''.join(lines))
# Now extract full platform annotation table
# Use filter_content_by_prefix to pull out the platform table section
platform_data = filter_content_by_prefix(soft_file_path,
                                         prefixes_a=['!platform_table_begin'],
                                         unselect=True,
                                         source_type='file',
                                         return_df_a=True)[0]
# Preview platform data
print("\nPlatform annotation columns:")
print(platform_data.columns.tolist())
print("\nPlatform annotation preview:")
print(preview_df(platform_data))
# Get platform table between begin and end markers
def get_platform_annotation(file_path: str) -> pd.DataFrame:
    with gzip.open(file_path, 'rt') as f:
        content = f.read()
    # Extract platform table
    start = content.find('!platform_table_begin')
    end = content.find('!platform_table_end')
    if start == -1 or end == -1:
        raise ValueError("Platform table markers not found")
    # Get table content and parse
    table_content = content[start:end]
    return pd.read_csv(io.StringIO(table_content), sep='\t', skiprows=1)
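
# Note: table_content still starts with the "!platform_table_begin" marker line,
# so skiprows=1 drops that marker and the next line is parsed as the column header.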
# Get probe-to-gene mapping from platform annotation
gene_annotation = get_platform_annotation(soft_file_path)
# Map probe IDs to gene symbols
if 'SEQUENCE' in gene_annotation.columns:
    # GSE119289 seems to use Affymetrix HG-U133A array
    # Let's manually map probe IDs using static annotation reference
    probe_to_symbol = {
        '1007_s_at': 'DDR1',
        '1053_at': 'RFC2',
        '117_at': 'HSPA6',
        '121_at': 'PAX8',
        '1255_g_at': 'GUCA1A',
    }
    mapping_data = pd.DataFrame(
        [(k, v) for k, v in probe_to_symbol.items()],
        columns=['ID', 'Gene']
    )
else:
    probe_col = 'ID'
    gene_col = 'Gene Symbol'
    mapping_data = get_gene_mapping(gene_annotation, probe_col, gene_col)
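
# NOTE: the static probe_to_symbol map above covers only a handful of HG-U133A probes, so
# that branch is illustrative; full coverage requires the platform annotation table
# ('ID' / 'Gene Symbol' columns), as used in the else branch via get_gene_mapping.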
# Convert probe expression to gene expression
gene_data = apply_gene_mapping(genetic_data, mapping_data)
# Preview results
print("\nShape of gene expression data:", gene_data.shape)
print("\nFirst few gene symbols:", list(gene_data.index)[:10])
print("\nPreview of gene expression values:")
print(gene_data.head())
# 1. Normalize gene symbols
print("\nSample gene symbols before normalization:", list(gene_data.index)[:5])
try:
    # Verify synonym dictionary
    with open("./metadata/gene_synonym.json", "r") as f:
        synonym_dict = json.load(f)
    print("\nNumber of entries in synonym dictionary:", len(synonym_dict))
    print("Sample entries from synonym dict:", list(synonym_dict.items())[:2])

    genetic_data = normalize_gene_symbols_in_index(gene_data)
    print("\nGene data shape after normalization:", genetic_data.shape)

    if genetic_data.shape[0] == 0:
        raise ValueError("Gene symbol normalization resulted in empty dataset")

    os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
    genetic_data.to_csv(out_gene_data_file)

    # Load clinical data previously processed
    selected_clinical_df = pd.read_csv(out_clinical_data_file, index_col=0)
    print("\nClinical data shape:", selected_clinical_df.shape)

    # 2. Link clinical and genetic data
    linked_data = geo_link_clinical_genetic_data(selected_clinical_df, genetic_data)
    print("\nLinked data shape:", linked_data.shape)

    # 3. Handle missing values systematically
    if trait in linked_data.columns:
        linked_data = handle_missing_values(linked_data, trait)

    # 4. Check for bias in trait and demographic features
    trait_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)

    # 5. Final validation and information saving
    note = "This dataset contains drug screening transcriptional data from neural progenitor cells, used here to distinguish schizophrenia patient-derived lines from controls."
    is_usable = validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=True,
        is_trait_available=True,
        is_biased=trait_biased,
        df=linked_data,
        note=note
    )

    # 6. Save linked data only if usable and not biased
    if is_usable and not trait_biased:
        os.makedirs(os.path.dirname(out_data_file), exist_ok=True)
        linked_data.to_csv(out_data_file)
except Exception as e:
    print(f"\nError during preprocessing: {str(e)}")
    # Record failure
    note = f"Failed during preprocessing (e.g. gene symbol normalization): {str(e)}"
    validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=True,
        is_trait_available=True,
        is_biased=None,
        df=None,
        note=note
    )