{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "b705789f", "metadata": {}, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n", "\n", "# Path Configuration\n", "from tools.preprocess import *\n", "\n", "# Processing context\n", "trait = \"Hutchinson-Gilford_Progeria_Syndrome\"\n", "cohort = \"GSE84351\"\n", "\n", "# Input paths\n", "in_trait_dir = \"../../input/GEO/Hutchinson-Gilford_Progeria_Syndrome\"\n", "in_cohort_dir = \"../../input/GEO/Hutchinson-Gilford_Progeria_Syndrome/GSE84351\"\n", "\n", "# Output paths\n", "out_data_file = \"../../output/preprocess/Hutchinson-Gilford_Progeria_Syndrome/GSE84351.csv\"\n", "out_gene_data_file = \"../../output/preprocess/Hutchinson-Gilford_Progeria_Syndrome/gene_data/GSE84351.csv\"\n", "out_clinical_data_file = \"../../output/preprocess/Hutchinson-Gilford_Progeria_Syndrome/clinical_data/GSE84351.csv\"\n", "json_path = \"../../output/preprocess/Hutchinson-Gilford_Progeria_Syndrome/cohort_info.json\"\n" ] }, { "cell_type": "markdown", "id": "014d9416", "metadata": {}, "source": [ "### Step 1: Initial Data Loading" ] }, { "cell_type": "code", "execution_count": null, "id": "06c393a6", "metadata": {}, "outputs": [], "source": [ "from tools.preprocess import *\n", "# 1. Identify the paths to the SOFT file and the matrix file\n", "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# 2. Read the matrix file to obtain background information and sample characteristics data\n", "background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n", "clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", "background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", "\n", "# 3. Obtain the sample characteristics dictionary from the clinical dataframe\n", "sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", "\n", "# 4. 
Explicitly print out all the background information and the sample characteristics dictionary\n",
    "print(\"Background Information:\")\n",
    "print(background_info)\n",
    "print(\"Sample Characteristics Dictionary:\")\n",
    "print(sample_characteristics_dict)\n"
  ] }, { "cell_type": "markdown", "id": "290cf860", "metadata": {}, "source": [ "### Step 2: Dataset Analysis and Clinical Feature Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "5fe84214", "metadata": {}, "outputs": [], "source": [
    "import os\n",
    "import json\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from typing import Optional, Callable, Dict, Any, List\n",
    "\n",
    "# Check for gene expression availability\n",
    "# The series was profiled on an Affymetrix expression array, so gene expression data should be available\n",
    "is_gene_available = True\n",
    "\n",
    "# Sample characteristics dictionary observed in Step 1 (unique values per row):\n",
    "#   0: 'Sex: Male', 'Sex: Female', 'Sex: ?'\n",
    "#   1: 'cell line: HGADFN003', 'cell line: HGMDFN090', 'cell line: HGADFN167', 'cell line: HGFDFN168', 'cell line: AG01972', 'cell line: BJ1', 'cell line: H9'\n",
    "#   2: 'condition: HGPS', 'condition: Normal'\n",
    "#   3: 'cell type: iPSC', 'cell type: Vascular Smooth Muscle', 'cell type: Fibroblast', 'cell type: Embryonic Stem Cell'\n",
    "\n",
    "# Define the rows that hold trait, age, and gender\n",
    "trait_row = 2   # 'condition: HGPS' or 'condition: Normal'\n",
    "age_row = None  # Age data not available\n",
    "gender_row = 0  # 'Sex: Male', 'Sex: Female', 'Sex: ?'\n",
    "\n",
    "# Define conversion functions for each variable\n",
    "def convert_trait(value):\n",
    "    \"\"\"Convert HGPS trait status to binary format (1 = HGPS, 0 = Normal).\"\"\"\n",
    "    if value is None or (isinstance(value, float) and np.isnan(value)):\n",
    "        return None\n",
    "\n",
    "    # Convert to string and extract the value after the colon\n",
    "    value = str(value)\n",
    "    if ':' in value:\n",
    "        value = value.split(':', 1)[1].strip()\n",
    "\n",
    "    if value.lower() == 'hgps':\n",
    "        return 1  # HGPS positive\n",
    "    elif value.lower() == 'normal':\n",
    "        return 0  # HGPS negative\n",
    "    else:\n",
    "        return None  # Unknown value\n",
    "\n",
    "def convert_gender(value):\n",
    "    \"\"\"Convert gender to binary format: female=0, male=1.\"\"\"\n",
    "    if value is None or (isinstance(value, float) and np.isnan(value)):\n",
    "        return None\n",
    "\n",
    "    # Convert to string and extract the value after the colon\n",
    "    value = str(value)\n",
    "    if ':' in value:\n",
    "        value = value.split(':', 1)[1].strip()\n",
    "\n",
    "    if value.lower() == 'male':\n",
    "        return 1\n",
    "    elif value.lower() == 'female':\n",
    "        return 0\n",
    "    else:\n",
    "        return None  # Unknown or missing value (e.g. 'Sex: ?')\n",
    "\n",
    "# Determine if trait data is available\n",
    "is_trait_available = trait_row is not None\n",
    "\n",
    "# Validate and save cohort info (initial filtering)\n",
    "validate_and_save_cohort_info(\n",
    "    is_final=False,\n",
    "    cohort=cohort,\n",
    "    info_path=json_path,\n",
    "    is_gene_available=is_gene_available,\n",
    "    is_trait_available=is_trait_available\n",
    ")\n",
    "\n",
    "# If trait data is available, extract clinical features\n",
    "if is_trait_available:\n",
    "    # Use the clinical_data DataFrame loaded in Step 1: geo_select_clinical_features expects\n",
    "    # rows to represent characteristic entries and columns to represent samples, which is\n",
    "    # exactly how that DataFrame is structured\n",
    "    selected_clinical_df = geo_select_clinical_features(\n",
    "        clinical_df=clinical_data,\n",
    "        trait=trait,\n",
    "        trait_row=trait_row,\n",
    "        convert_trait=convert_trait,\n",
    "        gender_row=gender_row,\n",
    "        convert_gender=convert_gender\n",
    "    )\n",
    "\n",
    "    # Preview the resulting dataframe\n",
    "    preview = preview_df(selected_clinical_df)\n",
    "    print(\"Clinical data preview:\", preview)\n",
    "\n",
    "    # Save the extracted clinical data, keeping the index that holds the feature names\n",
    "    os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n",
    "    selected_clinical_df.to_csv(out_clinical_data_file)\n",
    "    print(f\"Clinical data saved to {out_clinical_data_file}\")\n"
  ] }, { "cell_type": "markdown", "id": "1f65f9b2", "metadata": {}, "source": [ "### Step 3: Gene Data Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "bf370d56", "metadata": {}, "outputs": [], "source": [
    "# 1. Get the file paths for the SOFT file and matrix file\n",
    "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n",
    "\n",
    "# 2. Extract gene expression data from the matrix file\n",
    "try:\n",
    "    print(\"Extracting gene data from matrix file:\")\n",
    "    gene_data = get_genetic_data(matrix_file)\n",
    "    if gene_data.empty:\n",
    "        print(\"Extracted gene expression data is empty\")\n",
    "        is_gene_available = False\n",
    "    else:\n",
    "        print(f\"Successfully extracted gene data with {len(gene_data.index)} rows\")\n",
    "        print(\"First 20 gene IDs:\")\n",
    "        print(gene_data.index[:20])\n",
    "        is_gene_available = True\n",
    "except Exception as e:\n",
    "    print(f\"Error extracting gene data: {e}\")\n",
    "    print(\"This dataset appears to have an empty or malformed gene expression matrix\")\n",
    "    is_gene_available = False\n",
    "\n",
    "print(f\"\\nGene expression data available: {is_gene_available}\")\n"
  ] }, { "cell_type": "markdown", "id": "a617af3c", "metadata": {}, "source": [ "### Step 4: Gene Identifier Review" ] }, { "cell_type": "code", "execution_count": null, "id": "553d31f7", "metadata": {}, "outputs": [], "source": [
    "# The gene identifiers from the previous step are numeric IDs (like 16650001, 16650003, etc.)\n",
    "# rather than standard human gene symbols, which are alphanumeric names such as BRCA1, TP53, or LMNA.\n",
    "#\n",
    "# These numeric IDs are probe/feature IDs from the microarray platform and need to be mapped\n",
    "# to standard gene symbols for meaningful analysis. A quick programmatic check of this is\n",
    "# sketched at the top of the next step's cell.\n",
    "\n",
    "requires_gene_mapping = True\n"
  ] }, { "cell_type": "markdown", "id": "04da6fac", "metadata": {}, "source": [ "### Step 5: Gene Annotation" ] }, { "cell_type": "code", "execution_count": null, "id": "ad7730f6", "metadata": {}, "outputs": [], "source": [ "# 1. 
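Before extracting annotation, a quick check (assumption: pandas as pd and gene_data from\n",
    "# Step 3 are still in memory) that the identifiers are numeric probe IDs rather than gene symbols:\n",
    "print(\"Identifiers look numeric:\", pd.Series(gene_data.index.astype(str)).str.isdigit().all())\n",
    "\n",
    "# 2. 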
Extract gene annotation data from the SOFT file\n", "print(\"Extracting gene annotation data from SOFT file...\")\n", "try:\n", " # Use the library function to extract gene annotation\n", " gene_annotation = get_gene_annotation(soft_file)\n", " print(f\"Successfully extracted gene annotation data with {len(gene_annotation.index)} rows\")\n", " \n", " # Preview the annotation DataFrame\n", " print(\"\\nGene annotation preview (first few rows):\")\n", " print(preview_df(gene_annotation))\n", " \n", " # Show column names to help identify which columns we need for mapping\n", " print(\"\\nColumn names in gene annotation data:\")\n", " print(gene_annotation.columns.tolist())\n", " \n", " # Check for relevant mapping columns\n", " if 'GB_ACC' in gene_annotation.columns:\n", " print(\"\\nThe dataset contains GenBank accessions (GB_ACC) that could be used for gene mapping.\")\n", " # Count non-null values in GB_ACC column\n", " non_null_count = gene_annotation['GB_ACC'].count()\n", " print(f\"Number of rows with GenBank accessions: {non_null_count} out of {len(gene_annotation)}\")\n", " \n", " if 'SPOT_ID' in gene_annotation.columns:\n", " print(\"\\nThe dataset contains genomic regions (SPOT_ID) that could be used for location-based gene mapping.\")\n", " print(\"Example SPOT_ID format:\", gene_annotation['SPOT_ID'].iloc[0])\n", " \n", "except Exception as e:\n", " print(f\"Error processing gene annotation data: {e}\")\n", " is_gene_available = False\n" ] }, { "cell_type": "markdown", "id": "4d7a68ca", "metadata": {}, "source": [ "### Step 6: Gene Identifier Mapping" ] }, { "cell_type": "code", "execution_count": null, "id": "050c6ed3", "metadata": {}, "outputs": [], "source": [ "# 1. Observe the gene identifiers and decide which columns to use for mapping\n", "print(\"Analyzing gene identifiers and annotation data...\")\n", "\n", "# From the previous steps we can see:\n", "# - Gene expression data uses numeric IDs (like 16650001) in the 'ID' column\n", "# - Gene annotation has the same 'ID' column\n", "# - 'GB_ACC' contains gene accession numbers that we need to map to gene symbols\n", "\n", "# Check if the SPOT_ID column contains genomic coordinates we can use\n", "print(\"\\nAnalyzing SPOT_ID format for genomic locations:\")\n", "spot_id_samples = gene_annotation['SPOT_ID'].dropna().unique()[:5]\n", "print(f\"Sample SPOT_ID values: {spot_id_samples}\")\n", "\n", "# GenBank accessions might be useful even if they're non-coding RNAs\n", "# Let's examine the accessions more closely\n", "print(\"\\nAnalyzing GenBank accessions:\")\n", "gb_acc_samples = gene_annotation['GB_ACC'].dropna().sample(min(10, gene_annotation['GB_ACC'].count())).tolist()\n", "print(f\"Sample GenBank accessions: {gb_acc_samples}\")\n", "\n", "# 2. 
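Build the probe-to-gene mapping. First, a minimal toy sketch (assumption: illustrative\n",
    "# values only, not this cohort's probes) of the distribute-and-sum logic used further below:\n",
    "_toy_expr = pd.DataFrame({'S1': [2.0, 4.0]}, index=['p1', 'p2'])\n",
    "_toy_map = pd.DataFrame({'ID': ['p1', 'p1', 'p2'], 'Gene': ['A', 'B', 'A']})\n",
    "_toy = _toy_map.merge(_toy_expr, left_on='ID', right_index=True)\n",
    "_toy['S1'] = _toy['S1'] / _toy.groupby('ID')['ID'].transform('size')\n",
    "print(\"Toy distribute-and-sum:\", _toy.groupby('Gene')['S1'].sum().to_dict())  # {'A': 5.0, 'B': 1.0}\n",
    "\n",
    "# 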
Create a custom mapping function for GenBank accessions\n", "# Since our extract_human_gene_symbols function filters out NR_/XR_ accessions,\n", "# we'll create a modified approach that keeps these identifiers\n", "def extract_gene_identifiers(text):\n", " \"\"\"Extract gene identifiers from GenBank accessions including non-coding RNAs.\"\"\"\n", " if not isinstance(text, str):\n", " return []\n", " # Keep the accession ID as is since we don't have proper gene symbols\n", " return [text]\n", "\n", "# Create gene mapping dataframe using ID and GB_ACC columns\n", "prob_col = 'ID'\n", "gene_col = 'GB_ACC'\n", "\n", "print(f\"\\nCreating gene mapping with {prob_col} and {gene_col}...\")\n", "gene_mapping = get_gene_mapping(gene_annotation, prob_col, gene_col)\n", "print(f\"Generated mapping for {len(gene_mapping)} entries\")\n", "\n", "# Modify the mapping to use our custom extraction function\n", "gene_mapping['Gene'] = gene_mapping['Gene'].apply(extract_gene_identifiers)\n", "\n", "# Preview the mapping\n", "print(\"\\nGene mapping preview:\")\n", "print(preview_df(gene_mapping))\n", "\n", "# 3. Apply the gene mapping to convert probe measurements to gene expression\n", "print(\"\\nConverting probe measurements to gene expression data...\")\n", "try:\n", " # Create a subclass of the mapping function that uses our custom extraction\n", " from tools.preprocess import apply_gene_mapping as original_apply_gene_mapping\n", " \n", " def custom_apply_gene_mapping(expression_df, mapping_df):\n", " \"\"\"Modified version that preserves GenBank accessions\"\"\"\n", " mapping_df = mapping_df[mapping_df['ID'].isin(expression_df.index)].copy()\n", " # Use the already-processed Gene column (from extract_gene_identifiers)\n", " \n", " # Count genes per probe and expand to one gene per row\n", " mapping_df['num_genes'] = mapping_df['Gene'].apply(len)\n", " mapping_df = mapping_df.explode('Gene')\n", " # Empty list becomes NaN after explode, which should be dropped\n", " mapping_df = mapping_df.dropna(subset=['Gene'])\n", " mapping_df.set_index('ID', inplace=True)\n", " \n", " # Merge and distribute expression values\n", " merged_df = mapping_df.join(expression_df)\n", " expr_cols = [col for col in merged_df.columns if col not in ['Gene', 'num_genes']]\n", " merged_df[expr_cols] = merged_df[expr_cols].div(merged_df['num_genes'].replace(0, 1), axis=0)\n", " \n", " # Sum expression values for each gene\n", " gene_expression_df = merged_df.groupby('Gene')[expr_cols].sum()\n", " \n", " return gene_expression_df\n", " \n", " # Apply our custom mapping function\n", " gene_data_mapped = custom_apply_gene_mapping(gene_data, gene_mapping)\n", " \n", " # Check if we got any results\n", " if len(gene_data_mapped) > 0:\n", " gene_data = gene_data_mapped\n", " print(f\"Successfully mapped to {len(gene_data)} gene identifiers\")\n", " else:\n", " print(\"Mapping produced no results. 
Using original gene data with numeric IDs.\")\n",
    "        # Keep the original gene_data\n",
    "except Exception as e:\n",
    "    print(f\"Error in gene mapping: {e}\")\n",
    "    print(\"Using original gene data with numeric IDs.\")\n",
    "\n",
    "# Preview the gene expression data\n",
    "print(\"\\nGene expression data preview:\")\n",
    "print(f\"Gene data shape: {gene_data.shape}\")\n",
    "if len(gene_data) > 0:\n",
    "    print(preview_df(gene_data))\n",
    "\n",
    "# Save the processed gene data\n",
    "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n",
    "gene_data.to_csv(out_gene_data_file)\n",
    "print(f\"\\nGene expression data saved to {out_gene_data_file}\")\n"
  ] }, { "cell_type": "markdown", "id": "ca80c468", "metadata": {}, "source": [ "### Step 7: Data Normalization and Linking" ] }, { "cell_type": "code", "execution_count": null, "id": "0e0e8136", "metadata": {}, "outputs": [], "source": [
    "# 1. Normalize gene symbols in the gene expression data obtained in Step 6\n",
    "print(f\"Gene data shape before normalization: {gene_data.shape}\")\n",
    "\n",
    "try:\n",
    "    # Normalize gene symbols using the NCBI Gene database information\n",
    "    normalized_gene_data = normalize_gene_symbols_in_index(gene_data)\n",
    "\n",
    "    if normalized_gene_data.empty:\n",
    "        print(\"Normalization resulted in an empty dataframe. Using original gene data instead.\")\n",
    "        normalized_gene_data = gene_data\n",
    "\n",
    "    print(f\"Gene data shape after normalization: {normalized_gene_data.shape}\")\n",
    "\n",
    "    # Save the normalized gene data to the output file\n",
    "    os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n",
    "    normalized_gene_data.to_csv(out_gene_data_file)\n",
    "    print(f\"Normalized gene data saved to {out_gene_data_file}\")\n",
    "except Exception as e:\n",
    "    print(f\"Error normalizing gene data: {e}. Using original gene data instead.\")\n",
    "    normalized_gene_data = gene_data\n",
    "    # Save the original gene data if normalization fails\n",
    "    os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n",
    "    normalized_gene_data.to_csv(out_gene_data_file)\n",
    "\n",
    "# 2. Link clinical and genetic data\n",
    "# Use the trait_row identified in Step 2 (trait_row = 2) to extract trait data\n",
    "is_trait_available = trait_row is not None\n",
    "\n",
    "if is_trait_available:\n",
    "    # Extract clinical features using the conversion functions defined in Step 2\n",
    "    # (no age arguments are passed because age_row is None for this cohort)\n",
    "    clinical_features = geo_select_clinical_features(\n",
    "        clinical_df=clinical_data,\n",
    "        trait=trait,\n",
    "        trait_row=trait_row,\n",
    "        convert_trait=convert_trait,\n",
    "        gender_row=gender_row,\n",
    "        convert_gender=convert_gender\n",
    "    )\n",
    "\n",
    "    # Save clinical features\n",
    "    os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n",
    "    clinical_features.to_csv(out_clinical_data_file)\n",
    "    print(f\"Clinical data saved to {out_clinical_data_file}\")\n",
    "\n",
    "    # Link clinical and genetic data\n",
    "    linked_data = geo_link_clinical_genetic_data(clinical_features, normalized_gene_data)\n",
    "    print(f\"Linked data shape: {linked_data.shape}\")\n",
    "else:\n",
    "    # Create a minimal dataframe with just the trait column\n",
    "    linked_data = pd.DataFrame({trait: [np.nan]})\n",
    "    print(\"No trait data available, creating minimal dataframe for validation.\")\n",
    "\n",
    "# 
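Quick sanity check before step 3 (assumption: linked_data has samples as rows and, when\n",
    "# trait data is available, a column named by the `trait` variable):\n",
    "if is_trait_available and trait in linked_data.columns:\n",
    "    print(linked_data[trait].value_counts(dropna=False))\n",
    "\n",
    "# 3. 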
Handle missing values in the linked data\n", "if is_trait_available:\n", " print(\"\\nHandling missing values...\")\n", " linked_data = handle_missing_values(linked_data, trait)\n", " print(f\"After missing value handling, linked data shape: {linked_data.shape}\")\n", "\n", "# 4. Determine whether trait and demographic features are biased\n", "if is_trait_available and not linked_data.empty and len(linked_data.columns) > 1:\n", " print(\"\\nEvaluating feature bias...\")\n", " is_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)\n", " print(f\"Trait bias evaluation result: {is_biased}\")\n", "else:\n", " is_biased = False\n", " print(\"Skipping bias evaluation due to insufficient data.\")\n", "\n", "# 5. Final validation and save metadata\n", "note = \"\"\n", "if not is_trait_available:\n", " note = f\"Dataset contains gene expression data but no {trait} measurements.\"\n", "elif is_biased:\n", " note = f\"Dataset contains {trait} data but its distribution is severely biased.\"\n", "\n", "# Validate and save cohort info\n", "is_usable = validate_and_save_cohort_info(\n", " is_final=True, \n", " cohort=cohort, \n", " info_path=json_path, \n", " is_gene_available=is_gene_available, \n", " is_trait_available=is_trait_available, \n", " is_biased=is_biased,\n", " df=linked_data,\n", " note=note\n", ")\n", "\n", "# 6. Save the linked data if usable\n", "print(f\"\\nDataset usability: {is_usable}\")\n", "if is_usable:\n", " os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n", " linked_data.to_csv(out_data_file)\n", " print(f\"Linked data saved to {out_data_file}\")\n", "else:\n", " print(f\"Dataset is not usable for {trait} association studies. Data not saved.\")" ] } ], "metadata": {}, "nbformat": 4, "nbformat_minor": 5 }