{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "6a9120be", "metadata": {}, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n", "\n", "# Path Configuration\n", "from tools.preprocess import *\n", "\n", "# Processing context\n", "trait = \"X-Linked_Lymphoproliferative_Syndrome\"\n", "cohort = \"GSE180393\"\n", "\n", "# Input paths\n", "in_trait_dir = \"../../input/GEO/X-Linked_Lymphoproliferative_Syndrome\"\n", "in_cohort_dir = \"../../input/GEO/X-Linked_Lymphoproliferative_Syndrome/GSE180393\"\n", "\n", "# Output paths\n", "out_data_file = \"../../output/preprocess/X-Linked_Lymphoproliferative_Syndrome/GSE180393.csv\"\n", "out_gene_data_file = \"../../output/preprocess/X-Linked_Lymphoproliferative_Syndrome/gene_data/GSE180393.csv\"\n", "out_clinical_data_file = \"../../output/preprocess/X-Linked_Lymphoproliferative_Syndrome/clinical_data/GSE180393.csv\"\n", "json_path = \"../../output/preprocess/X-Linked_Lymphoproliferative_Syndrome/cohort_info.json\"\n" ] }, { "cell_type": "markdown", "id": "8ec0b4be", "metadata": {}, "source": [ "### Step 1: Initial Data Loading" ] }, { "cell_type": "code", "execution_count": null, "id": "7640018c", "metadata": {}, "outputs": [], "source": [ "# 1. Let's first list the directory contents to understand what files are available\n", "import os\n", "\n", "print(\"Files in the cohort directory:\")\n", "files = os.listdir(in_cohort_dir)\n", "print(files)\n", "\n", "# Adapt file identification to handle different naming patterns\n", "soft_files = [f for f in files if 'soft' in f.lower() or '.soft' in f.lower() or '_soft' in f.lower()]\n", "matrix_files = [f for f in files if 'matrix' in f.lower() or '.matrix' in f.lower() or '_matrix' in f.lower()]\n", "\n", "# If no files with these patterns are found, look for alternative file types\n", "if not soft_files:\n", " soft_files = [f for f in files if f.endswith('.txt') or f.endswith('.gz')]\n", "if not matrix_files:\n", " matrix_files = [f for f in files if f.endswith('.txt') or f.endswith('.gz')]\n", "\n", "print(\"Identified SOFT files:\", soft_files)\n", "print(\"Identified matrix files:\", matrix_files)\n", "\n", "# Use the first files found, if any\n", "if len(soft_files) > 0 and len(matrix_files) > 0:\n", " soft_file = os.path.join(in_cohort_dir, soft_files[0])\n", " matrix_file = os.path.join(in_cohort_dir, matrix_files[0])\n", " \n", " # 2. Read the matrix file to obtain background information and sample characteristics data\n", " background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n", " clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", " background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", " \n", " # 3. Obtain the sample characteristics dictionary from the clinical dataframe\n", " sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", " \n", " # 4. 
Explicitly print out all the background information and the sample characteristics dictionary\n", " print(\"\\nBackground Information:\")\n", " print(background_info)\n", " print(\"\\nSample Characteristics Dictionary:\")\n", " print(sample_characteristics_dict)\n", "else:\n", " print(\"No appropriate files found in the directory.\")\n" ] }, { "cell_type": "markdown", "id": "a7579484", "metadata": {}, "source": [ "### Step 2: Dataset Analysis and Clinical Feature Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "a9a6798e", "metadata": {}, "outputs": [], "source": [ "# 1. Analyzing the dataset for gene expression data\n", "# From the background info, we see this is a microarray study on Affymetrix platform\n", "# analyzing transcriptome data, which indicates gene expression data is available\n", "is_gene_available = True\n", "\n", "# 2. Variable availability and data type conversion\n", "# 2.1 Data Availability\n", "# Looking at the sample characteristics:\n", "# For trait: The key 0 contains disease categories/sample groups\n", "# Age: Not available in the characteristics\n", "# Gender: Not available in the characteristics\n", "\n", "trait_row = 0 # This corresponds to \"sample group\"\n", "age_row = None # Age data not available\n", "gender_row = None # Gender data not available\n", "\n", "# 2.2 Data Type Conversion Functions\n", "\n", "def convert_trait(value):\n", " \"\"\"\n", " Convert the trait value to binary: \n", " 1 for disease conditions, 0 for healthy controls (Living donor)\n", " \"\"\"\n", " if value is None or ':' not in value:\n", " return None\n", " \n", " # Extract the value after the colon\n", " value = value.split(':', 1)[1].strip()\n", " \n", " # Determine trait status\n", " if value == \"Living donor\":\n", " return 0 # Healthy control\n", " else:\n", " return 1 # Disease condition (any type of kidney disease)\n", "\n", "# Since age and gender data are not available, we define placeholder functions\n", "def convert_age(value):\n", " return None\n", "\n", "def convert_gender(value):\n", " return None\n", "\n", "# 3. Save Metadata\n", "# The trait data is available since trait_row is not None\n", "is_trait_available = trait_row is not None\n", "\n", "# Validate and save the initial filtering information\n", "validate_and_save_cohort_info(\n", " is_final=False,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", ")\n", "\n", "# 4. 
Clinical Feature Extraction\n", "# Since trait_row is not None, we proceed with clinical feature extraction\n", "if trait_row is not None:\n", " try:\n", " # Extract clinical features using the provided clinical_data from previous step\n", " selected_clinical_df = geo_select_clinical_features(\n", " clinical_df=clinical_data,\n", " trait=trait,\n", " trait_row=trait_row,\n", " convert_trait=convert_trait,\n", " age_row=age_row,\n", " convert_age=convert_age,\n", " gender_row=gender_row,\n", " convert_gender=convert_gender\n", " )\n", " \n", " # Preview the dataframe\n", " print(\"Preview of selected clinical data:\")\n", " print(preview_df(selected_clinical_df))\n", " \n", " # Save to CSV\n", " os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n", " selected_clinical_df.to_csv(out_clinical_data_file)\n", " print(f\"Clinical data saved to {out_clinical_data_file}\")\n", " except Exception as e:\n", " print(f\"Error during clinical feature extraction: {e}\")\n" ] }, { "cell_type": "markdown", "id": "14696995", "metadata": {}, "source": [ "### Step 3: Gene Data Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "f253e320", "metadata": {}, "outputs": [], "source": [ "# Use the helper function to get the proper file paths\n", "soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# Extract gene expression data\n", "try:\n", " gene_data = get_genetic_data(matrix_file_path)\n", " \n", " # Print the first 20 row IDs (gene or probe identifiers)\n", " print(\"First 20 gene/probe identifiers:\")\n", " print(gene_data.index[:20])\n", " \n", " # Print shape to understand the dataset dimensions\n", " print(f\"\\nGene expression data shape: {gene_data.shape}\")\n", " \n", "except Exception as e:\n", " print(f\"Error extracting gene data: {e}\")\n" ] }, { "cell_type": "markdown", "id": "c3841377", "metadata": {}, "source": [ "### Step 4: Gene Identifier Review" ] }, { "cell_type": "code", "execution_count": null, "id": "c191534f", "metadata": {}, "outputs": [], "source": [ "# Reviewing the gene identifiers\n", "# The identifiers like '100009613_at', '100009676_at', '10000_at' appear to be probe IDs from a microarray\n", "# platform, likely Affymetrix, as indicated by the '_at' suffix.\n", "# These are not standard human gene symbols and will need to be mapped to official gene symbols.\n", "\n", "requires_gene_mapping = True\n" ] }, { "cell_type": "markdown", "id": "acb6b7e4", "metadata": {}, "source": [ "### Step 5: Gene Annotation" ] }, { "cell_type": "code", "execution_count": null, "id": "6f9ccd25", "metadata": {}, "outputs": [], "source": [ "# 1. 
Examine the data more thoroughly to determine what type of data it contains\n", "# gzip and pandas may already be provided by tools.preprocess, but explicit imports are safer\n", "import gzip\n", "import pandas as pd\n", "\n", "# Initialize before the try block so the platform check below cannot raise a NameError\n", "platform_info = []\n", "\n", "try:\n", "    # First, let's check a few rows of the gene_data we extracted in Step 3\n", "    print(\"Sample of gene expression data (first 5 rows, first 5 columns):\")\n", "    print(gene_data.iloc[:5, :5])\n", "    \n", "    # Analyze the SOFT file to identify the data type and mapping information\n", "    with gzip.open(soft_file_path, 'rt', encoding='latin-1') as f:\n", "        for line in f:\n", "            if line.startswith(\"!Platform_title\") or line.startswith(\"!Series_title\") or \"description\" in line.lower():\n", "                platform_info.append(line.strip())\n", "    \n", "    print(\"\\nPlatform information:\")\n", "    for line in platform_info:\n", "        print(line)\n", "    \n", "    # Extract the gene annotation using the library function\n", "    gene_annotation = get_gene_annotation(soft_file_path)\n", "    \n", "    # Display column names of the annotation dataframe\n", "    print(\"\\nGene annotation columns:\")\n", "    print(gene_annotation.columns.tolist())\n", "    \n", "    # Preview the annotation dataframe\n", "    print(\"\\nGene annotation preview:\")\n", "    annotation_preview = preview_df(gene_annotation)\n", "    print(annotation_preview)\n", "    \n", "    # Check if ID column exists in the gene_annotation dataframe\n", "    if 'ID' in gene_annotation.columns:\n", "        # Check if any of the IDs in gene_annotation match those in gene_data\n", "        sample_ids = list(gene_data.index[:10])\n", "        matching_rows = gene_annotation[gene_annotation['ID'].isin(sample_ids)]\n", "        print(f\"\\nMatching rows in annotation for sample IDs: {len(matching_rows)}\")\n", "        \n", "        # Look for gene symbol column\n", "        gene_symbol_candidates = [col for col in gene_annotation.columns if 'gene' in col.lower() or 'symbol' in col.lower() or 'name' in col.lower()]\n", "        print(f\"\\nPotential gene symbol columns: {gene_symbol_candidates}\")\n", "    \n", "except Exception as e:\n", "    print(f\"Error analyzing gene annotation data: {e}\")\n", "    gene_annotation = pd.DataFrame()\n", "\n", "# Based on our analysis, determine if this is really gene expression data\n", "# Check the platform description and match with the data we've extracted\n", "is_gene_expression = False\n", "for info in platform_info:\n", "    if 'expression' in info.lower() or 'transcript' in info.lower() or 'mrna' in info.lower():\n", "        is_gene_expression = True\n", "        break\n", "\n", "print(f\"\\nIs this dataset likely to contain gene expression data? {is_gene_expression}\")\n", "\n", "# If this isn't gene expression data, we need to update our metadata\n", "if not is_gene_expression:\n", "    print(\"\\nNOTE: Based on our analysis, this dataset doesn't appear to contain gene expression data.\")\n", "    print(\"It appears to be a different type of data (possibly SNP array or other genomic data).\")\n", "    # Update is_gene_available for metadata\n", "    is_gene_available = False\n", "    \n", "    # Save the updated metadata\n", "    validate_and_save_cohort_info(\n", "        is_final=False,\n", "        cohort=cohort,\n", "        info_path=json_path,\n", "        is_gene_available=is_gene_available,\n", "        is_trait_available=is_trait_available\n", "    )\n" ] }, { "cell_type": "markdown", "id": "eae651c0", "metadata": {}, "source": [ "### Step 6: Gene Identifier Mapping" ] }, { "cell_type": "code", "execution_count": null, "id": "833dec3e", "metadata": {}, "outputs": [], "source": [ "# 1. 
Identify the columns for gene mapping\n", "probe_id_col = 'ID'\n", "gene_id_col = 'ENTREZ_GENE_ID'\n", "\n", "print(f\"Using mapping from {probe_id_col} (probe identifiers) to {gene_id_col} (gene identifiers)\")\n", "\n", "# First, let's examine the format differences between gene_data and gene_annotation\n", "print(\"\\nSample probe IDs in gene expression data:\")\n", "print(gene_data.index[:5].tolist())\n", "print(\"\\nSample probe IDs in gene annotation:\")\n", "print(gene_annotation[probe_id_col][:5].tolist())\n", "\n", "try:\n", " # Create a properly formatted mapping dictionary that will match the gene_data index\n", " mapping_dict = {}\n", " \n", " # Extract the base part of the probe IDs from gene_data (remove suffix if needed)\n", " for probe_id in gene_data.index:\n", " # Check if this probe exists directly in the annotation\n", " matching_rows = gene_annotation[gene_annotation[probe_id_col] == probe_id]\n", " \n", " if len(matching_rows) > 0:\n", " # Direct match found\n", " entrez_id = matching_rows.iloc[0][gene_id_col]\n", " mapping_dict[probe_id] = str(entrez_id)\n", " else:\n", " # Try matching without the \"_at\" suffix\n", " base_id = probe_id.split('_')[0] if '_' in probe_id else probe_id\n", " matching_rows = gene_annotation[gene_annotation[probe_id_col] == base_id]\n", " \n", " if len(matching_rows) > 0:\n", " entrez_id = matching_rows.iloc[0][gene_id_col]\n", " mapping_dict[probe_id] = str(entrez_id)\n", " \n", " print(f\"\\nCreated mapping for {len(mapping_dict)} probes\")\n", " \n", " # Convert mapping_dict to DataFrame for apply_gene_mapping function\n", " mapping_df = pd.DataFrame({\n", " 'ID': list(mapping_dict.keys()),\n", " 'Gene': list(mapping_dict.values())\n", " })\n", " \n", " # Apply mapping to get gene expression data\n", " if len(mapping_df) > 0:\n", " # Skip the symbol extraction since we're using Entrez IDs directly\n", " # Create a custom function to apply the mapping\n", " def map_probes_to_genes(expression_df, mapping_df):\n", " \"\"\"Map probes to genes using the mapping dataframe without symbol extraction\"\"\"\n", " # Add a sentinel column to track genes per probe (always 1 for this case)\n", " mapping_df['num_genes'] = 1\n", " mapping_df = mapping_df.set_index('ID')\n", " \n", " # Join expression data with mapping\n", " merged_df = mapping_df.join(expression_df, how='inner')\n", " \n", " # Get expression columns\n", " expr_cols = [col for col in merged_df.columns if col not in ['Gene', 'num_genes']]\n", " \n", " # Group by gene and sum expression values\n", " gene_expression_df = merged_df.groupby('Gene')[expr_cols].sum()\n", " \n", " return gene_expression_df\n", " \n", " # Apply custom mapping function\n", " gene_data = map_probes_to_genes(gene_data, mapping_df)\n", " \n", " print(f\"\\nAfter mapping, gene expression data shape: {gene_data.shape}\")\n", " print(\"First 5 genes and 3 samples after mapping:\")\n", " print(gene_data.iloc[:5, :3] if not gene_data.empty else \"No genes mapped successfully\")\n", " \n", " # Normalize gene symbols using the provided function if not empty\n", " if not gene_data.empty:\n", " print(\"\\nNormalizing gene symbols...\")\n", " try:\n", " # Since we're using Entrez IDs, we'll skip normalization\n", " # Save directly with Entrez IDs as gene identifiers\n", " os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", " gene_data.to_csv(out_gene_data_file)\n", " print(f\"Gene expression data saved to {out_gene_data_file}\")\n", " except Exception as e:\n", " print(f\"Error during gene symbol 
normalization: {e}\")\n", " else:\n", " print(\"No gene expression data to save after mapping.\")\n", " else:\n", " print(\"No valid mappings found between probes and genes.\")\n", " \n", "except Exception as e:\n", " print(f\"Error during gene mapping: {e}\")\n", " import traceback\n", " traceback.print_exc()" ] } ], "metadata": {}, "nbformat": 4, "nbformat_minor": 5 }