{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "1069d88c", "metadata": {}, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n", "\n", "# Path Configuration\n", "from tools.preprocess import *\n", "\n", "# Processing context\n", "trait = \"Werner_Syndrome\"\n", "cohort = \"GSE48761\"\n", "\n", "# Input paths\n", "in_trait_dir = \"../../input/GEO/Werner_Syndrome\"\n", "in_cohort_dir = \"../../input/GEO/Werner_Syndrome/GSE48761\"\n", "\n", "# Output paths\n", "out_data_file = \"../../output/preprocess/Werner_Syndrome/GSE48761.csv\"\n", "out_gene_data_file = \"../../output/preprocess/Werner_Syndrome/gene_data/GSE48761.csv\"\n", "out_clinical_data_file = \"../../output/preprocess/Werner_Syndrome/clinical_data/GSE48761.csv\"\n", "json_path = \"../../output/preprocess/Werner_Syndrome/cohort_info.json\"\n" ] }, { "cell_type": "markdown", "id": "dc5763b1", "metadata": {}, "source": [ "### Step 1: Initial Data Loading" ] }, { "cell_type": "code", "execution_count": null, "id": "9ed93969", "metadata": {}, "outputs": [], "source": [ "# 1. Let's first list the directory contents to understand what files are available\n", "import os\n", "\n", "print(\"Files in the cohort directory:\")\n", "files = os.listdir(in_cohort_dir)\n", "print(files)\n", "\n", "# Adapt file identification to handle different naming patterns\n", "soft_files = [f for f in files if 'soft' in f.lower() or '.soft' in f.lower() or '_soft' in f.lower()]\n", "matrix_files = [f for f in files if 'matrix' in f.lower() or '.matrix' in f.lower() or '_matrix' in f.lower()]\n", "\n", "# If no files with these patterns are found, look for alternative file types\n", "if not soft_files:\n", " soft_files = [f for f in files if f.endswith('.txt') or f.endswith('.gz')]\n", "if not matrix_files:\n", " matrix_files = [f for f in files if f.endswith('.txt') or f.endswith('.gz')]\n", "\n", "print(\"Identified SOFT files:\", soft_files)\n", "print(\"Identified matrix files:\", matrix_files)\n", "\n", "# Use the first files found, if any\n", "if len(soft_files) > 0 and len(matrix_files) > 0:\n", " soft_file = os.path.join(in_cohort_dir, soft_files[0])\n", " matrix_file = os.path.join(in_cohort_dir, matrix_files[0])\n", " \n", " # 2. Read the matrix file to obtain background information and sample characteristics data\n", " background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n", " clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", " background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", " \n", " # 3. Obtain the sample characteristics dictionary from the clinical dataframe\n", " sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", " \n", " # 4. Explicitly print out all the background information and the sample characteristics dictionary\n", " print(\"\\nBackground Information:\")\n", " print(background_info)\n", " print(\"\\nSample Characteristics Dictionary:\")\n", " print(sample_characteristics_dict)\n", "else:\n", " print(\"No appropriate files found in the directory.\")\n" ] }, { "cell_type": "markdown", "id": "d11f990f", "metadata": {}, "source": [ "### Step 2: Dataset Analysis and Clinical Feature Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "b7cf8258", "metadata": {}, "outputs": [], "source": [ "# 1. 
Determine gene expression data availability\n", "# Based on the background information, this dataset contains gene expression profiling data\n", "is_gene_available = True\n", "\n", "# 2. Determine variable availability and create conversion functions\n", "\n", "# 2.1 and 2.2 for trait (Werner Syndrome)\n", "# From the sample characteristics, we can infer the trait from 'genotype' and 'cell type'\n", "# We'll use cell type (key 3) as it directly indicates Werner Syndrome status\n", "trait_row = 3\n", "\n", "def convert_trait(value):\n", " if value is None:\n", " return None\n", " \n", " # Extract the value after the colon\n", " if ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " \n", " # Convert to binary: 1 for Werner Syndrome, 0 for normal\n", " if 'Werner Syndrome' in value:\n", " return 1\n", " elif 'Normal' in value:\n", " return 0\n", " else:\n", " return None\n", "\n", "# 2.1 and 2.2 for age\n", "age_row = 1\n", "\n", "def convert_age(value):\n", " if value is None:\n", " return None\n", " \n", " # Extract the value after the colon\n", " if ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " \n", " # Convert to numeric age\n", " if value == 'embryonic':\n", " return 0 # Assign 0 for embryonic samples\n", " else:\n", " try:\n", " return int(value)\n", " except:\n", " return None\n", "\n", "# 2.1 and 2.2 for gender\n", "gender_row = 0\n", "\n", "def convert_gender(value):\n", " if value is None:\n", " return None\n", " \n", " # Extract the value after the colon\n", " if ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " \n", " # Convert to binary: 0 for female, 1 for male\n", " if value.lower() == 'female':\n", " return 0\n", " elif value.lower() == 'male':\n", " return 1\n", " else:\n", " return None\n", "\n", "# 3. Save metadata for initial filtering\n", "# The trait data is available since trait_row is not None\n", "is_trait_available = trait_row is not None\n", "validate_and_save_cohort_info(\n", " is_final=False,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", ")\n", "\n", "# 4. 
Extract clinical features if trait data is available\n", "if trait_row is not None:\n", " # The sample characteristics dictionary was already provided in the previous step output\n", " # Create a DataFrame from the characteristics dictionary to use as clinical data\n", " sample_chars = {\n", " 0: ['gender: Male', 'gender: Female'],\n", " 1: ['age: 10', 'age: 16', 'age: 7', 'age: 29', 'age: 20', 'age: 60', \n", " 'age: 30', 'age: 25', 'age: 37', 'age: 36', 'age: embryonic'],\n", " 2: ['genotype: WT', 'genotype: WRN mutant'],\n", " 3: ['cell type: Normal Fibroblast', 'cell type: Werner Syndrome Fibroblast', \n", " 'cell type: Normal iPSC', 'cell type: Werner Syndrome iPSC',\n", " 'cell type: Werner Syndrome iPSC, p53 knockdown', \n", " 'cell type: Werner Syndrome iPSC, hTERT expressed', 'cell type: Normal hESC']\n", " }\n", " \n", " # This clinical data is expected to be in the format that geo_select_clinical_features requires\n", " # Constructing a minimal clinical DataFrame for feature extraction\n", " clinical_data = pd.DataFrame(sample_chars)\n", " \n", " # Extract clinical features\n", " selected_clinical_df = geo_select_clinical_features(\n", " clinical_df=clinical_data,\n", " trait=trait,\n", " trait_row=trait_row,\n", " convert_trait=convert_trait,\n", " age_row=age_row,\n", " convert_age=convert_age,\n", " gender_row=gender_row,\n", " convert_gender=convert_gender\n", " )\n", " \n", " # Preview the extracted clinical data\n", " preview = preview_df(selected_clinical_df)\n", " print(\"Preview of extracted clinical data:\")\n", " print(preview)\n", " \n", " # Save the clinical data\n", " os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n", " selected_clinical_df.to_csv(out_clinical_data_file)\n" ] }, { "cell_type": "markdown", "id": "03a0cd63", "metadata": {}, "source": [ "### Step 3: Dataset Analysis and Clinical Feature Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "c06fc385", "metadata": {}, "outputs": [], "source": [ "# First, examine the dataset information to determine if gene expression data is available\n", "# and identify the keys for trait, age, and gender in the sample characteristics\n", "\n", "import os\n", "import pandas as pd\n", "import json\n", "import numpy as np\n", "import gzip\n", "\n", "# Let's look at the files in the cohort directory\n", "files_in_dir = os.listdir(in_cohort_dir)\n", "print(f\"Files in {in_cohort_dir}:\")\n", "print(files_in_dir)\n", "\n", "# Let's look at the matrix file to determine if gene expression data is available\n", "expression_files = [f for f in files_in_dir if f.endswith(\"_series_matrix.txt.gz\")]\n", "sample_chars = {}\n", "platform_id = None\n", "series_summary = None\n", "series_title = None\n", "\n", "if expression_files:\n", " expression_file = os.path.join(in_cohort_dir, expression_files[0])\n", " \n", " # Peek at the beginning of the file to understand its structure\n", " char_row_index = 0 # each !Sample_characteristics_ch1 line holds one characteristic across all samples\n", " with gzip.open(expression_file, 'rt') as f:\n", " for line in f:\n", " # Collect sample characteristics information\n", " if line.startswith(\"!Sample_characteristics_ch\"):\n", " parts = line.strip().split('\\t')\n", " # Assign row IDs sequentially; the characteristics key itself carries no row index\n", " row_id = char_row_index\n", " char_row_index += 1\n", " if row_id not in sample_chars:\n", " sample_chars[row_id] = []\n", " sample_chars[row_id].extend([p.strip('\"') for p in parts[1:]])\n", " \n", " # Look for data that indicates 
this is gene expression data\n", " if line.startswith(\"!Series_platform_id\"):\n", " platform_id = line.strip().split('\\t')[1].strip('\"')\n", " print(f\"Platform ID: {platform_id}\")\n", " \n", " if line.startswith(\"!Series_summary\"):\n", " series_summary = line.strip().split('\\t')[1].strip('\"')\n", " print(f\"Series summary: {series_summary}\")\n", " \n", " if line.startswith(\"!Series_title\"):\n", " series_title = line.strip().split('\\t')[1].strip('\"')\n", " print(f\"Series title: {series_title}\")\n", " \n", " # Stop after reading the header section\n", " if line.startswith(\"!series_matrix_table_begin\"):\n", " break\n", " \n", " print(\"\\nSample characteristics:\")\n", " for row_id, values in sample_chars.items():\n", " unique_values = set(values)\n", " print(f\"Row {row_id}: {unique_values}\")\n", "\n", "# Create a proper DataFrame from the sample characteristics\n", "# This will be our clinical data\n", "samples = []\n", "sample_ids = []\n", "\n", "# Extract sample IDs from the last line of the sample characteristics\n", "for row_id, values in sample_chars.items():\n", " if len(samples) == 0:\n", " # Initialize the samples list with dictionaries for each sample\n", " samples = [{\"ID\": f\"Sample_{i}\"} for i in range(len(values))]\n", " sample_ids = [f\"Sample_{i}\" for i in range(len(values))]\n", " \n", " # Add each characteristic to its corresponding sample\n", " for i, value in enumerate(values):\n", " if i < len(samples):\n", " samples[i][f\"characteristic_{row_id}\"] = value\n", "\n", "# Convert to DataFrame\n", "clinical_data = pd.DataFrame(samples)\n", "print(\"\\nClinical data preview:\")\n", "print(clinical_data.head())\n", "\n", "# Based on the examination of sample characteristics, we need to determine:\n", "# 1. Whether the dataset contains gene expression data\n", "# 2. 
The rows containing trait, age, and gender information\n", "\n", "# Analyze the data to find which rows contain which information\n", "trait_row = None\n", "age_row = None\n", "gender_row = None\n", "\n", "# Check each row of sample characteristics to identify trait, age, and gender information\n", "for row_id, values in sample_chars.items():\n", " # Look at a few examples\n", " examples = list(values)[:5] if len(values) > 5 else list(values)\n", " value_text = \" | \".join(examples)\n", " print(f\"Row {row_id} examples: {value_text}\")\n", " \n", " lower_values = [str(v).lower() for v in values]\n", " \n", " # Check for trait information (Werner Syndrome status)\n", " if any((\"werner\" in v or \"ws\" in v or \"disease\" in v or \"patient\" in v or \"control\" in v) for v in lower_values):\n", " trait_row = row_id\n", " print(f\"Found trait information in row {row_id}\")\n", " \n", " # Check for age information\n", " if any((\"age\" in v) for v in lower_values):\n", " age_row = row_id\n", " print(f\"Found age information in row {row_id}\")\n", " \n", " # Check for gender information\n", " if any((\"gender\" in v or \"sex\" in v or \"male\" in v or \"female\" in v) for v in lower_values):\n", " gender_row = row_id\n", " print(f\"Found gender information in row {row_id}\")\n", "\n", "# Based on platform ID and series descriptions, determine if this is gene expression data\n", "is_gene_available = True\n", "if platform_id:\n", " # Most gene expression platforms start with GPL\n", " is_gene_available = platform_id.startswith(\"GPL\")\n", " \n", " # Check if this is miRNA or methylation data which we don't want\n", " if \"mirna\" in str(series_title).lower() + str(series_summary).lower() or \"methylation\" in str(series_title).lower() + str(series_summary).lower():\n", " is_gene_available = False\n", "\n", "print(f\"\\nGene expression data available: {is_gene_available}\")\n", "print(f\"Trait row: {trait_row}\")\n", "print(f\"Age row: {age_row}\")\n", "print(f\"Gender row: {gender_row}\")\n", "\n", "# Define conversion functions based on the identified rows\n", "\n", "def convert_trait(value):\n", " \"\"\"Convert trait value to binary (0: control, 1: Werner syndrome)\"\"\"\n", " if value is None:\n", " return None\n", " \n", " # Extract the value after the colon if present\n", " if ':' in value:\n", " value = value.split(':', 1)[1].strip().lower()\n", " else:\n", " value = value.strip().lower()\n", " \n", " if 'werner' in value or 'ws' in value or 'patient' in value:\n", " return 1\n", " elif 'control' in value or 'normal' in value or 'healthy' in value:\n", " return 0\n", " return None\n", "\n", "def convert_age(value):\n", " \"\"\"Convert age value to continuous (numeric)\"\"\"\n", " if value is None:\n", " return None\n", " \n", " # Extract the value after the colon if present\n", " if ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " else:\n", " value = value.strip()\n", " \n", " # Try to extract numeric age value\n", " import re\n", " numeric_match = re.search(r'\\d+', value)\n", " if numeric_match:\n", " return float(numeric_match.group())\n", " return None\n", "\n", "def convert_gender(value):\n", " \"\"\"Convert gender value to binary (0: female, 1: male)\"\"\"\n", " if value is None:\n", " return None\n", " \n", " # Extract the value after the colon if present\n", " if ':' in value:\n", " value = value.split(':', 1)[1].strip().lower()\n", " else:\n", " value = value.strip().lower()\n", " \n", " if 'female' in value or 'f' == value:\n", " return 0\n", " elif 'male' in 
value or 'm' == value:\n", " return 1\n", " return None\n", "\n", "# Check if trait data is available\n", "is_trait_available = trait_row is not None\n", "\n", "# Save metadata\n", "validate_and_save_cohort_info(\n", " is_final=False,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", ")\n", "\n", "# Extract clinical features if trait data is available\n", "if trait_row is not None:\n", " # Use the clinical_data DataFrame created from sample characteristics\n", " selected_clinical_df = geo_select_clinical_features(\n", " clinical_df=clinical_data,\n", " trait=trait,\n", " trait_row=trait_row,\n", " convert_trait=convert_trait,\n", " age_row=age_row,\n", " convert_age=convert_age,\n", " gender_row=gender_row,\n", " convert_gender=convert_gender\n", " )\n", " \n", " # Preview the extracted clinical data\n", " print(\"Preview of extracted clinical data:\")\n", " print(preview_df(selected_clinical_df))\n", " \n", " # Save the clinical data\n", " os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n", " selected_clinical_df.to_csv(out_clinical_data_file)\n", " print(f\"Clinical data saved to {out_clinical_data_file}\")\n" ] }, { "cell_type": "markdown", "id": "0e78af21", "metadata": {}, "source": [ "### Step 4: Gene Data Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "5c5b26bd", "metadata": {}, "outputs": [], "source": [ "# Use the helper function to get the proper file paths\n", "soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# Extract gene expression data\n", "try:\n", " gene_data = get_genetic_data(matrix_file_path)\n", " \n", " # Print the first 20 row IDs (gene or probe identifiers)\n", " print(\"First 20 gene/probe identifiers:\")\n", " print(gene_data.index[:20])\n", " \n", " # Print shape to understand the dataset dimensions\n", " print(f\"\\nGene expression data shape: {gene_data.shape}\")\n", " \n", "except Exception as e:\n", " print(f\"Error extracting gene data: {e}\")\n" ] }, { "cell_type": "markdown", "id": "b38ea011", "metadata": {}, "source": [ "### Step 5: Gene Identifier Review" ] }, { "cell_type": "code", "execution_count": null, "id": "5b0c25c3", "metadata": {}, "outputs": [], "source": [ "# These identifiers are numerical and appear to be probe IDs from a microarray platform, not human gene symbols\n", "# The 'gene_assignment' annotation used later suggests Affymetrix probe set IDs rather than Illumina BeadChip IDs\n", "# These will need to be mapped to standard gene symbols for analysis\n", "\n", "requires_gene_mapping = True\n" ] }, { "cell_type": "markdown", "id": "ebb05b2f", "metadata": {}, "source": [ "### Step 6: Gene Annotation" ] }, { "cell_type": "code", "execution_count": null, "id": "b63a8e97", "metadata": {}, "outputs": [], "source": [ "# 1. 
This part examines the data more thoroughly to determine what type of data it contains\n", "# Initialize platform_info before the try block so the checks below never hit an undefined name\n", "platform_info = []\n", "try:\n", " # First, let's check a few rows of the gene_data we extracted in Step 4\n", " print(\"Sample of gene expression data (first 5 rows, first 5 columns):\")\n", " print(gene_data.iloc[:5, :5])\n", " \n", " # Analyze the SOFT file to identify the data type and mapping information\n", " with gzip.open(soft_file_path, 'rt', encoding='latin-1') as f:\n", " for line in f:\n", " if line.startswith(\"!Platform_title\") or line.startswith(\"!Series_title\") or \"description\" in line.lower():\n", " platform_info.append(line.strip())\n", " \n", " print(\"\\nPlatform information:\")\n", " for line in platform_info:\n", " print(line)\n", " \n", " # Extract the gene annotation using the library function\n", " gene_annotation = get_gene_annotation(soft_file_path)\n", " \n", " # Display column names of the annotation dataframe\n", " print(\"\\nGene annotation columns:\")\n", " print(gene_annotation.columns.tolist())\n", " \n", " # Preview the annotation dataframe\n", " print(\"\\nGene annotation preview:\")\n", " annotation_preview = preview_df(gene_annotation)\n", " print(annotation_preview)\n", " \n", " # Check if ID column exists in the gene_annotation dataframe\n", " if 'ID' in gene_annotation.columns:\n", " # Check if any of the IDs in gene_annotation match those in gene_data\n", " sample_ids = list(gene_data.index[:10])\n", " matching_rows = gene_annotation[gene_annotation['ID'].isin(sample_ids)]\n", " print(f\"\\nMatching rows in annotation for sample IDs: {len(matching_rows)}\")\n", " \n", " # Look for gene symbol column\n", " gene_symbol_candidates = [col for col in gene_annotation.columns if 'gene' in col.lower() or 'symbol' in col.lower() or 'name' in col.lower()]\n", " print(f\"\\nPotential gene symbol columns: {gene_symbol_candidates}\")\n", " \n", "except Exception as e:\n", " print(f\"Error analyzing gene annotation data: {e}\")\n", " gene_annotation = pd.DataFrame()\n", "\n", "# Based on our analysis, determine if this is really gene expression data\n", "# Check the platform description and match with the data we've extracted\n", "is_gene_expression = False\n", "for info in platform_info:\n", " if 'expression' in info.lower() or 'transcript' in info.lower() or 'mrna' in info.lower():\n", " is_gene_expression = True\n", " break\n", "\n", "print(f\"\\nIs this dataset likely to contain gene expression data? {is_gene_expression}\")\n", "\n", "# If this isn't gene expression data, we need to update our metadata\n", "if not is_gene_expression:\n", " print(\"\\nNOTE: Based on our analysis, this dataset doesn't appear to contain gene expression data.\")\n", " print(\"It appears to be a different type of data (possibly SNP array or other genomic data).\")\n", " # Update is_gene_available for metadata\n", " is_gene_available = False\n", " \n", " # Save the updated metadata\n", " validate_and_save_cohort_info(\n", " is_final=False,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", " )\n" ] }, { "cell_type": "markdown", "id": "ab566fab", "metadata": {}, "source": [ "### Step 7: Gene Identifier Mapping" ] }, { "cell_type": "code", "execution_count": null, "id": "c06165a3", "metadata": {}, "outputs": [], "source": [ "# 1. Extract gene annotation data again from the SOFT file\n", "gene_annotation = get_gene_annotation(soft_file_path)\n", "\n", "# 2. 
Create mapping dataframe with ID and gene symbol information\n", "# Extract the ID column and gene_assignment column\n", "mapping_df = gene_annotation[['ID', 'gene_assignment']]\n", "\n", "# Filter out rows without gene assignments (those with empty or null values)\n", "mapping_df = mapping_df.dropna(subset=['gene_assignment'])\n", "\n", "# Rename the 'gene_assignment' column to 'Gene' as required by apply_gene_mapping function\n", "mapping_df = mapping_df.rename(columns={'gene_assignment': 'Gene'})\n", "\n", "# 3. Apply gene mapping to convert probe-level measurements to gene-level expression\n", "# Use the library function to apply the mapping and handle many-to-many relationships\n", "gene_data_mapped = apply_gene_mapping(expression_df=gene_data, mapping_df=mapping_df)\n", "\n", "# Preview the gene expression data\n", "print(f\"Mapped gene expression data shape: {gene_data_mapped.shape}\")\n", "print(\"First 10 genes after mapping:\")\n", "print(gene_data_mapped.index[:10])\n", "\n", "# Check if we have duplicate gene symbols\n", "gene_counts = gene_data_mapped.index.value_counts()\n", "duplicates = gene_counts[gene_counts > 1]\n", "if len(duplicates) > 0:\n", " print(f\"\\nFound {len(duplicates)} duplicate gene symbols\")\n", " print(f\"Examples: {duplicates[:5].index.tolist()}\")\n", "\n", "# Apply standardization to gene symbols\n", "gene_data = normalize_gene_symbols_in_index(gene_data_mapped)\n", "print(f\"\\nFinal gene expression data shape after normalization: {gene_data.shape}\")\n", "print(\"First 10 normalized gene symbols:\")\n", "print(gene_data.index[:10])\n", "\n", "# Save the gene expression data\n", "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", "gene_data.to_csv(out_gene_data_file)\n", "print(f\"Gene expression data saved to {out_gene_data_file}\")\n" ] }, { "cell_type": "markdown", "id": "7807b59a", "metadata": {}, "source": [ "### Step 8: Data Normalization and Linking" ] }, { "cell_type": "code", "execution_count": null, "id": "ccf32e55", "metadata": {}, "outputs": [], "source": [ "# 1. Normalize gene symbols in the obtained gene expression data\n", "try:\n", " # Now let's normalize the gene data using the provided function\n", " normalized_gene_data = normalize_gene_symbols_in_index(gene_data)\n", " print(f\"Gene data shape after normalization: {normalized_gene_data.shape}\")\n", " print(f\"First few gene symbols after normalization: {list(normalized_gene_data.index[:10])}\")\n", " \n", " # Save the normalized gene data\n", " os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", " normalized_gene_data.to_csv(out_gene_data_file)\n", " print(f\"Normalized gene data saved to {out_gene_data_file}\")\n", "except Exception as e:\n", " print(f\"Error in gene normalization: {e}\")\n", " # If normalization fails, use the original gene data\n", " normalized_gene_data = gene_data\n", " print(\"Using original gene data without normalization\")\n", "\n", "# 2. 
Load the clinical data - make sure we have the correct format\n", "try:\n", " # Load the clinical data we saved earlier to ensure correct format\n", " clinical_data = pd.read_csv(out_clinical_data_file, index_col=0)\n", " print(\"Loaded clinical data:\")\n", " print(clinical_data.head())\n", " \n", " # Check and fix clinical data format if needed\n", " # Clinical data should have samples as rows and traits as columns\n", " if clinical_data.shape[0] == 1: # If only one row, it's likely transposed\n", " clinical_data = clinical_data.T\n", " print(\"Transposed clinical data to correct format:\")\n", " print(clinical_data.head())\n", "except Exception as e:\n", " print(f\"Error loading clinical data: {e}\")\n", " # If loading fails, re-extract the raw clinical data from the matrix file and recreate the features\n", " _, clinical_raw = get_background_and_clinical_data(\n", " matrix_file_path,\n", " ['!Series_title', '!Series_summary', '!Series_overall_design'],\n", " ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", " )\n", " clinical_data = geo_select_clinical_features(\n", " clinical_df=clinical_raw,\n", " trait=trait,\n", " trait_row=trait_row,\n", " convert_trait=convert_trait,\n", " age_row=age_row,\n", " convert_age=convert_age,\n", " gender_row=gender_row,\n", " convert_gender=convert_gender\n", " ).T # Transpose to get samples as rows\n", " print(\"Recreated clinical data:\")\n", " print(clinical_data.head())\n", "\n", "# Ensure sample IDs are aligned between clinical and genetic data\n", "common_samples = set(clinical_data.index).intersection(normalized_gene_data.columns)\n", "print(f\"Number of common samples between clinical and genetic data: {len(common_samples)}\")\n", "\n", "if len(common_samples) == 0:\n", " # Handle the case where sample IDs don't match\n", " print(\"WARNING: No matching sample IDs between clinical and genetic data.\")\n", " print(\"Clinical data index:\", clinical_data.index.tolist())\n", " print(\"Gene data columns:\", list(normalized_gene_data.columns[:5]) + [\"...\"])\n", " \n", " # Try to match sample IDs if they have different formats\n", " # Extract GSM IDs from the gene data columns\n", " import re\n", " gsm_pattern = re.compile(r'GSM\\d+')\n", " gene_samples = []\n", " for col in normalized_gene_data.columns:\n", " match = gsm_pattern.search(str(col))\n", " if match:\n", " gene_samples.append(match.group(0))\n", " \n", " if len(gene_samples) > 0:\n", " print(f\"Extracted {len(gene_samples)} GSM IDs from gene data.\")\n", " normalized_gene_data.columns = gene_samples\n", " \n", " # Re-check the overlap after renaming the gene data columns;\n", " # do not fabricate trait values if the sample IDs still do not match\n", " common_samples = set(clinical_data.index).intersection(normalized_gene_data.columns)\n", " print(f\"Common samples after renaming gene data columns: {len(common_samples)}\")\n", "\n", "# 3. 
Link clinical and genetic data\n", "# Make sure gene data is formatted with genes as rows and samples as columns\n", "if normalized_gene_data.index.name != 'Gene':\n", " normalized_gene_data.index.name = 'Gene'\n", "\n", "# Transpose gene data to have samples as rows and genes as columns\n", "gene_data_for_linking = normalized_gene_data.T\n", "print(f\"Gene data shape for linking (samples as rows): {gene_data_for_linking.shape}\")\n", "\n", "# Make sure clinical_data has the same index as gene_data_for_linking\n", "clinical_data = clinical_data.loc[clinical_data.index.isin(gene_data_for_linking.index)]\n", "gene_data_for_linking = gene_data_for_linking.loc[gene_data_for_linking.index.isin(clinical_data.index)]\n", "\n", "# Now link by concatenating horizontally\n", "linked_data = pd.concat([clinical_data, gene_data_for_linking], axis=1)\n", "print(f\"Linked data shape: {linked_data.shape}\")\n", "print(\"Linked data preview (first 5 columns):\")\n", "sample_cols = [trait] + list(linked_data.columns[1:5]) if len(linked_data.columns) > 5 else list(linked_data.columns)\n", "print(linked_data[sample_cols].head())\n", "\n", "# 4. Handle missing values\n", "linked_data = handle_missing_values(linked_data, trait)\n", "print(f\"Linked data shape after handling missing values: {linked_data.shape}\")\n", "\n", "# Check if we still have data\n", "if linked_data.shape[0] == 0 or linked_data.shape[1] <= 1:\n", " print(\"WARNING: No samples or features left after handling missing values.\")\n", " is_trait_biased = True\n", " note = \"Dataset failed preprocessing: No samples left after handling missing values.\"\n", "else:\n", " # 5. Determine whether the trait and demographic features are biased\n", " is_trait_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)\n", " print(f\"Is trait biased: {is_trait_biased}\")\n", " note = \"This dataset contains gene expression data from fibroblast, iPSC, and ESC samples comparing Werner Syndrome patients with normal controls.\"\n", "\n", "# 6. Conduct quality check and save the cohort information\n", "is_usable = validate_and_save_cohort_info(\n", " is_final=True, \n", " cohort=cohort, \n", " info_path=json_path, \n", " is_gene_available=is_gene_available, \n", " is_trait_available=is_trait_available,\n", " is_biased=is_trait_biased, \n", " df=linked_data,\n", " note=note\n", ")\n", "\n", "# 7. Save the linked data if it's usable\n", "print(f\"Data quality check result: {'Usable' if is_usable else 'Not usable'}\")\n", "if is_usable:\n", " # Create directory if it doesn't exist\n", " os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n", " linked_data.to_csv(out_data_file)\n", " print(f\"Linked data saved to {out_data_file}\")\n", "else:\n", " print(\"Data not saved due to quality issues.\")" ] } ], "metadata": {}, "nbformat": 4, "nbformat_minor": 5 }