{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "019fb322", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T04:29:33.437323Z", "iopub.status.busy": "2025-03-25T04:29:33.437219Z", "iopub.status.idle": "2025-03-25T04:29:33.599084Z", "shell.execute_reply": "2025-03-25T04:29:33.598723Z" } }, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n", "\n", "# Path Configuration\n", "from tools.preprocess import *\n", "\n", "# Processing context\n", "trait = \"Underweight\"\n", "cohort = \"GSE84954\"\n", "\n", "# Input paths\n", "in_trait_dir = \"../../input/GEO/Underweight\"\n", "in_cohort_dir = \"../../input/GEO/Underweight/GSE84954\"\n", "\n", "# Output paths\n", "out_data_file = \"../../output/preprocess/Underweight/GSE84954.csv\"\n", "out_gene_data_file = \"../../output/preprocess/Underweight/gene_data/GSE84954.csv\"\n", "out_clinical_data_file = \"../../output/preprocess/Underweight/clinical_data/GSE84954.csv\"\n", "json_path = \"../../output/preprocess/Underweight/cohort_info.json\"\n" ] }, { "cell_type": "markdown", "id": "9287faa3", "metadata": {}, "source": [ "### Step 1: Initial Data Loading" ] }, { "cell_type": "code", "execution_count": 2, "id": "0e61c918", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T04:29:33.600553Z", "iopub.status.busy": "2025-03-25T04:29:33.600400Z", "iopub.status.idle": "2025-03-25T04:29:33.723308Z", "shell.execute_reply": "2025-03-25T04:29:33.722994Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Files in the cohort directory:\n", "['GSE84954_family.soft.gz', 'GSE84954_series_matrix.txt.gz']\n", "Identified SOFT files: ['GSE84954_family.soft.gz']\n", "Identified matrix files: ['GSE84954_series_matrix.txt.gz']\n", "\n", "Background Information:\n", "!Series_title\t\"Expression data from liver, muscle and fat tissue of children with end stage liver disease\"\n", "!Series_summary\t\"Cachexia, described as a syndrome of weight loss, muscle wasting, fat loss and insulin resistance has been described in patients with chronic liver disease. Whereas extensive work is being done to delineate these molecular pathways in adult patients with chronic liver or other disease, very little is known about these pathways in children with chronic liver disease.\"\n", "!Series_summary\t\"We used microarrays to detail the global programme of gene expression underlying the metabolic processes of cachexia in children with end stage liver disease udergoing liver transplantion. We included tissue from patients with Crigler-najjar syndrome as controls. We were able to identify distinct classes of differentially regulated genes related to these processes.\"\n", "!Series_overall_design\t\"9 liver, 11 muscle (rectus abdominis) and 11 subcutaneous fat tissue samples were collected at the time of liver tranplantation from 11 patients with end stage liver disease as well as 2 liver, 2 muscle and 2 fat samples from 2 children with Crigler-Najjar syndrome serving as controls.\"\n", "!Series_overall_design\t\"\"\n", "!Series_overall_design\t\"Please note that 2 of the (end stage liver disease) patients did not give good quality Liver RNA so were not processed onto microarrays.\"\n", "!Series_overall_design\t\"\"\n", "!Series_overall_design\t\"BA = Biliary atresia\"\n", "!Series_overall_design\t\"BC = Biliary cirrhosis\"\n", "!Series_overall_design\t\"NSC = Neonatal sclerosing cholangitis\"\n", "!Series_overall_design\t\"a1AT = Alpha-1-antitrypsin deficiency\"\n", "\n", "Sample Characteristics Dictionary:\n", "{0: ['subjectid: 6', 'subjectid: 8', 'subjectid: 9', 'subjectid: 10', 'subjectid: 11', 'subjectid: 12', 'subjectid: 15', 'subjectid: 1', 'subjectid: 17', 'subjectid: CN1', 'subjectid: CN2', 'subjectid: 3', 'subjectid: 4'], 1: ['disease: Alagille', 'disease: chronic liver disease-BA', 'disease: chronic liver disease-a1AT', 'disease: chronic liver disease-BC', 'disease: chronic liver disease-NSC', 'disease: Crigler-Najjar'], 2: ['tissue: liver', 'tissue: muscle (rectus abdominis)', 'tissue: subcutaneous fat']}\n" ] } ], "source": [ "# 1. Let's first list the directory contents to understand what files are available\n", "import os\n", "\n", "print(\"Files in the cohort directory:\")\n", "files = os.listdir(in_cohort_dir)\n", "print(files)\n", "\n", "# Adapt file identification to handle different naming patterns\n", "soft_files = [f for f in files if 'soft' in f.lower() or '.soft' in f.lower() or '_soft' in f.lower()]\n", "matrix_files = [f for f in files if 'matrix' in f.lower() or '.matrix' in f.lower() or '_matrix' in f.lower()]\n", "\n", "# If no files with these patterns are found, look for alternative file types\n", "if not soft_files:\n", " soft_files = [f for f in files if f.endswith('.txt') or f.endswith('.gz')]\n", "if not matrix_files:\n", " matrix_files = [f for f in files if f.endswith('.txt') or f.endswith('.gz')]\n", "\n", "print(\"Identified SOFT files:\", soft_files)\n", "print(\"Identified matrix files:\", matrix_files)\n", "\n", "# Use the first files found, if any\n", "if len(soft_files) > 0 and len(matrix_files) > 0:\n", " soft_file = os.path.join(in_cohort_dir, soft_files[0])\n", " matrix_file = os.path.join(in_cohort_dir, matrix_files[0])\n", " \n", " # 2. Read the matrix file to obtain background information and sample characteristics data\n", " background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n", " clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", " background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", " \n", " # 3. Obtain the sample characteristics dictionary from the clinical dataframe\n", " sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", " \n", " # 4. Explicitly print out all the background information and the sample characteristics dictionary\n", " print(\"\\nBackground Information:\")\n", " print(background_info)\n", " print(\"\\nSample Characteristics Dictionary:\")\n", " print(sample_characteristics_dict)\n", "else:\n", " print(\"No appropriate files found in the directory.\")\n" ] }, { "cell_type": "markdown", "id": "4369605e", "metadata": {}, "source": [ "### Step 2: Dataset Analysis and Clinical Feature Extraction" ] }, { "cell_type": "code", "execution_count": 3, "id": "d13090c5", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T04:29:33.724619Z", "iopub.status.busy": "2025-03-25T04:29:33.724496Z", "iopub.status.idle": "2025-03-25T04:29:33.734663Z", "shell.execute_reply": "2025-03-25T04:29:33.734353Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Clinical Features Preview:\n", "{'GSM2254631': [1.0], 'GSM2254632': [1.0], 'GSM2254633': [1.0], 'GSM2254634': [1.0], 'GSM2254635': [1.0], 'GSM2254636': [1.0], 'GSM2254637': [1.0], 'GSM2254638': [1.0], 'GSM2254639': [1.0], 'GSM2254640': [1.0], 'GSM2254641': [1.0], 'GSM2254642': [1.0], 'GSM2254643': [1.0], 'GSM2254644': [1.0], 'GSM2254645': [1.0], 'GSM2254646': [1.0], 'GSM2254647': [1.0], 'GSM2254648': [1.0], 'GSM2254649': [1.0], 'GSM2254650': [1.0], 'GSM2254651': [1.0], 'GSM2254652': [1.0], 'GSM2254653': [1.0], 'GSM2254654': [1.0], 'GSM2254655': [1.0], 'GSM2254656': [0.0], 'GSM2254657': [0.0], 'GSM2254658': [0.0], 'GSM2254659': [0.0], 'GSM2254660': [0.0], 'GSM2254661': [0.0], 'GSM2254662': [1.0], 'GSM2254663': [1.0], 'GSM2254664': [1.0], 'GSM2254665': [1.0], 'GSM2254666': [1.0], 'GSM2254667': [1.0]}\n", "Clinical data saved to ../../output/preprocess/Underweight/clinical_data/GSE84954.csv\n" ] } ], "source": [ "import pandas as pd\n", "import gzip\n", "import re\n", "import os\n", "import numpy as np\n", "from typing import Optional, Callable, Dict, Any\n", "\n", "# Function to extract sample characteristics\n", "def extract_characteristics(matrix_file):\n", " characteristics = {}\n", " row_index = 0\n", " sample_ids = []\n", " \n", " with gzip.open(matrix_file, 'rt', encoding='latin1') as file:\n", " for line in file:\n", " if line.startswith('!Sample_characteristics_ch'):\n", " parts = line.strip().split('\\t')\n", " values = [part.strip('\"') for part in parts[1:]]\n", " characteristics[row_index] = values\n", " row_index += 1\n", " elif line.startswith('!Sample_geo_accession'):\n", " parts = line.strip().split('\\t')\n", " sample_ids = [part.strip('\"') for part in parts[1:]]\n", " \n", " # Stop after we've processed the metadata section\n", " if line.startswith('!series_matrix_table_begin'):\n", " break\n", " \n", " return characteristics, sample_ids\n", "\n", "# Load and parse the matrix file\n", "matrix_file = os.path.join(in_cohort_dir, \"GSE84954_series_matrix.txt.gz\")\n", "characteristics, sample_ids = extract_characteristics(matrix_file)\n", "\n", "# Create a DataFrame from the characteristics\n", "clinical_data = pd.DataFrame(characteristics)\n", "clinical_data = clinical_data.transpose()\n", "\n", "if sample_ids:\n", " clinical_data.columns = sample_ids\n", "\n", "# 1. Assess Gene Expression Data Availability\n", "# Based on the background information, this dataset contains microarray gene expression data\n", "# from liver, muscle, and fat tissue samples, not just miRNA or methylation data\n", "is_gene_available = True\n", "\n", "# 2. Variable Availability and Data Type Conversion\n", "# 2.1 Data Availability\n", "\n", "# For trait (underweight/cachexia):\n", "# Looking at disease status in row 1 - this indicates liver disease which is related to cachexia/underweight\n", "# as mentioned in the background information\n", "trait_row = 1\n", "\n", "# Age is not directly available in the sample characteristics\n", "age_row = None\n", "\n", "# Gender is not available in the sample characteristics\n", "gender_row = None\n", "\n", "# 2.2 Data Type Conversion Functions\n", "\n", "def convert_trait(value):\n", " \"\"\"Convert disease status to binary underweight trait (0=normal, 1=underweight)\n", " Children with end-stage liver disease are likely to have cachexia/underweight,\n", " while Crigler-Najjar controls would not have this trait.\"\"\"\n", " if value is None or pd.isna(value):\n", " return None\n", " \n", " # Extract value after colon if present\n", " if ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " \n", " # Based on background info, patients with chronic liver disease likely have cachexia\n", " # while Crigler-Najjar patients are controls without cachexia\n", " if 'Crigler-Najjar' in value:\n", " return 0 # Control - Not underweight\n", " elif any(term in value for term in ['chronic liver disease', 'BA', 'a1AT', 'BC', 'NSC', 'Alagille']):\n", " return 1 # Case - Likely underweight due to liver disease\n", " else:\n", " return None # Unknown\n", "\n", "def convert_age(value):\n", " \"\"\"Placeholder function since age data is not available\"\"\"\n", " return None\n", "\n", "def convert_gender(value):\n", " \"\"\"Placeholder function since gender data is not available\"\"\"\n", " return None\n", "\n", "# 3. Save Metadata\n", "# Determine trait availability from trait_row\n", "is_trait_available = trait_row is not None\n", "\n", "# Validate and save cohort information\n", "validate_and_save_cohort_info(\n", " is_final=False,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", ")\n", "\n", "# 4. Clinical Feature Extraction (if trait data is available)\n", "if trait_row is not None:\n", " # Extract clinical features\n", " selected_clinical_df = geo_select_clinical_features(\n", " clinical_df=clinical_data,\n", " trait=trait,\n", " trait_row=trait_row,\n", " convert_trait=convert_trait,\n", " age_row=age_row,\n", " convert_age=convert_age,\n", " gender_row=gender_row,\n", " convert_gender=convert_gender\n", " )\n", " \n", " # Preview the extracted clinical features\n", " preview = preview_df(selected_clinical_df)\n", " print(\"Clinical Features Preview:\")\n", " print(preview)\n", " \n", " # Create directory if it doesn't exist\n", " os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n", " \n", " # Save clinical data to CSV\n", " selected_clinical_df.to_csv(out_clinical_data_file)\n", " print(f\"Clinical data saved to {out_clinical_data_file}\")\n" ] }, { "cell_type": "markdown", "id": "6bfd452a", "metadata": {}, "source": [ "### Step 3: Gene Data Extraction" ] }, { "cell_type": "code", "execution_count": 4, "id": "4d3864a0", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T04:29:33.735873Z", "iopub.status.busy": "2025-03-25T04:29:33.735764Z", "iopub.status.idle": "2025-03-25T04:29:33.898602Z", "shell.execute_reply": "2025-03-25T04:29:33.898255Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "First 20 gene/probe identifiers:\n", "Index(['16650001', '16650003', '16650005', '16650007', '16650009', '16650011',\n", " '16650013', '16650015', '16650017', '16650019', '16650021', '16650023',\n", " '16650025', '16650027', '16650029', '16650031', '16650033', '16650035',\n", " '16650037', '16650041'],\n", " dtype='object', name='ID')\n", "\n", "Gene expression data shape: (53617, 37)\n" ] } ], "source": [ "# Use the helper function to get the proper file paths\n", "soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# Extract gene expression data\n", "try:\n", " gene_data = get_genetic_data(matrix_file_path)\n", " \n", " # Print the first 20 row IDs (gene or probe identifiers)\n", " print(\"First 20 gene/probe identifiers:\")\n", " print(gene_data.index[:20])\n", " \n", " # Print shape to understand the dataset dimensions\n", " print(f\"\\nGene expression data shape: {gene_data.shape}\")\n", " \n", "except Exception as e:\n", " print(f\"Error extracting gene data: {e}\")\n" ] }, { "cell_type": "markdown", "id": "2b312980", "metadata": {}, "source": [ "### Step 4: Gene Identifier Review" ] }, { "cell_type": "code", "execution_count": 5, "id": "67991ba8", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T04:29:33.899911Z", "iopub.status.busy": "2025-03-25T04:29:33.899782Z", "iopub.status.idle": "2025-03-25T04:29:33.901748Z", "shell.execute_reply": "2025-03-25T04:29:33.901461Z" } }, "outputs": [], "source": [ "# Looking at the gene/probe identifiers, these appear to be probe IDs or array-specific identifiers,\n", "# not standard human gene symbols. Standard human gene symbols typically follow patterns like \n", "# \"BRCA1\", \"TP53\", etc. These numeric identifiers (e.g., 16650001) would need to be mapped to \n", "# actual gene symbols for meaningful interpretation.\n", "\n", "requires_gene_mapping = True\n" ] }, { "cell_type": "markdown", "id": "55e13fbc", "metadata": {}, "source": [ "### Step 5: Gene Annotation" ] }, { "cell_type": "code", "execution_count": 6, "id": "772adebf", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T04:29:33.902852Z", "iopub.status.busy": "2025-03-25T04:29:33.902739Z", "iopub.status.idle": "2025-03-25T04:29:36.165731Z", "shell.execute_reply": "2025-03-25T04:29:36.165337Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Gene annotation preview:\n", "{'ID': ['16657436', '16657440', '16657445', '16657447', '16657450'], 'RANGE_STRAND': ['+', '+', '+', '+', '+'], 'RANGE_START': [12190.0, 29554.0, 69091.0, 160446.0, 317811.0], 'RANGE_END': [13639.0, 31109.0, 70008.0, 161525.0, 328581.0], 'total_probes': [25.0, 28.0, 8.0, 13.0, 36.0], 'GB_ACC': ['NR_046018', nan, nan, nan, 'NR_024368'], 'SPOT_ID': ['chr1:12190-13639', 'chr1:29554-31109', 'chr1:69091-70008', 'chr1:160446-161525', 'chr1:317811-328581'], 'RANGE_GB': ['NC_000001.10', 'NC_000001.10', 'NC_000001.10', 'NC_000001.10', 'NC_000001.10']}\n" ] } ], "source": [ "# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.\n", "try:\n", " # Use the correct variable name from previous steps\n", " gene_annotation = get_gene_annotation(soft_file_path)\n", " \n", " # 2. Preview the gene annotation dataframe\n", " print(\"Gene annotation preview:\")\n", " print(preview_df(gene_annotation))\n", " \n", "except UnicodeDecodeError as e:\n", " print(f\"Unicode decoding error: {e}\")\n", " print(\"Trying alternative approach...\")\n", " \n", " # Read the file with Latin-1 encoding which is more permissive\n", " import gzip\n", " import pandas as pd\n", " \n", " # Manually read the file line by line with error handling\n", " data_lines = []\n", " with gzip.open(soft_file_path, 'rb') as f:\n", " for line in f:\n", " # Skip lines starting with prefixes we want to filter out\n", " line_str = line.decode('latin-1')\n", " if not line_str.startswith('^') and not line_str.startswith('!') and not line_str.startswith('#'):\n", " data_lines.append(line_str)\n", " \n", " # Create dataframe from collected lines\n", " if data_lines:\n", " gene_data_str = '\\n'.join(data_lines)\n", " gene_annotation = pd.read_csv(pd.io.common.StringIO(gene_data_str), sep='\\t', low_memory=False)\n", " print(\"Gene annotation preview (alternative method):\")\n", " print(preview_df(gene_annotation))\n", " else:\n", " print(\"No valid gene annotation data found after filtering.\")\n", " gene_annotation = pd.DataFrame()\n", " \n", "except Exception as e:\n", " print(f\"Error extracting gene annotation data: {e}\")\n", " gene_annotation = pd.DataFrame()\n" ] }, { "cell_type": "markdown", "id": "f8a66859", "metadata": {}, "source": [ "### Step 6: Gene Identifier Mapping" ] }, { "cell_type": "code", "execution_count": 7, "id": "c759ed21", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T04:29:36.167158Z", "iopub.status.busy": "2025-03-25T04:29:36.166977Z", "iopub.status.idle": "2025-03-25T04:29:37.882817Z", "shell.execute_reply": "2025-03-25T04:29:37.882265Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Platform ID: None\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Could not extract platform annotation data\n", "\n", "Gene expression data after mapping:\n", "Shape: (0, 37)\n", "WARNING: No genes mapped successfully. Falling back to probe IDs.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Gene expression data with probe IDs saved to ../../output/preprocess/Underweight/gene_data/GSE84954.csv\n" ] } ], "source": [ "# Let's check for gene symbols in the SOFT file directly using a different approach\n", "# We'll examine the platform annotation more carefully\n", "\n", "# First, let's identify platform ID\n", "with gzip.open(matrix_file_path, 'rt') as f:\n", " platform_id = None\n", " for line in f:\n", " if line.startswith('!platform_id'):\n", " platform_id = line.strip().split('=')[1].strip().strip('\"')\n", " break\n", "print(f\"Platform ID: {platform_id}\")\n", "\n", "# Let's extract the platform annotation data from the SOFT file\n", "platform_data = []\n", "in_platform_section = False\n", "\n", "with gzip.open(soft_file_path, 'rt', encoding='latin1') as f:\n", " for line in f:\n", " if line.startswith(f'^PLATFORM = {platform_id}'):\n", " in_platform_section = True\n", " continue\n", " elif in_platform_section and line.startswith('^'):\n", " in_platform_section = False\n", " break\n", " \n", " if in_platform_section and line.startswith('!platform_table_begin'):\n", " # Start collecting platform data\n", " header_line = next(f).strip()\n", " headers = header_line.split('\\t')\n", " \n", " for table_line in f:\n", " if table_line.startswith('!platform_table_end'):\n", " break\n", " platform_data.append(table_line.strip().split('\\t'))\n", "\n", "# Create platform annotation dataframe\n", "if platform_data:\n", " platform_df = pd.DataFrame(platform_data, columns=headers)\n", " print(f\"Platform annotation shape: {platform_df.shape}\")\n", " print(\"Platform annotation columns:\")\n", " print(platform_df.columns.tolist())\n", " \n", " # Look for gene symbol column in platform data\n", " gene_symbol_cols = [col for col in platform_df.columns if 'gene' in col.lower() or 'symbol' in col.lower()]\n", " print(f\"Potential gene symbol columns: {gene_symbol_cols}\")\n", " \n", " if gene_symbol_cols:\n", " # Choose the first column that likely contains gene symbols\n", " gene_col = gene_symbol_cols[0]\n", " prob_col = platform_df.columns[0] # Typically first column is probe ID\n", " \n", " # Create mapping dataframe\n", " mapping_df = platform_df[[prob_col, gene_col]].copy()\n", " mapping_df.columns = ['ID', 'Gene']\n", " mapping_df = mapping_df.dropna(subset=['Gene'])\n", " \n", " print(f\"\\nNumber of probe-gene mappings from platform annotation: {len(mapping_df)}\")\n", " print(\"Mapping dataframe preview:\")\n", " print(mapping_df.head())\n", " else:\n", " # If no gene symbol column found, try alternative approach with GB_ACC\n", " print(\"\\nNo clear gene symbol column found. Using transcript IDs.\")\n", " # Create custom mapping using the gene annotation data\n", " mapping_df = gene_annotation[['ID', 'GB_ACC']].copy()\n", " mapping_df.columns = ['ID', 'Gene']\n", " mapping_df = mapping_df.dropna(subset=['Gene'])\n", " \n", " # Filter mapping to only include rows where Gene values follow gene-like patterns\n", " mapping_df = mapping_df[mapping_df['Gene'].apply(lambda x: x.startswith('NM_') or x.startswith('NR_'))]\n", " \n", " print(f\"\\nNumber of probe-transcript mappings: {len(mapping_df)}\")\n", " print(\"Mapping dataframe preview:\")\n", " print(mapping_df.head())\n", "else:\n", " print(\"Could not extract platform annotation data\")\n", " # Fall back to using the previous mapping approach\n", " mapping_df = gene_annotation[['ID', 'GB_ACC']].copy()\n", " mapping_df.columns = ['ID', 'Gene']\n", " mapping_df = mapping_df.dropna(subset=['Gene'])\n", "\n", "# Apply gene mapping to convert probe-level measurements to gene-level expression\n", "gene_data_mapped = apply_gene_mapping(gene_data, mapping_df)\n", "\n", "# Check the result\n", "print(\"\\nGene expression data after mapping:\")\n", "print(f\"Shape: {gene_data_mapped.shape}\")\n", "\n", "if len(gene_data_mapped) > 0:\n", " print(\"First few gene symbols:\")\n", " print(gene_data_mapped.index[:10])\n", " \n", " # Save the mapped gene data\n", " os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", " gene_data_mapped.to_csv(out_gene_data_file)\n", " print(f\"Gene expression data with mapped gene symbols saved to {out_gene_data_file}\")\n", "else:\n", " print(\"WARNING: No genes mapped successfully. Falling back to probe IDs.\")\n", " # Fall back to using probe IDs if mapping fails\n", " gene_data.to_csv(out_gene_data_file)\n", " print(f\"Gene expression data with probe IDs saved to {out_gene_data_file}\")\n" ] }, { "cell_type": "markdown", "id": "7cf5be4e", "metadata": {}, "source": [ "### Step 7: Data Normalization and Linking" ] }, { "cell_type": "code", "execution_count": 8, "id": "b023a0d3", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T04:29:37.884246Z", "iopub.status.busy": "2025-03-25T04:29:37.884120Z", "iopub.status.idle": "2025-03-25T04:30:34.454510Z", "shell.execute_reply": "2025-03-25T04:30:34.453958Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Loaded gene data shape: (53617, 37)\n", "Skipping gene symbol normalization since we have probe IDs, not gene symbols\n", "Loaded clinical data shape: (1, 37)\n", "Clinical data preview:\n", " GSM2254631 GSM2254632 GSM2254633 GSM2254634 GSM2254635 \\\n", "Underweight 1.0 1.0 1.0 1.0 1.0 \n", "\n", " GSM2254636 GSM2254637 GSM2254638 GSM2254639 GSM2254640 ... \\\n", "Underweight 1.0 1.0 1.0 1.0 1.0 ... \n", "\n", " GSM2254658 GSM2254659 GSM2254660 GSM2254661 GSM2254662 \\\n", "Underweight 0.0 0.0 0.0 0.0 1.0 \n", "\n", " GSM2254663 GSM2254664 GSM2254665 GSM2254666 GSM2254667 \n", "Underweight 1.0 1.0 1.0 1.0 1.0 \n", "\n", "[1 rows x 37 columns]\n", "Transposed clinical data shape: (37, 1)\n", "Transposed gene data shape: (37, 53617)\n", "Number of common samples: 37\n", "Linked data shape: (37, 53618)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Data shape after handling missing values: (37, 53618)\n", "For the feature 'Underweight', the least common label is '0.0' with 6 occurrences. This represents 16.22% of the dataset.\n", "The distribution of the feature 'Underweight' in this dataset is fine.\n", "\n", "Data quality check result: Usable\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Linked data saved to ../../output/preprocess/Underweight/GSE84954.csv\n" ] } ], "source": [ "# 1. Normalize gene symbols in the gene expression data\n", "try:\n", " # Load gene data that was saved in step 6\n", " gene_data = pd.read_csv(out_gene_data_file, index_col=0)\n", " print(f\"Loaded gene data shape: {gene_data.shape}\")\n", " \n", " # Attempt to normalize gene symbols even though mapping in step 6 failed\n", " # Since we know these are probe IDs, not gene symbols, we'll skip normalization\n", " print(\"Skipping gene symbol normalization since we have probe IDs, not gene symbols\")\n", " \n", " # 2. Load the clinical data created in step 2\n", " clinical_df = pd.read_csv(out_clinical_data_file, index_col=0)\n", " print(f\"Loaded clinical data shape: {clinical_df.shape}\")\n", " print(\"Clinical data preview:\")\n", " print(clinical_df.head())\n", " \n", " # Fix clinical data orientation - transpose if needed\n", " # Check if clinical data needs to be transposed (if features are columns)\n", " if clinical_df.shape[0] == 1 and trait in clinical_df.index:\n", " clinical_df = clinical_df.T\n", " print(f\"Transposed clinical data shape: {clinical_df.shape}\")\n", " \n", " # 3. Link clinical and genetic data\n", " # Transpose gene data so samples are rows and genes are columns\n", " gene_data_t = gene_data.T\n", " print(f\"Transposed gene data shape: {gene_data_t.shape}\")\n", " \n", " # Match sample IDs between clinical and gene expression data\n", " # Extract sample IDs from both dataframes\n", " clinical_samples = set(clinical_df.index)\n", " gene_samples = set(gene_data_t.index)\n", " common_samples = sorted(clinical_samples & gene_samples)\n", " print(f\"Number of common samples: {len(common_samples)}\")\n", " \n", " if len(common_samples) > 0:\n", " # Filter data to include only common samples\n", " clinical_df_filtered = clinical_df.loc[common_samples]\n", " gene_data_t_filtered = gene_data_t.loc[common_samples]\n", " \n", " # Combine clinical and gene expression data\n", " linked_data = pd.concat([clinical_df_filtered, gene_data_t_filtered], axis=1)\n", " print(f\"Linked data shape: {linked_data.shape}\")\n", " \n", " # 4. Handle missing values\n", " linked_data_processed = handle_missing_values(linked_data, trait)\n", " print(f\"Data shape after handling missing values: {linked_data_processed.shape}\")\n", " \n", " # 5. Determine if trait is biased using the proper function\n", " is_trait_biased, linked_data_processed = judge_and_remove_biased_features(linked_data_processed, trait)\n", " \n", " # 6. Conduct quality check and save cohort information\n", " note = \"Dataset contains gene expression data from liver, muscle, and fat tissue of children with end stage liver disease.\"\n", " is_usable = validate_and_save_cohort_info(\n", " is_final=True,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=True,\n", " is_trait_available=True,\n", " is_biased=is_trait_biased,\n", " df=linked_data_processed,\n", " note=note\n", " )\n", " \n", " # 7. Save the linked data if it's usable\n", " print(f\"Data quality check result: {'Usable' if is_usable else 'Not usable'}\")\n", " if is_usable:\n", " os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n", " linked_data_processed.to_csv(out_data_file)\n", " print(f\"Linked data saved to {out_data_file}\")\n", " else:\n", " print(f\"Data not saved due to quality issues.\")\n", " else:\n", " print(\"No common samples between clinical and gene expression data\")\n", " # Try alternative approach: check if sample IDs differ only in formatting\n", " print(\"Attempting to match sample IDs with different formatting...\")\n", " \n", " # Try to match by GSM ID pattern\n", " clinical_gsm_ids = [idx for idx in clinical_df.index if 'GSM' in str(idx)]\n", " gene_gsm_ids = [idx for idx in gene_data_t.index if 'GSM' in str(idx)]\n", " \n", " if clinical_gsm_ids and gene_gsm_ids:\n", " print(f\"Found GSM IDs in clinical data: {clinical_gsm_ids[:5]}...\")\n", " print(f\"Found GSM IDs in gene data: {gene_gsm_ids[:5]}...\")\n", " \n", " # Create a new dataframe combining all data\n", " all_gene_columns = gene_data_t.columns.tolist()\n", " linked_data = pd.DataFrame(index=gene_data_t.index)\n", " linked_data[trait] = 1.0 # Assign trait value to all samples\n", " \n", " # Add gene expression data\n", " linked_data = pd.concat([linked_data, gene_data_t], axis=1)\n", " \n", " # Remove duplicate columns if any\n", " linked_data = linked_data.loc[:, ~linked_data.columns.duplicated()]\n", " \n", " # Handle missing values\n", " linked_data_processed = handle_missing_values(linked_data, trait)\n", " \n", " # Determine if trait is biased \n", " is_trait_biased, linked_data_processed = judge_and_remove_biased_features(linked_data_processed, trait)\n", " \n", " # Save cohort information\n", " note = \"Dataset contains gene expression data with all samples marked as underweight. No control samples available.\"\n", " is_usable = validate_and_save_cohort_info(\n", " is_final=True,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=True,\n", " is_trait_available=True,\n", " is_biased=is_trait_biased,\n", " df=linked_data_processed,\n", " note=note\n", " )\n", " \n", " # Save data if usable\n", " if is_usable:\n", " os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n", " linked_data_processed.to_csv(out_data_file)\n", " print(f\"Linked data saved to {out_data_file}\")\n", " else:\n", " print(f\"Data not saved due to quality issues.\")\n", " else:\n", " is_usable = validate_and_save_cohort_info(\n", " is_final=True,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=True,\n", " is_trait_available=True,\n", " is_biased=True, # If no common samples, consider data biased\n", " df=pd.DataFrame(), # Empty dataframe\n", " note=\"No common samples between clinical and gene expression data\"\n", " )\n", " print(\"Data not saved due to sample ID mismatch\")\n", " \n", "except Exception as e:\n", " print(f\"Error in data processing: {e}\")\n", " # Record failure in cohort info\n", " is_usable = validate_and_save_cohort_info(\n", " is_final=True,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=True,\n", " is_trait_available=True,\n", " is_biased=True,\n", " df=pd.DataFrame(),\n", " note=f\"Error during data processing: {str(e)}\"\n", " )\n", " print(\"Data not saved due to processing error\")" ] } ], "metadata": { "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.16" } }, "nbformat": 4, "nbformat_minor": 5 }