{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "b59e24fd", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T05:29:52.810334Z", "iopub.status.busy": "2025-03-25T05:29:52.810107Z", "iopub.status.idle": "2025-03-25T05:29:52.980004Z", "shell.execute_reply": "2025-03-25T05:29:52.979567Z" } }, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n", "\n", "# Path Configuration\n", "from tools.preprocess import *\n", "\n", "# Processing context\n", "trait = \"Heart_rate\"\n", "cohort = \"GSE18583\"\n", "\n", "# Input paths\n", "in_trait_dir = \"../../input/GEO/Heart_rate\"\n", "in_cohort_dir = \"../../input/GEO/Heart_rate/GSE18583\"\n", "\n", "# Output paths\n", "out_data_file = \"../../output/preprocess/Heart_rate/GSE18583.csv\"\n", "out_gene_data_file = \"../../output/preprocess/Heart_rate/gene_data/GSE18583.csv\"\n", "out_clinical_data_file = \"../../output/preprocess/Heart_rate/clinical_data/GSE18583.csv\"\n", "json_path = \"../../output/preprocess/Heart_rate/cohort_info.json\"\n" ] }, { "cell_type": "markdown", "id": "b4ecfc8e", "metadata": {}, "source": [ "### Step 1: Initial Data Loading" ] }, { "cell_type": "code", "execution_count": 2, "id": "f951aa94", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T05:29:52.981338Z", "iopub.status.busy": "2025-03-25T05:29:52.981185Z", "iopub.status.idle": "2025-03-25T05:29:53.277423Z", "shell.execute_reply": "2025-03-25T05:29:53.276679Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Background Information:\n", "!Series_title\t\"Baseline skeletal muscle gene expression\"\n", "!Series_summary\t\"Muscle biopsy samples were obtained from two groups of male subjects prior to endurance training. 
The samples were used to predict training responses.\"\n", "!Series_summary\t\"Baseline gene expression involving 30 probe sets was able to classify subjects into high and low responders.\"\n", "!Series_overall_design\t\"Resting skeletal muscle sample after an overnight fast.\"\n", "Sample Characteristics Dictionary:\n", "{0: ['gender: male'], 1: ['protocol: Resting skeletal muscle sample prior to endurance training'], 2: ['heart rate (bpm): 173', 'heart rate (bpm): 155', 'heart rate (bpm): 183', 'heart rate (bpm): 149', 'heart rate (bpm): 146', 'heart rate (bpm): 157', 'heart rate (bpm): 162', 'heart rate (bpm): 170', 'heart rate (bpm): 165', 'heart rate (bpm): 144', 'heart rate (bpm): 167', 'heart rate (bpm): 191', 'heart rate (bpm): 160', 'heart rate (bpm): 177', 'heart rate (bpm): 174', 'heart rate (bpm): 190', 'heart rate (bpm): 169', nan], 3: ['vo2 (l/min): 2.98', 'vo2 (l/min): 1.94', 'vo2 (l/min): 2.99', 'vo2 (l/min): 2.53', 'vo2 (l/min): 2.8', 'vo2 (l/min): 2.42', 'vo2 (l/min): 3.3', 'vo2 (l/min): 2.688', 'vo2 (l/min): 1.68', 'vo2 (l/min): 2.33', 'vo2 (l/min): 2.63', 'vo2 (l/min): 2.9', 'vo2 (l/min): 2.38', 'vo2 (l/min): 2.59', 'vo2 (l/min): 2.79', 'vo2 (l/min): 2.2', 'vo2 (l/min): 2.015', 'vo2 (l/min): 2.854', 'vo2 (l/min): 3.21', 'vo2 (l/min): 2.15', 'vo2 (l/min): 3.63', 'vo2 (l/min): 3.01', 'vo2 (l/min): 1.62', nan], 4: ['rer: 0.96', 'rer: 0.99', 'rer: 1.01', 'rer: 0.98', 'rer: 1.09', 'rer: 1.24', 'rer: 1.18', 'rer: 1.05', 'rer: 0.9', 'rer: 0.97', 'rer: 1.02', 'rer: 1.04', 'rer: 0.95', 'rer: 1', 'rer: 1.07', nan], 5: ['ve (l/min): 72.5', 've (l/min): 62.9', 've (l/min): 89.8', 've (l/min): 54.2', 've (l/min): 63.5', 've (l/min): 69', 've (l/min): 76.5', 've (l/min): 93.9', 've (l/min): 64', 've (l/min): 62.2', 've (l/min): 56.5', 've (l/min): 71.3', 've (l/min): 58.1', 've (l/min): 71.4', 've (l/min): 75.6', 've (l/min): 80.9', 've (l/min): 46.6', 've (l/min): 73', 've (l/min): 70.7', 've (l/min): 75.8', 've (l/min): 122', 've (l/min): 86.9', 've 
(l/min): 48.3', nan], 6: ['duration (mins): 15.165', 'duration (mins): 11.415', 'duration (mins): 14.5', 'duration (mins): 16.83', 'duration (mins): 20.5', 'duration (mins): 14.33', 'duration (mins): 19.5', 'duration (mins): 13.83', 'duration (mins): 11.875', 'duration (mins): 18.25', 'duration (mins): 14.25', 'duration (mins): 16.165', 'duration (mins): 14.415', 'duration (mins): 16.25', 'duration (mins): 16.5', 'duration (mins): 15', 'duration (mins): 18', 'duration (mins): 10.25', 'duration (mins): 13', nan], 7: ['max work (watts): 300', 'max work (watts): 240', 'max work (watts): 280', 'max work (watts): 330', 'max work (watts): 420', 'max work (watts): 290', 'max work (watts): 400', 'max work (watts): 380', 'max work (watts): 320', 'max work (watts): 340', 'max work (watts): 360', 'max work (watts): 210', 'max work (watts): 310', 'max work (watts): 260', nan], 8: ['end borg: 19', 'end borg: 17.5', 'end borg: 18.5', 'end borg: 19.5', 'end borg: 20', 'end borg: 18', nan], 9: ['end hr (bpm): 190', 'end hr (bpm): 182', 'end hr (bpm): 197', 'end hr (bpm): 181', 'end hr (bpm): 187.5', 'end hr (bpm): 197.5', 'end hr (bpm): 196', 'end hr (bpm): 210', 'end hr (bpm): 185', 'end hr (bpm): 194', 'end hr (bpm): 201', 'end hr (bpm): 178.5', 'end hr (bpm): 199.5', 'end hr (bpm): 202', 'end hr (bpm): 193.5', 'end hr (bpm): 198', 'end hr (bpm): 195', 'end hr (bpm): 179.5', nan], 10: ['vo2 end (l/min): 3.885', 'vo2 end (l/min): 2.84', 'vo2 end (l/min): 4.02', 'vo2 end (l/min): 3.81', 'vo2 end (l/min): 4.505', 'vo2 end (l/min): 3.445', 'vo2 end (l/min): 4.6', 'vo2 end (l/min): 3.64', 'vo2 end (l/min): 2.61', 'vo2 end (l/min): 4.31', 'vo2 end (l/min): 3.34', 'vo2 end (l/min): 3.9', 'vo2 end (l/min): 3.61', 'vo2 end (l/min): 3.955', 'vo2 end (l/min): 4.035', 'vo2 end (l/min): 3.57', 'vo2 end (l/min): 3.255', 'vo2 end (l/min): 3.775', 'vo2 end (l/min): 3.625', 'vo2 end (l/min): 4.375', 'vo2 end (l/min): 2.565', 'vo2 end (l/min): 4.19', 'vo2 end (l/min): 4.005', 'vo2 end (l/min): 
3.115', nan], 11: ['body mass: 106', 'body mass: 63', 'body mass: 83', 'body mass: 78.5', 'body mass: 79', 'body mass: 69', 'body mass: 85.5', 'body mass: 74', 'body mass: 55.5', 'body mass: 91', 'body mass: 83.5', 'body mass: 74.6', 'body mass: 75.5', 'body mass: 69.5', 'body mass: 67.5', 'body mass: 66', 'body mass: 64.5', 'body mass: 82', 'body mass: 80', 'body mass: 60.5', 'body mass: 77.5', 'body mass: 84.5', nan], 12: ['vo2max per kg: 36.6509433962264', 'vo2max per kg: 45.0793650793651', 'vo2max per kg: 48.433734939759', 'vo2max per kg: 48.5350318471338', 'vo2max per kg: 57.0253164556962', 'vo2max per kg: 49.9275362318841', 'vo2max per kg: 53.8011695906433', 'vo2max per kg: 49.1891891891892', 'vo2max per kg: 47.027027027027', 'vo2max per kg: 47.3626373626374', 'vo2max per kg: 40', 'vo2max per kg: 52.2788203753351', 'vo2max per kg: 47.8145695364238', 'vo2max per kg: 56.9064748201439', 'vo2max per kg: 44.3406593406593', 'vo2max per kg: 52.8888888888889', 'vo2max per kg: 49.3181818181818', 'vo2max per kg: 58.5271317829457', 'vo2max per kg: 44.2073170731707', 'vo2max per kg: 54.6875', 'vo2max per kg: 42.396694214876', 'vo2max per kg: 54.0645161290323', 'vo2max per kg: 47.396449704142', 'vo2max per kg: 40.1935483870968', nan], 13: ['rer end: 1.19', 'rer end: 1.095', 'rer end: 1.155', 'rer end: 1.235', 'rer end: 1.165', 'rer end: 1.175', 'rer end: 1.285', 'rer end: 1.415', 'rer end: 1.3', 'rer end: 1.215', 'rer end: 1.15', 'rer end: 1.2', 'rer end: 1.22', 'rer end: 1.205', 'rer end: 1.28', 'rer end: 1.23', 'rer end: 1.145', 'rer end: 1.245', 'rer end: 1.13', nan], 14: ['ve end (l/min): 134.9', 've end (l/min): 90.2', 've end (l/min): 159.25', 've end (l/min): 129.45', 've end (l/min): 168.85', 've end (l/min): 122.3', 've end (l/min): 143.2', 've end (l/min): 151.85', 've end (l/min): 178.9', 've end (l/min): 96.8', 've end (l/min): 135.1', 've end (l/min): 122.1', 've end (l/min): 155.35', 've end (l/min): 138.75', 've end (l/min): 140.25', 've end (l/min): 
137.85', 've end (l/min): 123.4', 've end (l/min): 137.2', 've end (l/min): 134.25', 've end (l/min): 92.55', 've end (l/min): 177.65', 've end (l/min): 131.25', 've end (l/min): 125.25', nan], 15: ['rr end (breaths/min): 48.5', 'rr end (breaths/min): 38.9', 'rr end (breaths/min): 50.75', 'rr end (breaths/min): 40.35', 'rr end (breaths/min): 50', 'rr end (breaths/min): 54', 'rr end (breaths/min): 37.15', 'rr end (breaths/min): 58.75', 'rr end (breaths/min): 58.35', 'rr end (breaths/min): 41.5', 'rr end (breaths/min): 40.7', 'rr end (breaths/min): 56.6', 'rr end (breaths/min): 58.5', 'rr end (breaths/min): 42.75', 'rr end (breaths/min): 53.35', 'rr end (breaths/min): 59.25', 'rr end (breaths/min): 44.25', 'rr end (breaths/min): 52.4', 'rr end (breaths/min): 47.25', 'rr end (breaths/min): 29.25', 'rr end (breaths/min): 44.9', 'rr end (breaths/min): 48.9', nan]}\n" ] } ], "source": [ "from tools.preprocess import *\n", "# 1. Identify the paths to the SOFT file and the matrix file\n", "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# 2. Read the matrix file to obtain background information and sample characteristics data\n", "background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n", "clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", "background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", "\n", "# 3. Obtain the sample characteristics dictionary from the clinical dataframe\n", "sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", "\n", "# 4. 
Explicitly print out all the background information and the sample characteristics dictionary\n", "print(\"Background Information:\")\n", "print(background_info)\n", "print(\"Sample Characteristics Dictionary:\")\n", "print(sample_characteristics_dict)\n" ] }, { "cell_type": "markdown", "id": "fb8836b0", "metadata": {}, "source": [ "### Step 2: Dataset Analysis and Clinical Feature Extraction" ] }, { "cell_type": "code", "execution_count": 3, "id": "7cab3e31", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T05:29:53.279961Z", "iopub.status.busy": "2025-03-25T05:29:53.279715Z", "iopub.status.idle": "2025-03-25T05:29:53.313907Z", "shell.execute_reply": "2025-03-25T05:29:53.313286Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Clinical data preview: {0: [nan], 1: [nan], 2: [183.0]}\n", "Clinical data saved to ../../output/preprocess/Heart_rate/clinical_data/GSE18583.csv\n" ] } ], "source": [ "import pandas as pd\n", "import numpy as np\n", "import os\n", "import json\n", "from typing import Optional, Callable, Dict, Any\n", "\n", "# 1. Gene Expression Data Availability\n", "# Based on the background information which mentions \"skeletal muscle gene expression\" \n", "# and \"30 probe sets\", this dataset likely contains gene expression data.\n", "is_gene_available = True\n", "\n", "# 2. 
Variable Availability and Data Type Conversion\n", "# 2.1 Data Availability\n", "# Heart rate data is available in row 2\n", "trait_row = 2\n", "# Age is not available in the sample characteristics\n", "age_row = None\n", "# Gender is available in row 0, but it's constant (all male)\n", "gender_row = None # Setting to None because it's constant (all male)\n", "\n", "# 2.2 Data Type Conversion\n", "def convert_trait(value):\n", " \"\"\"Convert heart rate values to continuous numeric values.\"\"\"\n", " if pd.isna(value):\n", " return None\n", " \n", " # Extract the numeric value after the colon\n", " try:\n", " # Extract from format like \"heart rate (bpm): 173\"\n", " heart_rate = float(value.split(': ')[1])\n", " return heart_rate\n", " except (IndexError, ValueError):\n", " return None\n", "\n", "def convert_age(value):\n", " \"\"\"Placeholder function for age conversion.\"\"\"\n", " return None # Age data not available\n", "\n", "def convert_gender(value):\n", " \"\"\"Placeholder function for gender conversion.\"\"\"\n", " return None # Gender data not available (all male)\n", "\n", "# 3. Save Metadata\n", "# Determine trait data availability\n", "is_trait_available = trait_row is not None\n", "# Conduct initial filtering on dataset usability\n", "validate_and_save_cohort_info(\n", " is_final=False,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", ")\n", "\n", "# 4. 
Clinical Feature Extraction\n", "if trait_row is not None:\n", " # Load the clinical data (assuming it's available in a specific format)\n", " clinical_data_path = os.path.join(in_cohort_dir, \"clinical_data.csv\")\n", " \n", " # If clinical data is available in another format or location, adjust accordingly\n", " try:\n", " clinical_data = pd.read_csv(clinical_data_path)\n", " except FileNotFoundError:\n", " # If clinical_data.csv doesn't exist, try to find another file that might contain clinical data\n", " clinical_files = [f for f in os.listdir(in_cohort_dir) if f.endswith('.csv') and 'clinical' in f.lower()]\n", " if clinical_files:\n", " clinical_data = pd.read_csv(os.path.join(in_cohort_dir, clinical_files[0]))\n", " else:\n", " # If no clinical data file is found, create one from the sample characteristics dictionary\n", " sample_chars = {0: ['gender: male'], \n", " 1: ['protocol: Resting skeletal muscle sample prior to endurance training'], \n", " 2: ['heart rate (bpm): 173', 'heart rate (bpm): 155', 'heart rate (bpm): 183', 'heart rate (bpm): 149', \n", " 'heart rate (bpm): 146', 'heart rate (bpm): 157', 'heart rate (bpm): 162', 'heart rate (bpm): 170', \n", " 'heart rate (bpm): 165', 'heart rate (bpm): 144', 'heart rate (bpm): 167', 'heart rate (bpm): 191', \n", " 'heart rate (bpm): 160', 'heart rate (bpm): 177', 'heart rate (bpm): 174', 'heart rate (bpm): 190', \n", " 'heart rate (bpm): 169', np.nan]}\n", " \n", " # Convert to DataFrame format (assuming the structure is consistent)\n", " rows = []\n", " for i in range(max(len(vals) if isinstance(vals, list) else 0 for vals in sample_chars.values())):\n", " row = {}\n", " for key, vals in sample_chars.items():\n", " if i < len(vals):\n", " row[key] = vals[i]\n", " else:\n", " row[key] = np.nan\n", " rows.append(row)\n", " \n", " clinical_data = pd.DataFrame(rows)\n", " \n", " # Extract clinical features\n", " selected_clinical_df = geo_select_clinical_features(\n", " clinical_data,\n", " trait=trait,\n", 
" trait_row=trait_row,\n", " convert_trait=convert_trait,\n", " age_row=age_row,\n", " convert_age=convert_age,\n", " gender_row=gender_row,\n", " convert_gender=convert_gender\n", " )\n", " \n", " # Preview the extracted clinical data\n", " preview = preview_df(selected_clinical_df)\n", " print(\"Clinical data preview:\", preview)\n", " \n", " # Save the clinical data\n", " os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n", " selected_clinical_df.to_csv(out_clinical_data_file, index=False)\n", " print(f\"Clinical data saved to {out_clinical_data_file}\")\n" ] }, { "cell_type": "markdown", "id": "41d813ff", "metadata": {}, "source": [ "### Step 3: Gene Data Extraction" ] }, { "cell_type": "code", "execution_count": 4, "id": "63e2e841", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T05:29:53.315692Z", "iopub.status.busy": "2025-03-25T05:29:53.315514Z", "iopub.status.idle": "2025-03-25T05:29:53.427296Z", "shell.execute_reply": "2025-03-25T05:29:53.426795Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found data marker at line 73\n", "Header line: \"ID_REF\"\t\"GSM462215\"\t\"GSM462216\"\t\"GSM462217\"\t\"GSM462218\"\t\"GSM462219\"\t\"GSM462220\"\t\"GSM462221\"\t\"GSM462222\"\t\"GSM462223\"\t\"GSM462224\"\t\"GSM462225\"\t\"GSM462226\"\t\"GSM462227\"\t\"GSM462228\"\t\"GSM462229\"\t\"GSM462230\"\t\"GSM462231\"\t\"GSM462232\"\t\"GSM462233\"\t\"GSM462234\"\t\"GSM462235\"\t\"GSM462236\"\t\"GSM462237\"\t\"GSM462238\"\t\"GSM462239\"\t\"GSM462240\"\t\"GSM462241\"\t\"GSM462242\"\t\"GSM462243\"\t\"GSM462244\"\t\"GSM462245\"\t\"GSM462246\"\t\"GSM462247\"\t\"GSM462248\"\t\"GSM462249\"\t\"GSM462250\"\t\"GSM462251\"\t\"GSM462252\"\t\"GSM462253\"\t\"GSM462254\"\t\"GSM462255\"\n", "First data line: 
\"ENST00000000233_at\"\t315.42\t450.67\t465.51\t876.96\t377.22\t550.95\t499.31\t918.83\t341.56\t367.81\t541.69\t446.74\t466.85\t342.56\t328.26\t414.94\t482.4\t400.64\t420.95\t521.42\t721.28\t303.8\t441.64\t551.9\t431.16\t719.8\t471\t434.85\t518.32\t475.11\t468.78\t608.02\t218.24\t384.56\t603.23\t553.91\t387.35\t481.86\t527.56\t214.81\t537.14\n", "Index(['ENST00000000233_at', 'ENST00000000412_at', 'ENST00000000442_at',\n", " 'ENST00000001008_at', 'ENST00000002125_at', 'ENST00000002165_at',\n", " 'ENST00000002501_at', 'ENST00000002829_at', 'ENST00000003100_at',\n", " 'ENST00000003302_at', 'ENST00000003583_at', 'ENST00000003607_at',\n", " 'ENST00000003912_at', 'ENST00000004531_at', 'ENST00000004921_at',\n", " 'ENST00000004980_at', 'ENST00000004982_at', 'ENST00000005082_at',\n", " 'ENST00000005178_at', 'ENST00000005198_at'],\n", " dtype='object', name='ID')\n" ] } ], "source": [ "# 1. Get the file paths for the SOFT file and matrix file\n", "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# 2. First, let's examine the structure of the matrix file to understand its format\n", "import gzip\n", "\n", "# Peek at the first few lines of the file to understand its structure\n", "with gzip.open(matrix_file, 'rt') as file:\n", " # Read first 100 lines to find the header structure\n", " for i, line in enumerate(file):\n", " if '!series_matrix_table_begin' in line:\n", " print(f\"Found data marker at line {i}\")\n", " # Read the next line which should be the header\n", " header_line = next(file)\n", " print(f\"Header line: {header_line.strip()}\")\n", " # And the first data line\n", " first_data_line = next(file)\n", " print(f\"First data line: {first_data_line.strip()}\")\n", " break\n", " if i > 100: # Limit search to first 100 lines\n", " print(\"Matrix table marker not found in first 100 lines\")\n", " break\n", "\n", "# 3. 
Now try to get the genetic data with better error handling\n", "try:\n", " gene_data = get_genetic_data(matrix_file)\n", " print(gene_data.index[:20])\n", "except KeyError as e:\n", " print(f\"KeyError: {e}\")\n", " \n", " # Alternative approach: manually extract the data\n", " print(\"\\nTrying alternative approach to read the gene data:\")\n", " with gzip.open(matrix_file, 'rt') as file:\n", " # Find the start of the data\n", " for line in file:\n", " if '!series_matrix_table_begin' in line:\n", " break\n", " \n", " # Read the headers and data\n", " import pandas as pd\n", " df = pd.read_csv(file, sep='\\t', index_col=0)\n", " print(f\"Column names: {df.columns[:5]}\")\n", " print(f\"First 20 row IDs: {df.index[:20]}\")\n", " gene_data = df\n" ] }, { "cell_type": "markdown", "id": "0ba3b7fa", "metadata": {}, "source": [ "### Step 4: Gene Identifier Review" ] }, { "cell_type": "code", "execution_count": 5, "id": "26f2bbb1", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T05:29:53.428665Z", "iopub.status.busy": "2025-03-25T05:29:53.428543Z", "iopub.status.idle": "2025-03-25T05:29:53.430610Z", "shell.execute_reply": "2025-03-25T05:29:53.430259Z" } }, "outputs": [], "source": [ "# Looking at the identifiers in the gene expression data\n", "# Identifiers like 'ENST00000000233_at' are Ensembl transcript IDs (ENST)\n", "# These are not standard human gene symbols (like BRCA1, TP53, etc.)\n", "# They need to be mapped to gene symbols for easier interpretation and analysis\n", "\n", "requires_gene_mapping = True\n" ] }, { "cell_type": "markdown", "id": "5677c0a2", "metadata": {}, "source": [ "### Step 5: Gene Annotation" ] }, { "cell_type": "code", "execution_count": 6, "id": "888eaa5e", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T05:29:53.431950Z", "iopub.status.busy": "2025-03-25T05:29:53.431836Z", "iopub.status.idle": "2025-03-25T05:29:54.969325Z", "shell.execute_reply": "2025-03-25T05:29:54.968851Z" } }, "outputs": [ { "name": 
"stdout", "output_type": "stream", "text": [ "Platform title: !Platform_title = Affymetrix GeneChip Human Genome U133 Plus 2.0 Array [HGU133Plus2_Hs_ENST (v10) CDF]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "Initial gene annotation retrieval returned limited columns. Trying alternative approach...\n", "\n", "Gene annotation columns:\n", "['ID', 'SPOT_ID']\n", "\n", "Gene annotation preview:\n", "{'ID': ['ENST00000000233_at', 'ENST00000000412_at', 'ENST00000000442_at', 'ENST00000001008_at', 'ENST00000002125_at'], 'SPOT_ID': ['ENST00000000233', 'ENST00000000412', 'ENST00000000442', 'ENST00000001008', 'ENST00000002125']}\n", "\n", "Preparing mapping from Ensembl transcript IDs...\n", "Mapping structure preview:\n", "{'ID': ['ENST00000000233_at', 'ENST00000000412_at', 'ENST00000000442_at', 'ENST00000001008_at', 'ENST00000002125_at'], 'SPOT_ID': ['ENST00000000233', 'ENST00000000412', 'ENST00000000442', 'ENST00000001008', 'ENST00000002125']}\n" ] } ], "source": [ "# 1. 
Extract gene annotation data from the SOFT file\n", "try:\n", " # Let's first look for specific annotation information in the SOFT file\n", " annotation_info = []\n", " with gzip.open(soft_file, 'rt') as file:\n", " for line in file:\n", " if line.startswith('!Platform_title') or line.startswith('!platform_title'):\n", " print(f\"Platform title: {line.strip()}\")\n", " if line.startswith('!Platform_annotation') or line.startswith('!platform_annotation'):\n", " print(f\"Platform annotation: {line.strip()}\")\n", " if 'ANNOTATION information' in line or 'annotation information' in line:\n", " annotation_info.append(line.strip())\n", " \n", " if annotation_info:\n", " print(\"\\nFound annotation information:\")\n", " for info in annotation_info[:5]: # Limit to first 5 to avoid overwhelming output\n", " print(info)\n", " \n", " # Use the library function to extract gene annotation\n", " gene_annotation = get_gene_annotation(soft_file)\n", " \n", " # If the gene annotation is too limited, try a more specific approach\n", " if len(gene_annotation.columns) <= 2:\n", " print(\"\\nInitial gene annotation retrieval returned limited columns. 
Trying alternative approach...\")\n", " \n", " # Try to extract the platform section manually which contains gene annotation\n", " platform_data = []\n", " with gzip.open(soft_file, 'rt') as file:\n", " in_platform_section = False\n", " for line in file:\n", " if line.startswith('^PLATFORM'):\n", " in_platform_section = True\n", " continue\n", " if in_platform_section and line.startswith('!platform_table_begin'):\n", " # Skip the !platform_table_begin line\n", " # Next line should be the header\n", " header = next(file).strip()\n", " platform_data.append(header)\n", " # Read until the end of the platform table\n", " for table_line in file:\n", " if table_line.startswith('!platform_table_end'):\n", " break\n", " platform_data.append(table_line.strip())\n", " break\n", " \n", " # Convert platform data to DataFrame if we found it\n", " if platform_data:\n", " import io\n", " platform_text = '\\n'.join(platform_data)\n", " gene_annotation = pd.read_csv(io.StringIO(platform_text), delimiter='\\t', \n", " low_memory=False, on_bad_lines='skip')\n", " \n", " # 2. 
Preview the gene annotation dataframe\n", " print(\"\\nGene annotation columns:\")\n", " print(gene_annotation.columns.tolist())\n", " \n", " print(\"\\nGene annotation preview:\")\n", " print(preview_df(gene_annotation))\n", " \n", " # Since we only have Ensembl transcript IDs without gene symbols in the annotation,\n", " # we'll need to create a mapping structure using the SPOT_ID (Ensembl ID)\n", " # This will be used for mapping in subsequent steps\n", " print(\"\\nPreparing mapping from Ensembl transcript IDs...\")\n", " # Create a mapping DataFrame with ID and SPOT_ID (which is the Ensembl ID without _at)\n", " mapping_df = gene_annotation[['ID', 'SPOT_ID']].copy()\n", " print(\"Mapping structure preview:\")\n", " print(preview_df(mapping_df))\n", " \n", "except Exception as e:\n", " print(f\"Error processing gene annotation: {e}\")\n" ] }, { "cell_type": "markdown", "id": "46172194", "metadata": {}, "source": [ "### Step 6: Gene Identifier Mapping" ] }, { "cell_type": "code", "execution_count": 7, "id": "d192db25", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T05:29:54.970663Z", "iopub.status.busy": "2025-03-25T05:29:54.970540Z", "iopub.status.idle": "2025-03-25T05:30:01.900331Z", "shell.execute_reply": "2025-03-25T05:30:01.899576Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Gene mapping dataframe sample:\n", " ID Gene\n", "0 ENST00000000233_at ENST00000000233\n", "1 ENST00000000412_at ENST00000000412\n", "2 ENST00000000442_at ENST00000000442\n", "3 ENST00000001008_at ENST00000001008\n", "4 ENST00000002125_at ENST00000002125\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "Gene expression data sample (after mapping):\n", "(225803, 41)\n", " GSM462215 GSM462216 GSM462217 GSM462218 GSM462219 GSM462220 \\\n", "Gene \n", "0.05 3.84 117.86 5.48 4.81 4.86 0.12 \n", "0.06 10.69 24.27 39.88 29.90 77.64 48.53 \n", "0.12 7.68 235.72 10.96 9.62 9.72 0.24 \n", "0.13 27.54 193.82 22.92 47.62 109.90 
0.39 \n", "0.14 49.13 74.34 128.75 69.59 92.31 101.45 \n", "\n", " GSM462221 GSM462222 GSM462223 GSM462224 ... GSM462246 GSM462247 \\\n", "Gene ... \n", "0.05 0.25 103.00 17.13 6.13 ... 0.25 65.76 \n", "0.06 146.23 52.53 41.74 2.58 ... 2.55 92.85 \n", "0.12 0.50 206.00 34.26 12.26 ... 0.50 131.52 \n", "0.13 25.56 176.19 20.39 12.82 ... 165.57 128.90 \n", "0.14 81.02 75.10 58.00 109.47 ... 149.83 146.74 \n", "\n", " GSM462248 GSM462249 GSM462250 GSM462251 GSM462252 GSM462253 \\\n", "Gene \n", "0.05 17.02 33.84 0.12 0.15 15.53 6.44 \n", "0.06 13.85 3.26 138.48 1.57 6.40 4.07 \n", "0.12 34.04 67.68 0.24 0.30 31.06 12.88 \n", "0.13 105.25 129.51 44.66 4.91 74.38 66.12 \n", "0.14 127.75 142.53 85.66 75.15 117.17 51.60 \n", "\n", " GSM462254 GSM462255 \n", "Gene \n", "0.05 18.85 0.38 \n", "0.06 41.88 24.74 \n", "0.12 37.70 0.76 \n", "0.13 197.27 31.28 \n", "0.14 89.94 85.70 \n", "\n", "[5 rows x 41 columns]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "Gene expression data saved to ../../output/preprocess/Heart_rate/gene_data/GSE18583.csv\n" ] } ], "source": [ "# 1. 
Looking at the gene identifiers and annotation data\n", "# From previous steps, we observed:\n", "# - Gene expression data has identifiers like ENST00000000233_at\n", "# - Gene annotation data has 'ID' and 'SPOT_ID' columns\n", "# - 'ID' column matches the gene expression data index (Ensembl transcript IDs with _at suffix)\n", "# - 'SPOT_ID' contains Ensembl transcript IDs without the _at suffix\n", "\n", "# Create a mapping dataframe with probe IDs and their corresponding Ensembl IDs\n", "gene_annotation = get_gene_annotation(soft_file)\n", "\n", "# Create a mapping dataframe using the provided annotation\n", "# We'll use the Ensembl IDs directly since we don't have gene symbols\n", "mapping_df = gene_annotation[['ID', 'SPOT_ID']].copy()\n", "mapping_df = mapping_df.rename(columns={'SPOT_ID': 'Gene'})\n", "\n", "# Print a sample of the mapping dataframe\n", "print(\"Gene mapping dataframe sample:\")\n", "print(mapping_df.head())\n", "\n", "# Since the apply_gene_mapping function expects string values in Gene column that can be extracted\n", "# with extract_human_gene_symbols, but we want to use the Ensembl IDs directly, \n", "# we'll modify our approach\n", "\n", "# First, get the gene expression data\n", "gene_data = get_genetic_data(matrix_file)\n", "\n", "# Create a simplified version of apply_gene_mapping for our Ensembl IDs\n", "# This will directly map probes to Ensembl IDs without symbol extraction\n", "def apply_direct_mapping(expression_df, mapping_df):\n", " # Make sure mapping_df has the necessary columns\n", " mapping_df = mapping_df[mapping_df['ID'].isin(expression_df.index)].copy()\n", " # Set 1-to-1 mapping as we're directly using the Ensembl IDs\n", " mapping_df['num_genes'] = 1\n", " mapping_df.set_index('ID', inplace=True)\n", " \n", " # Merge and distribute expression values\n", " merged_df = mapping_df.join(expression_df)\n", " expr_cols = [col for col in merged_df.columns if col not in ['Gene', 'num_genes']]\n", " \n", " # Group by Gene 
(Ensembl ID) and sum values (simple 1:1 mapping)\n", " gene_expression_df = merged_df.groupby('Gene')[expr_cols].sum()\n", " \n", " return gene_expression_df\n", "\n", "# Apply our direct mapping function\n", "gene_data = apply_direct_mapping(gene_data, mapping_df)\n", "\n", "# Print a sample of the resulting gene expression data\n", "print(\"\\nGene expression data sample (after mapping):\")\n", "print(gene_data.shape)\n", "print(gene_data.head())\n", "\n", "# Save the gene data to the specified output file\n", "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", "gene_data.to_csv(out_gene_data_file)\n", "print(f\"\\nGene expression data saved to {out_gene_data_file}\")\n" ] }, { "cell_type": "markdown", "id": "cad9000d", "metadata": {}, "source": [ "### Step 7: Data Normalization and Linking" ] }, { "cell_type": "code", "execution_count": 8, "id": "3bf7633f", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T05:30:01.901903Z", "iopub.status.busy": "2025-03-25T05:30:01.901760Z", "iopub.status.idle": "2025-03-25T05:38:48.774158Z", "shell.execute_reply": "2025-03-25T05:38:48.773635Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Clinical data saved to ../../output/preprocess/Heart_rate/clinical_data/GSE18583.csv\n", "Gene data shape: (225803, 41)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Gene data saved to ../../output/preprocess/Heart_rate/gene_data/GSE18583.csv\n", "Linked data shape: (41, 225805)\n", "Linked data columns preview: ['Heart_rate', 'Gender', '0.05', '0.06', '0.12', '0.13', '0.14', '0.15', '0.16', '0.17']\n", "\n", "Missing values before handling:\n", " Trait (Heart_rate) missing: 17 out of 41\n", " Genes with >20% missing: 0 out of 225803\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ " Samples with >5% missing genes: 0 out of 41\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Data shape after handling missing values: (24, 225805)\n", 
# 1. Load the clinical data again to ensure we have the correct data
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
background_info, clinical_data = get_background_and_clinical_data(matrix_file)

# Get a proper view of the sample characteristics
sample_characteristics_dict = get_unique_values_by_row(clinical_data)

# Extract heart rate data using the convert_trait function from Step 2
def convert_trait(value):
    """Convert a characteristic string like 'heart rate (bpm): 173' to a float.

    Returns None for NaN, entries without a colon, or unparsable numbers.
    """
    if pd.isna(value):
        return None
    try:
        # Extract the numerical value after the colon
        parts = value.split(":")
        if len(parts) < 2:
            return None
        numeric_value = parts[1].strip()
        # Remove possible 'bpm' text and convert to float
        numeric_value = numeric_value.replace("bpm", "").strip()
        return float(numeric_value)
    except (ValueError, IndexError, AttributeError):
        # AttributeError guards against non-string, non-NaN inputs
        return None

# Gender conversion function (defined in Step 2)
def convert_gender(value):
    """Convert gender to binary (0=female, 1=male); None when unknown."""
    if pd.isna(value):
        return None
    try:
        gender = value.split(":")[1].strip().lower()
        # BUGFIX: test "female" BEFORE "male". The substring "male" occurs
        # inside "female", so checking "male" first misclassified every
        # female sample as male (returned 1).
        if "female" in gender:
            return 0
        elif "male" in gender:
            return 1
        else:
            return None
    except (ValueError, IndexError, AttributeError):
        return None

# Extract clinical features based on the rows identified in Step 2
trait_row = 2   # Heart rate data is in row 2
gender_row = 0  # Gender data is in row 0
selected_clinical_df = geo_select_clinical_features(
    clinical_df=clinical_data,
    trait=trait,
    trait_row=trait_row,
    convert_trait=convert_trait,
    gender_row=gender_row,
    convert_gender=convert_gender
)

# Transpose the clinical data for easier processing (samples become rows)
selected_clinical_df = selected_clinical_df.T
selected_clinical_df.index.name = 'Sample'

# Save clinical data to file
os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)
selected_clinical_df.to_csv(out_clinical_data_file)
print(f"Clinical data saved to {out_clinical_data_file}")

# 2. Keep using the original gene expression data since mapping failed
# The gene_data object from Step 6 already contains our gene expression data
print(f"Gene data shape: {gene_data.shape}")

# Save the gene data to the output file
os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
gene_data.to_csv(out_gene_data_file)
print(f"Gene data saved to {out_gene_data_file}")
# 3. Link clinical and genetic data
# Make sure sample IDs match between clinical and genetic data.
# FIX: sort the intersection -- iterating a raw set gives a nondeterministic
# sample order, which made the linked CSV layout vary between runs.
common_samples = sorted(set(selected_clinical_df.index) & set(gene_data.columns))
if not common_samples:
    print("Warning: No matching sample IDs between clinical and genetic data!")
    # Fall back to matching by position rather than by ID. Truncate to the
    # shorter side so the index assignment cannot raise on a length mismatch
    # (the original slice failed when clinical had more rows than gene columns).
    n_samples = min(len(selected_clinical_df), gene_data.shape[1])
    selected_clinical_df = selected_clinical_df.iloc[:n_samples]
    selected_clinical_df.index = gene_data.columns[:n_samples]
    common_samples = list(selected_clinical_df.index)

# Select only common samples from both datasets
clinical_subset = selected_clinical_df.loc[common_samples]
gene_subset = gene_data[common_samples]

# Now link the data: samples as rows, clinical features + genes as columns
linked_data = pd.concat([clinical_subset, gene_subset.T], axis=1)
print(f"Linked data shape: {linked_data.shape}")
print(f"Linked data columns preview: {list(linked_data.columns[:10])}")

# 4. Handle missing values -- report the extent of missingness first
print("\nMissing values before handling:")
print(f" Trait ({trait}) missing: {linked_data[trait].isna().sum()} out of {len(linked_data)}")
gene_cols = [col for col in linked_data.columns if col != trait and col != 'Gender']
if gene_cols:
    missing_genes_pct = linked_data[gene_cols].isna().mean()
    genes_with_high_missing = sum(missing_genes_pct > 0.2)
    print(f" Genes with >20% missing: {genes_with_high_missing} out of {len(gene_cols)}")

    if len(linked_data) > 0:
        missing_per_sample = linked_data[gene_cols].isna().mean(axis=1)
        samples_with_high_missing = sum(missing_per_sample > 0.05)
        print(f" Samples with >5% missing genes: {samples_with_high_missing} out of {len(linked_data)}")

# Handle missing values (drops samples without the trait, filters sparse genes)
cleaned_data = handle_missing_values(linked_data, trait)
print(f"Data shape after handling missing values: {cleaned_data.shape}")
# 5. Evaluate bias in trait and demographic features; biased demographic
# columns are dropped from the frame in the process.
trait_biased, cleaned_data = judge_and_remove_biased_features(cleaned_data, trait)

# 6. Final validation and save
note = "Dataset contains gene expression data from skeletal muscle samples before and after endurance training, with heart rate measurements."

# Availability flags recorded in the cohort-info JSON
is_gene_available = gene_data.shape[0] > 0
is_trait_available = True  # trait data confirmed present earlier

is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available,
    is_biased=trait_biased,
    df=cleaned_data,
    note=note,
)

# 7. Save if usable
if not (is_usable and cleaned_data.shape[0] > 0):
    print("Data was determined to be unusable or empty and was not saved")
else:
    os.makedirs(os.path.dirname(out_data_file), exist_ok=True)
    cleaned_data.to_csv(out_data_file)
    print(f"Linked data saved to {out_data_file}")