{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "060b9180", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T03:57:05.705130Z", "iopub.status.busy": "2025-03-25T03:57:05.704943Z", "iopub.status.idle": "2025-03-25T03:57:05.876498Z", "shell.execute_reply": "2025-03-25T03:57:05.876126Z" } }, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n", "\n", "# Path Configuration\n", "from tools.preprocess import *\n", "\n", "# Processing context\n", "trait = \"Sickle_Cell_Anemia\"\n", "cohort = \"GSE84632\"\n", "\n", "# Input paths\n", "in_trait_dir = \"../../input/GEO/Sickle_Cell_Anemia\"\n", "in_cohort_dir = \"../../input/GEO/Sickle_Cell_Anemia/GSE84632\"\n", "\n", "# Output paths\n", "out_data_file = \"../../output/preprocess/Sickle_Cell_Anemia/GSE84632.csv\"\n", "out_gene_data_file = \"../../output/preprocess/Sickle_Cell_Anemia/gene_data/GSE84632.csv\"\n", "out_clinical_data_file = \"../../output/preprocess/Sickle_Cell_Anemia/clinical_data/GSE84632.csv\"\n", "json_path = \"../../output/preprocess/Sickle_Cell_Anemia/cohort_info.json\"\n" ] }, { "cell_type": "markdown", "id": "816c7125", "metadata": {}, "source": [ "### Step 1: Initial Data Loading" ] }, { "cell_type": "code", "execution_count": 2, "id": "723f8fb6", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T03:57:05.877993Z", "iopub.status.busy": "2025-03-25T03:57:05.877831Z", "iopub.status.idle": "2025-03-25T03:57:06.401886Z", "shell.execute_reply": "2025-03-25T03:57:06.401519Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Background Information:\n", "!Series_title\t\"Gene expression of peripheral blood mononuclear cells from adults with sickle cell disease (UIC cohort)\"\n", "!Series_summary\t\"Sickle cell disease is associated with systemic complications, many associated with either severity of disease or increased risk of mortality. We sought to identify a circulating gene expression profile whose predictive capacity spanned the spectrum of these poor outcomes in sickle cell disease.\"\n", "!Series_summary\t\"The Training cohort consisted of patients with SCD who were prospectively recruited from the University of Illinois. The Testing cohort consisted of a combination of patients prospectively seen at two separate institutions including the University of Chicago and Howard University\"\n", "!Series_overall_design\t\"The gene expression of PBMC from 172 sickle cell disease patients at UIC were analyzed\"\n", "Sample Characteristics Dictionary:\n", "{0: ['tissue: peripheral blood'], 1: ['cell type: mononuclear cells'], 2: ['disease: Sickle cell disease']}\n" ] } ], "source": [ "from tools.preprocess import *\n", "# 1. Identify the paths to the SOFT file and the matrix file\n", "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# 2. Read the matrix file to obtain background information and sample characteristics data\n", "background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n", "clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", "background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", "\n", "# 3. Obtain the sample characteristics dictionary from the clinical dataframe\n", "sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", "\n", "# 4. 
Explicitly print out all the background information and the sample characteristics dictionary\n", "print(\"Background Information:\")\n", "print(background_info)\n", "print(\"Sample Characteristics Dictionary:\")\n", "print(sample_characteristics_dict)\n" ] }, { "cell_type": "markdown", "id": "3cdc8af5", "metadata": {}, "source": [ "### Step 2: Dataset Analysis and Clinical Feature Extraction" ] }, { "cell_type": "code", "execution_count": 3, "id": "7585ab82", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T03:57:06.403269Z", "iopub.status.busy": "2025-03-25T03:57:06.403142Z", "iopub.status.idle": "2025-03-25T03:57:06.410843Z", "shell.execute_reply": "2025-03-25T03:57:06.410386Z" } }, "outputs": [ { "data": { "text/plain": [ "False" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# 1. Gene Expression Data Availability\n", "# Based on the background information, this dataset likely contains gene expression data \n", "# as it specifically mentions \"Gene expression of peripheral blood mononuclear cells\"\n", "is_gene_available = True\n", "\n", "# 2. Variable Availability and Data Type Conversion\n", "\n", "# 2.1 Data Availability\n", "# For trait (Sickle_Cell_Anemia), we can see from sample characteristics that all patients have SCD\n", "# The key 2 contains \"disease: Sickle cell disease\" but since all patients have the same disease,\n", "# there's no variation to study\n", "trait_row = None\n", "\n", "# For age, there is no information in the sample characteristics\n", "age_row = None\n", "\n", "# For gender, there is no information in the sample characteristics\n", "gender_row = None\n", "\n", "# 2.2 Data Type Conversion\n", "# Since trait_row is None (all patients have the same disease), we don't need to define convert_trait\n", "# But for completeness, we'll define it anyway\n", "def convert_trait(value):\n", " if value is None:\n", " return None\n", " \n", " value = value.lower().strip()\n", " if \":\" in value:\n", " value = value.split(\":\", 1)[1].strip()\n", " \n", " if \"sickle cell disease\" in value or \"scd\" in value:\n", " return 1\n", " else:\n", " return 0\n", "\n", "# Define conversion functions for age and gender even though we don't have that data\n", "def convert_age(value):\n", " if value is None:\n", " return None\n", " \n", " if \":\" in value:\n", " value = value.split(\":\", 1)[1].strip()\n", " \n", " try:\n", " return float(value)\n", " except:\n", " return None\n", "\n", "def convert_gender(value):\n", " if value is None:\n", " return None\n", " \n", " if \":\" in value:\n", " value = value.split(\":\", 1)[1].strip()\n", " \n", " value = value.lower().strip()\n", " if \"female\" in value or \"f\" == value:\n", " return 0\n", " elif \"male\" in value or \"m\" == value:\n", " return 1\n", " else:\n", " return None\n", "\n", "# 3. Save Metadata\n", "# Determine trait data availability\n", "is_trait_available = trait_row is not None\n", "\n", "# Validate and save cohort info (initial filtering)\n", "validate_and_save_cohort_info(\n", " is_final=False, \n", " cohort=cohort, \n", " info_path=json_path, \n", " is_gene_available=is_gene_available, \n", " is_trait_available=is_trait_available\n", ")\n", "\n", "# 4. 
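For reference, had trait_row pointed to a usable clinical row, extraction would mirror the Step 7 call,\n",
"# roughly:\n",
"#   selected_clinical = geo_select_clinical_features(clinical_df=clinical_data, trait=trait,\n",
"#       trait_row=trait_row, convert_trait=convert_trait, age_row=age_row, convert_age=convert_age,\n",
"#       gender_row=gender_row, convert_gender=convert_gender)\n",
"# 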
Since trait_row is None, we skip the clinical feature extraction step\n" ] }, { "cell_type": "markdown", "id": "51461ddd", "metadata": {}, "source": [ "### Step 3: Gene Data Extraction" ] }, { "cell_type": "code", "execution_count": 4, "id": "ed60b04b", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T03:57:06.412182Z", "iopub.status.busy": "2025-03-25T03:57:06.412062Z", "iopub.status.idle": "2025-03-25T03:57:07.419234Z", "shell.execute_reply": "2025-03-25T03:57:07.418577Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found data marker at line 65\n", "Header line: \"ID_REF\"\t\"GSM2243130\"\t\"GSM2243131\"\t\"GSM2243132\"\t\"GSM2243133\"\t\"GSM2243134\"\t\"GSM2243135\"\t\"GSM2243136\"\t\"GSM2243137\"\t\"GSM2243138\"\t\"GSM2243139\"\t\"GSM2243140\"\t\"GSM2243141\"\t\"GSM2243142\"\t\"GSM2243143\"\t\"GSM2243144\"\t\"GSM2243145\"\t\"GSM2243146\"\t\"GSM2243147\"\t\"GSM2243148\"\t\"GSM2243149\"\t\"GSM2243150\"\t\"GSM2243151\"\t\"GSM2243152\"\t\"GSM2243153\"\t\"GSM2243154\"\t\"GSM2243155\"\t\"GSM2243156\"\t\"GSM2243157\"\t\"GSM2243158\"\t\"GSM2243159\"\t\"GSM2243160\"\t\"GSM2243161\"\t\"GSM2243162\"\t\"GSM2243163\"\t\"GSM2243164\"\t\"GSM2243165\"\t\"GSM2243166\"\t\"GSM2243167\"\t\"GSM2243168\"\t\"GSM2243169\"\t\"GSM2243170\"\t\"GSM2243171\"\t\"GSM2243172\"\t\"GSM2243173\"\t\"GSM2243174\"\t\"GSM2243175\"\t\"GSM2243176\"\t\"GSM2243177\"\t\"GSM2243178\"\t\"GSM2243179\"\t\"GSM2243180\"\t\"GSM2243181\"\t\"GSM2243182\"\t\"GSM2243183\"\t\"GSM2243184\"\t\"GSM2243185\"\t\"GSM2243186\"\t\"GSM2243187\"\t\"GSM2243188\"\t\"GSM2243189\"\t\"GSM2243190\"\t\"GSM2243191\"\t\"GSM2243192\"\t\"GSM2243193\"\t\"GSM2243194\"\t\"GSM2243195\"\t\"GSM2243196\"\t\"GSM2243197\"\t\"GSM2243198\"\t\"GSM2243199\"\t\"GSM2243200\"\t\"GSM2243201\"\t\"GSM2243202\"\t\"GSM2243203\"\t\"GSM2243204\"\t\"GSM2243205\"\t\"GSM2243206\"\t\"GSM2243207\"\t\"GSM2243208\"\t\"GSM2243209\"\t\"GSM2243210\"\t\"GSM2243211\"\t\"GSM2243212\"\t\"GSM2243213\"\t\"GSM2243214\"\t\"GSM2243215\"\t\"GSM2243216\"\t\"GSM2243217\"\t\"GSM2243218\"\t\"GSM2243219\"\t\"GSM2243220\"\t\"GSM2243221\"\t\"GSM2243222\"\t\"GSM2243223\"\t\"GSM2243224\"\t\"GSM2243225\"\t\"GSM2243226\"\t\"GSM2243227\"\t\"GSM2243228\"\t\"GSM2243229\"\t\"GSM2243230\"\t\"GSM2243231\"\t\"GSM2243232\"\t\"GSM2243233\"\t\"GSM2243234\"\t\"GSM2243235\"\t\"GSM2243236\"\t\"GSM2243237\"\t\"GSM2243238\"\t\"GSM2243239\"\t\"GSM2243240\"\t\"GSM2243241\"\t\"GSM2243242\"\t\"GSM2243243\"\t\"GSM2243244\"\t\"GSM2243245\"\t\"GSM2243246\"\t\"GSM2243247\"\t\"GSM2243248\"\t\"GSM2243249\"\t\"GSM2243250\"\t\"GSM2243251\"\t\"GSM2243252\"\t\"GSM2243253\"\t\"GSM2243254\"\t\"GSM2243255\"\t\"GSM2243256\"\t\"GSM2243257\"\t\"GSM2243258\"\t\"GSM2243259\"\t\"GSM2243260\"\t\"GSM2243261\"\t\"GSM2243262\"\t\"GSM2243263\"\t\"GSM2243264\"\t\"GSM2243265\"\t\"GSM2243266\"\t\"GSM2243267\"\t\"GSM2243268\"\t\"GSM2243269\"\t\"GSM2243270\"\t\"GSM2243271\"\t\"GSM2243272\"\t\"GSM2243273\"\t\"GSM2243274\"\t\"GSM2243275\"\t\"GSM2243276\"\t\"GSM2243277\"\t\"GSM2243278\"\t\"GSM2243279\"\t\"GSM2243280\"\t\"GSM2243281\"\t\"GSM2243282\"\t\"GSM2243283\"\t\"GSM2243284\"\t\"GSM2243285\"\t\"GSM2243286\"\t\"GSM2243287\"\t\"GSM2243288\"\t\"GSM2243289\"\t\"GSM2243290\"\t\"GSM2243291\"\t\"GSM2243292\"\t\"GSM2243293\"\t\"GSM2243294\"\t\"GSM2243295\"\t\"GSM2243296\"\t\"GSM2243297\"\t\"GSM2243298\"\t\"GSM2243299\"\t\"GSM2243300\"\t\"GSM2243301\"\n", "First data line: 
16650001\t1.905590374\t0.876530933\t1.428847293\t1.306173904\t1.691706154\t1.910957458\t1.968130715\t1.203128338\t0.717902138\t1.256850104\t0.984356046\t1.924217312\t2.005207005\t1.762318185\t1.594248752\t1.478184606\t1.422779725\t1.157138662\t1.733301056\t2.251875821\t1.359094932\t1.438486339\t1.754779258\t1.481383548\t1.403997895\t1.008685512\t1.008981491\t0.921302674\t1.311955359\t0.943264309\t1.8000147\t1.173664151\t2.413083413\t0.872673341\t1.187160789\t2.073094699\t1.441446128\t1.052391729\t1.880905733\t1.500257135\t1.006524866\t1.875893824\t1.141167037\t2.549571399\t1.055312054\t1.727726786\t1.03657659\t1.87037875\t2.268233588\t1.063392278\t1.163354219\t0.645401009\t1.457482775\t0.980409661\t1.020968636\t1.368359071\t1.518124395\t1.618707891\t1.229239122\t1.804040013\t2.295078875\t1.277217302\t1.567473944\t1.560232327\t1.448372034\t1.499063354\t1.170872083\t1.154534048\t1.150183158\t1.720939004\t1.527832503\t1.159953176\t3.087976751\t1.008409265\t1.202393836\t0.87301865\t1.387449711\t2.009824276\t1.404882868\t1.54433826\t2.464862238\t1.862239618\t1.391129715\t0.791397535\t1.76237605\t1.068552177\t1.123616828\t1.464342187\t1.384212712\t0.702978281\t1.441796761\t1.423299005\t1.025391275\t1.860220932\t2.611635601\t1.550023049\t0.821339197\t1.345620792\t1.507239265\t2.139675052\t1.072521413\t1.223891965\t1.591231022\t1.355302245\t0.934231122\t1.482788446\t1.613457167\t1.317235594\t0.928463448\t1.321355362\t1.662585393\t1.099876669\t1.394285543\t1.247175444\t1.442866827\t1.479755664\t1.848466446\t1.161528987\t1.255758832\t1.41120695\t1.098968942\t1.217922862\t1.51515474\t1.009346531\t1.204810997\t1.431826814\t1.863568243\t0.91234438\t1.001522822\t1.277681002\t1.843572928\t1.271810754\t1.376360368\t2.366340728\t1.115938399\t1.516052543\t1.255933456\t1.4553966\t1.978144668\t1.109051957\t1.608358496\t2.984532125\t1.734406044\t1.105559406\t1.130914932\t1.834977058\t0.896371385\t1.144155054\t2.205703113\t2.189661056\t1.313573377\t2.149684173\t1.823594352\t1.190751999\t0.895049346\t1.182287003\t2.69646161\t1.236046637\t1.582213693\t0.685664006\t1.494367155\t1.894076794\t0.922920692\t1.53783659\t1.86228866\t1.249592324\t0.525804249\t1.958107354\t2.061367625\t1.462597406\t1.419491004\t2.061966443\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Index(['16650001', '16650003', '16650005', '16650007', '16650009', '16650011',\n", " '16650013', '16650015', '16650017', '16650019', '16650021', '16650023',\n", " '16650025', '16650027', '16650029', '16650031', '16650033', '16650035',\n", " '16650037', '16650041'],\n", " dtype='object', name='ID')\n" ] } ], "source": [ "# 1. Get the file paths for the SOFT file and matrix file\n", "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# 2. 
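The series matrix is tab-separated text: metadata lines start with '!' and the expression table sits\n",
"# between the !series_matrix_table_begin and !series_matrix_table_end markers, with probe IDs in the\n",
"# first column (ID_REF) and one column per GSM sample.\n",
"# 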
First, let's examine the structure of the matrix file to understand its format\n", "import gzip\n", "\n", "# Peek at the first few lines of the file to understand its structure\n", "with gzip.open(matrix_file, 'rt') as file:\n", " # Read first 100 lines to find the header structure\n", " for i, line in enumerate(file):\n", " if '!series_matrix_table_begin' in line:\n", " print(f\"Found data marker at line {i}\")\n", " # Read the next line which should be the header\n", " header_line = next(file)\n", " print(f\"Header line: {header_line.strip()}\")\n", " # And the first data line\n", " first_data_line = next(file)\n", " print(f\"First data line: {first_data_line.strip()}\")\n", " break\n", " if i > 100: # Limit search to first 100 lines\n", " print(\"Matrix table marker not found in first 100 lines\")\n", " break\n", "\n", "# 3. Now try to get the genetic data with better error handling\n", "try:\n", " gene_data = get_genetic_data(matrix_file)\n", " print(gene_data.index[:20])\n", "except KeyError as e:\n", " print(f\"KeyError: {e}\")\n", " \n", " # Alternative approach: manually extract the data\n", " print(\"\\nTrying alternative approach to read the gene data:\")\n", " with gzip.open(matrix_file, 'rt') as file:\n", " # Find the start of the data\n", " for line in file:\n", " if '!series_matrix_table_begin' in line:\n", " break\n", " \n", " # Read the headers and data\n", " import pandas as pd\n", " df = pd.read_csv(file, sep='\\t', index_col=0)\n", " print(f\"Column names: {df.columns[:5]}\")\n", " print(f\"First 20 row IDs: {df.index[:20]}\")\n", " gene_data = df\n" ] }, { "cell_type": "markdown", "id": "07762cce", "metadata": {}, "source": [ "### Step 4: Gene Identifier Review" ] }, { "cell_type": "code", "execution_count": 5, "id": "1aff0584", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T03:57:07.420648Z", "iopub.status.busy": "2025-03-25T03:57:07.420523Z", "iopub.status.idle": "2025-03-25T03:57:07.422904Z", "shell.execute_reply": "2025-03-25T03:57:07.422468Z" } }, "outputs": [], "source": [ "# Let's analyze the gene identifiers in the gene expression data\n", "# Looking at the identifiers: \"16650001\", \"16650003\", \"16650005\", etc.\n", "\n", "# These appear to be numeric IDs rather than human gene symbols\n", "# Human gene symbols typically have alphabetic characters like \"BRCA1\" or \"TP53\"\n", "# These numeric IDs appear to be probe or feature identifiers from a microarray platform\n", "# They will need to be mapped to standard gene symbols\n", "\n", "requires_gene_mapping = True\n" ] }, { "cell_type": "markdown", "id": "c199770f", "metadata": {}, "source": [ "### Step 5: Gene Annotation" ] }, { "cell_type": "code", "execution_count": 6, "id": "b12594cd", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T03:57:07.424257Z", "iopub.status.busy": "2025-03-25T03:57:07.424134Z", "iopub.status.idle": "2025-03-25T03:57:07.515820Z", "shell.execute_reply": "2025-03-25T03:57:07.515183Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Examining SOFT file structure:\n", "Line 0: ^DATABASE = GeoMiame\n", "Line 1: !Database_name = Gene Expression Omnibus (GEO)\n", "Line 2: !Database_institute = NCBI NLM NIH\n", "Line 3: !Database_web_link = http://www.ncbi.nlm.nih.gov/geo\n", "Line 4: !Database_email = geo@ncbi.nlm.nih.gov\n", "Line 5: ^SERIES = GSE84632\n", "Line 6: !Series_title = Gene expression of peripheral blood mononuclear cells from adults with sickle cell disease (UIC cohort)\n", "Line 7: !Series_geo_accession = 
GSE84632\n", "Line 8: !Series_status = Public on Sep 08 2017\n", "Line 9: !Series_submission_date = Jul 20 2016\n", "Line 10: !Series_last_update_date = Mar 15 2019\n", "Line 11: !Series_pubmed_id = 28373264\n", "Line 12: !Series_summary = Sickle cell disease is associated with systemic complications, many associated with either severity of disease or increased risk of mortality. We sought to identify a circulating gene expression profile whose predictive capacity spanned the spectrum of these poor outcomes in sickle cell disease.\n", "Line 13: !Series_summary = The Training cohort consisted of patients with SCD who were prospectively recruited from the University of Illinois. The Testing cohort consisted of a combination of patients prospectively seen at two separate institutions including the University of Chicago and Howard University\n", "Line 14: !Series_overall_design = The gene expression of PBMC from 172 sickle cell disease patients at UIC were analyzed\n", "Line 15: !Series_type = Expression profiling by array\n", "Line 16: !Series_contributor = Zhengdeng,,Lei\n", "Line 17: !Series_contributor = Ankit,,Desai\n", "Line 18: !Series_contributor = Roberto,,Machado\n", "Line 19: !Series_sample_id = GSM2243130\n", "\n", "Gene annotation preview:\n", "{'ID': ['16657436', '16657440', '16657445', '16657447', '16657450'], 'RANGE_STRAND': ['+', '+', '+', '+', '+'], 'RANGE_START': [12190.0, 29554.0, 69091.0, 160446.0, 317811.0], 'RANGE_END': [13639.0, 31109.0, 70008.0, 161525.0, 328581.0], 'total_probes': [25.0, 28.0, 8.0, 13.0, 36.0], 'GB_ACC': ['NR_046018', nan, nan, nan, 'NR_024368'], 'SPOT_ID': ['chr1:12190-13639', 'chr1:29554-31109', 'chr1:69091-70008', 'chr1:160446-161525', 'chr1:317811-328581'], 'RANGE_GB': ['NC_000001.10', 'NC_000001.10', 'NC_000001.10', 'NC_000001.10', 'NC_000001.10']}\n" ] } ], "source": [ "# 1. Let's first examine the structure of the SOFT file before trying to parse it\n", "import gzip\n", "\n", "# Look at the first few lines of the SOFT file to understand its structure\n", "print(\"Examining SOFT file structure:\")\n", "try:\n", " with gzip.open(soft_file, 'rt') as file:\n", " # Read first 20 lines to understand the file structure\n", " for i, line in enumerate(file):\n", " if i < 20:\n", " print(f\"Line {i}: {line.strip()}\")\n", " else:\n", " break\n", "except Exception as e:\n", " print(f\"Error reading SOFT file: {e}\")\n", "\n", "# 2. 
Now let's try a more robust approach to extract the gene annotation\n", "# Instead of using the library function which failed, we'll implement a custom approach\n", "try:\n", " # First, look for the platform section which contains gene annotation\n", " platform_data = []\n", " with gzip.open(soft_file, 'rt') as file:\n", " in_platform_section = False\n", " for line in file:\n", " if line.startswith('^PLATFORM'):\n", " in_platform_section = True\n", " continue\n", " if in_platform_section and line.startswith('!platform_table_begin'):\n", " # Next line should be the header\n", " header = next(file).strip()\n", " platform_data.append(header)\n", " # Read until the end of the platform table\n", " for table_line in file:\n", " if table_line.startswith('!platform_table_end'):\n", " break\n", " platform_data.append(table_line.strip())\n", " break\n", " \n", " # If we found platform data, convert it to a DataFrame\n", " if platform_data:\n", " import pandas as pd\n", " import io\n", " platform_text = '\\n'.join(platform_data)\n", " gene_annotation = pd.read_csv(io.StringIO(platform_text), delimiter='\\t', \n", " low_memory=False, on_bad_lines='skip')\n", " print(\"\\nGene annotation preview:\")\n", " print(preview_df(gene_annotation))\n", " else:\n", " print(\"Could not find platform table in SOFT file\")\n", " \n", " # Try an alternative approach - extract mapping from other sections\n", " with gzip.open(soft_file, 'rt') as file:\n", " for line in file:\n", " if 'ANNOTATION information' in line or 'annotation information' in line:\n", " print(f\"Found annotation information: {line.strip()}\")\n", " if line.startswith('!Platform_title') or line.startswith('!platform_title'):\n", " print(f\"Platform title: {line.strip()}\")\n", " \n", "except Exception as e:\n", " print(f\"Error processing gene annotation: {e}\")\n" ] }, { "cell_type": "markdown", "id": "672a96c1", "metadata": {}, "source": [ "### Step 6: Gene Identifier Mapping" ] }, { "cell_type": "code", "execution_count": 7, "id": "d7d33cfa", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T03:57:07.517439Z", "iopub.status.busy": "2025-03-25T03:57:07.517298Z", "iopub.status.idle": "2025-03-25T03:57:24.482033Z", "shell.execute_reply": "2025-03-25T03:57:24.481653Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Gene annotation columns: ['ID', 'RANGE_STRAND', 'RANGE_START', 'RANGE_END', 'total_probes', 'GB_ACC', 'SPOT_ID', 'RANGE_GB']\n", "ID sample values: ['16657436', '16657440', '16657445']\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "RANGE_STRAND sample values: ['+', '+', '+']\n", "RANGE_START sample values: [12190.0, 29554.0, 69091.0]\n", "RANGE_END sample values: [13639.0, 31109.0, 70008.0]\n", "total_probes sample values: [25.0, 28.0, 8.0]\n", "GB_ACC sample values: ['NR_046018', 'NR_024368', 'NR_029406']\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "SPOT_ID sample values: ['chr1:12190-13639', 'chr1:29554-31109', 'chr1:69091-70008']\n", "RANGE_GB sample values: ['NC_000001.10', 'NC_000001.10', 'NC_000001.10']\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "Gene mapping dataframe preview:\n", "{'ID': ['16657436', '16657450', '16657476', '16657485', '16657514'], 'Gene': ['NR_046018', 'NR_024368', 'NR_029406', 'XR_132471', 'NR_047526']}\n", "Number of probes with gene mappings: 17623\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "Gene expression data preview (after mapping):\n", "Shape: (16346, 172)\n", "First 5 gene 
symbols: ['NR_046018', 'NR_024368', 'NR_029406', 'XR_132471', 'NR_047526']\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "Gene expression data saved to: ../../output/preprocess/Sickle_Cell_Anemia/gene_data/GSE84632.csv\n", "Successfully mapped 16346 genes.\n" ] } ], "source": [ "# 1. Identify which columns to use for mapping\n", "# Looking at the gene annotation preview, we can see that 'ID' corresponds to the same \n", "# probe ID format as in the gene expression data.\n", "\n", "# First let's get the gene annotation data from the SOFT file\n", "gene_annotation = get_gene_annotation(soft_file)\n", "print(\"Gene annotation columns:\", gene_annotation.columns.tolist())\n", "\n", "# Check the columns to understand what kind of annotation we have\n", "for col in gene_annotation.columns:\n", " non_null_values = gene_annotation[col].dropna().head(3).tolist()\n", " if non_null_values:\n", " print(f\"{col} sample values: {non_null_values}\")\n", "\n", "# Since there's no clear gene symbol column, we need a different approach\n", "# Let's modify our extract_human_gene_symbols function to also accept RefSeq IDs\n", "def custom_extract_gene_info(text):\n", " \"\"\"Custom function to extract gene information from annotation text including RefSeq IDs\"\"\"\n", " if not isinstance(text, str):\n", " return None\n", " \n", " # Accept RefSeq IDs for this dataset since that's what we have\n", " if isinstance(text, str) and text.startswith(('NR_', 'XR_', 'NM_', 'XM_')):\n", " return text\n", " \n", " return None\n", "\n", "# Use a direct mapping from probe IDs to the GB_ACC column which contains RefSeq IDs\n", "# Create a simple mapping dataframe with ID and GB_ACC columns\n", "mapping_df = gene_annotation[['ID', 'GB_ACC']].copy()\n", "mapping_df = mapping_df.dropna()\n", "mapping_df = mapping_df.rename(columns={'GB_ACC': 'Gene'})\n", "print(\"\\nGene mapping dataframe preview:\")\n", "print(preview_df(mapping_df))\n", "\n", "# Count how many valid mappings we have\n", "print(f\"Number of probes with gene mappings: {len(mapping_df)}\")\n", "\n", "# 3. Convert probe-level measurements to gene expression data\n", "# We'll customize the mapping approach since we're working with RefSeq IDs\n", "# instead of standard gene symbols\n", "\n", "# First ensure our gene_data has the right index type\n", "gene_data.index = gene_data.index.astype(str)\n", "\n", "# Create a mapping dictionary from probe ID to RefSeq ID\n", "id_to_gene = dict(zip(mapping_df['ID'], mapping_df['Gene']))\n", "\n", "# Apply a direct mapping - simpler than the extract_human_gene_symbols approach\n", "# since we're working with RefSeq IDs directly\n", "gene_values = {}\n", "\n", "# For each probe in our expression data\n", "for probe_id in gene_data.index:\n", " if probe_id in id_to_gene:\n", " gene = id_to_gene[probe_id]\n", " # If this is the first time we've seen this gene, initialize its values\n", " if gene not in gene_values:\n", " gene_values[gene] = gene_data.loc[probe_id].values\n", " else:\n", " # Otherwise, add the probe's values to the gene's existing values\n", " gene_values[gene] = gene_values[gene] + gene_data.loc[probe_id].values\n", "\n", "# Convert our dictionary to a DataFrame\n", "gene_data_mapped = pd.DataFrame(gene_values, index=gene_data.columns).T\n", "\n", "# 5. 
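Note on aggregation: probes that share a RefSeq accession are summed above; a vectorized sketch with\n",
"# the same behaviour (assuming string probe IDs on both sides) would be\n",
"#   gene_data.join(mapping_df.set_index('ID')['Gene']).groupby('Gene').sum()\n",
"\n",
"# 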
Preview the resulting gene expression data\n", "print(\"\\nGene expression data preview (after mapping):\")\n", "print(f\"Shape: {gene_data_mapped.shape}\")\n", "if len(gene_data_mapped) > 0:\n", " print(f\"First 5 gene symbols: {gene_data_mapped.index[:5].tolist()}\")\n", "else:\n", " print(\"No genes were mapped. This dataset may not have proper gene annotations.\")\n", "\n", "# 6. Save the processed gene expression data\n", "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", "gene_data_mapped.to_csv(out_gene_data_file)\n", "print(f\"\\nGene expression data saved to: {out_gene_data_file}\")\n", "\n", "# In case the mapping didn't work and we got an empty dataframe, \n", "# let's provide a fallback - save the original probe-level data\n", "if len(gene_data_mapped) == 0:\n", " print(\"\\nWARNING: No genes were mapped. Saving probe-level data as a fallback.\")\n", " gene_data.to_csv(out_gene_data_file)\n", " print(f\"Probe-level data saved to: {out_gene_data_file}\")\n", " \n", " # Flag that no proper gene mapping was available for this dataset\n", " print(\"\\nThis dataset lacks proper gene symbol annotations in the provided files.\")\n", " is_gene_available = False\n", " \n", " # Update metadata\n", " is_trait_available = False # Already determined to be False in Step 2\n", " validate_and_save_cohort_info(\n", " is_final=True, \n", " cohort=cohort, \n", " info_path=json_path, \n", " is_gene_available=is_gene_available, \n", " is_trait_available=is_trait_available,\n", " is_biased=None,\n", " df=pd.DataFrame(),\n", " note=\"Dataset lacks proper gene symbol annotations and trait information.\"\n", " )\n", "else:\n", " print(f\"Successfully mapped {len(gene_data_mapped)} genes.\")\n" ] }, { "cell_type": "markdown", "id": "c480ee87", "metadata": {}, "source": [ "### Step 7: Data Normalization and Linking" ] }, { "cell_type": "code", "execution_count": 8, "id": "6d74c910", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T03:57:24.483544Z", "iopub.status.busy": "2025-03-25T03:57:24.483430Z", "iopub.status.idle": "2025-03-25T03:57:26.170884Z", "shell.execute_reply": "2025-03-25T03:57:26.170257Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Gene data shape before processing: (16346, 172)\n", "Using RefSeq IDs as gene identifiers since they cannot be normalized to standard gene symbols.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Gene data with RefSeq IDs saved to ../../output/preprocess/Sickle_Cell_Anemia/gene_data/GSE84632.csv\n", "Trait row is None. Cannot extract trait information from clinical data.\n", "Abnormality detected in the cohort: GSE84632. Preprocessing failed.\n", "Data was determined to be unusable due to missing trait indicators and was not saved\n" ] } ], "source": [ "# 1. 
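Note: turning these RefSeq accessions into HGNC gene symbols would need an external accession-to-symbol\n",
"# table (the platform annotation only provides GB_ACC), so the matrix stays keyed by accession.\n",
"# 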
Get the most recently created gene data from previous step\n", "# Instead of trying to normalize with NCBI gene symbols (which doesn't work for RefSeq IDs),\n", "# we'll use the gene_data_mapped from the previous step\n", "print(f\"Gene data shape before processing: {gene_data_mapped.shape}\")\n", "\n", "# We can't normalize RefSeq IDs with the standard gene symbol dictionary\n", "# So we'll skip the normalization step and work with the RefSeq IDs directly\n", "print(\"Using RefSeq IDs as gene identifiers since they cannot be normalized to standard gene symbols.\")\n", "normalized_gene_data = gene_data_mapped\n", "\n", "# Save the mapped gene data (using RefSeq IDs)\n", "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", "normalized_gene_data.to_csv(out_gene_data_file)\n", "print(f\"Gene data with RefSeq IDs saved to {out_gene_data_file}\")\n", "\n", "# 2. Check if trait data is available before proceeding with clinical data extraction\n", "if trait_row is None:\n", " print(\"Trait row is None. Cannot extract trait information from clinical data.\")\n", " # Create an empty dataframe for clinical features\n", " clinical_features = pd.DataFrame()\n", " \n", " # Create an empty dataframe for linked data\n", " linked_data = pd.DataFrame()\n", " \n", " # Validate and save cohort info\n", " validate_and_save_cohort_info(\n", " is_final=True, \n", " cohort=cohort, \n", " info_path=json_path, \n", " is_gene_available=True, \n", " is_trait_available=False, # Trait data is not available\n", " is_biased=True, # Not applicable but required\n", " df=pd.DataFrame(), # Empty dataframe\n", " note=f\"Dataset contains gene expression data with RefSeq IDs but lacks clear trait indicators for {trait} status.\"\n", " )\n", " print(\"Data was determined to be unusable due to missing trait indicators and was not saved\")\n", "else:\n", " try:\n", " # Get the file paths for the matrix file to extract clinical data\n", " _, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", " \n", " # Get raw clinical data from the matrix file\n", " _, clinical_raw = get_background_and_clinical_data(matrix_file)\n", " \n", " # Verify clinical data structure\n", " print(\"Raw clinical data shape:\", clinical_raw.shape)\n", " \n", " # Extract clinical features using the defined conversion functions\n", " clinical_features = geo_select_clinical_features(\n", " clinical_df=clinical_raw,\n", " trait=trait,\n", " trait_row=trait_row,\n", " convert_trait=convert_trait,\n", " age_row=age_row,\n", " convert_age=convert_age,\n", " gender_row=gender_row,\n", " convert_gender=convert_gender\n", " )\n", " \n", " print(\"Clinical features:\")\n", " print(clinical_features)\n", " \n", " # Save clinical features to file\n", " os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n", " clinical_features.to_csv(out_clinical_data_file)\n", " print(f\"Clinical features saved to {out_clinical_data_file}\")\n", " \n", " # 3. Link clinical and genetic data\n", " linked_data = geo_link_clinical_genetic_data(clinical_features, normalized_gene_data)\n", " print(f\"Linked data shape: {linked_data.shape}\")\n", " print(\"Linked data preview (first 5 rows, first 5 columns):\")\n", " print(linked_data.iloc[:5, :5])\n", " \n", " # 4. 
Handle missing values\n", " print(\"Missing values before handling:\")\n", " print(f\" Trait ({trait}) missing: {linked_data[trait].isna().sum()} out of {len(linked_data)}\")\n", " if 'Age' in linked_data.columns:\n", " print(f\" Age missing: {linked_data['Age'].isna().sum()} out of {len(linked_data)}\")\n", " if 'Gender' in linked_data.columns:\n", " print(f\" Gender missing: {linked_data['Gender'].isna().sum()} out of {len(linked_data)}\")\n", " \n", " gene_cols = [col for col in linked_data.columns if col not in [trait, 'Age', 'Gender']]\n", " print(f\" Genes with >20% missing: {sum(linked_data[gene_cols].isna().mean() > 0.2)}\")\n", " print(f\" Samples with >5% missing genes: {sum(linked_data[gene_cols].isna().mean(axis=1) > 0.05)}\")\n", " \n", " cleaned_data = handle_missing_values(linked_data, trait)\n", " print(f\"Data shape after handling missing values: {cleaned_data.shape}\")\n", " \n", " # 5. Evaluate bias in trait and demographic features\n", " is_trait_biased = False\n", " if len(cleaned_data) > 0:\n", " trait_biased, cleaned_data = judge_and_remove_biased_features(cleaned_data, trait)\n", " is_trait_biased = trait_biased\n", " else:\n", " print(\"No data remains after handling missing values.\")\n", " is_trait_biased = True\n", " \n", " # 6. Final validation and save\n", " is_usable = validate_and_save_cohort_info(\n", " is_final=True, \n", " cohort=cohort, \n", " info_path=json_path, \n", " is_gene_available=True, \n", " is_trait_available=True, \n", " is_biased=is_trait_biased, \n", " df=cleaned_data,\n", " note=f\"Dataset contains only {trait} patients with no healthy controls, making it unsuitable for case-control analysis.\"\n", " )\n", " \n", " # 7. Save if usable\n", " if is_usable and len(cleaned_data) > 0:\n", " os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n", " cleaned_data.to_csv(out_data_file)\n", " print(f\"Linked data saved to {out_data_file}\")\n", " else:\n", " print(\"Data was determined to be unusable or empty and was not saved\")\n", " \n", " except Exception as e:\n", " print(f\"Error processing data: {e}\")\n", " # Handle the error case by still recording cohort info\n", " validate_and_save_cohort_info(\n", " is_final=True, \n", " cohort=cohort, \n", " info_path=json_path, \n", " is_gene_available=True, \n", " is_trait_available=False, # Mark as not available due to processing issues\n", " is_biased=True, \n", " df=pd.DataFrame(), # Empty dataframe\n", " note=f\"Error processing data for {trait}: {str(e)}\"\n", " )\n", " print(\"Data was determined to be unusable and was not saved\")" ] } ], "metadata": { "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.16" } }, "nbformat": 4, "nbformat_minor": 5 }