{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "f4759428", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T04:28:45.514691Z", "iopub.status.busy": "2025-03-25T04:28:45.514465Z", "iopub.status.idle": "2025-03-25T04:28:45.684345Z", "shell.execute_reply": "2025-03-25T04:28:45.683893Z" } }, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n", "\n", "# Path Configuration\n", "from tools.preprocess import *\n", "\n", "# Processing context\n", "trait = \"Underweight\"\n", "cohort = \"GSE130563\"\n", "\n", "# Input paths\n", "in_trait_dir = \"../../input/GEO/Underweight\"\n", "in_cohort_dir = \"../../input/GEO/Underweight/GSE130563\"\n", "\n", "# Output paths\n", "out_data_file = \"../../output/preprocess/Underweight/GSE130563.csv\"\n", "out_gene_data_file = \"../../output/preprocess/Underweight/gene_data/GSE130563.csv\"\n", "out_clinical_data_file = \"../../output/preprocess/Underweight/clinical_data/GSE130563.csv\"\n", "json_path = \"../../output/preprocess/Underweight/cohort_info.json\"\n" ] }, { "cell_type": "markdown", "id": "711312f0", "metadata": {}, "source": [ "### Step 1: Initial Data Loading" ] }, { "cell_type": "code", "execution_count": 2, "id": "195a27ac", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T04:28:45.685631Z", "iopub.status.busy": "2025-03-25T04:28:45.685477Z", "iopub.status.idle": "2025-03-25T04:28:45.784260Z", "shell.execute_reply": "2025-03-25T04:28:45.783842Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Files in the cohort directory:\n", "['GSE130563_family.soft.gz', 'GSE130563_series_matrix.txt.gz']\n", "Identified SOFT files: ['GSE130563_family.soft.gz']\n", "Identified matrix files: ['GSE130563_series_matrix.txt.gz']\n", "\n", "Background Information:\n", "!Series_title\t\"Skeletal Muscle Fibrosis in Pancreatic Cancer Patients with Respect to Survival\"\n", "!Series_summary\t\"Skeletal muscle wasting is a devastating consequence of cancer that may be responsible for nearly 30% of cancer-related deaths. In addition to muscle atrophy, we have identified significant muscle fiber damage and replacement of muscle with fibrotic tissue in rectus abdominis muscle biopsies from cachectic pancreatic ductal adenocarcinoma (PDAC) patients that associates with poor survival. Transcriptional profiling of muscle harvested from these same patients supported these findings by identifying gene clusters related to wounding, inflammation and cellular response to TGF-B upregulated in cachectic PDAC patients compared with non-cancer controls.\"\n", "!Series_summary\t\"In this dataset, we include the expression data obtained from rectus abdominis muscle biopsies fron non-cancer controls patients undergoing abdominal surgery for benign reasons and from PDAC patients undergoing tumor-resection surgery. PDAC patients were further classified as non-cachectic or cachectic. Cachexia was defined as a body weight loss of >5% during the 6 months prior to surgery. The purpose of this study was to identify the broader transcriptional networks changed in cachectic PDAC patients versus non-cancer controls, that may be associated with the histological changes observed in muscle biopsies harvested from these same patients.\"\n", "!Series_overall_design\t\"For microarray analysis, a total of 46 RNA samples across four groups are included. The groups are as follows: Non-cancer control patients (n = 16); Chronic pancreatitis patients (n = 8); Non-cachectic PDAC patients (n = 5); Cachectic PDAC patients (n = 17). To identify broader gene networks changed in cachectic PDAC patients that may be associated with histological findings of muscle damage and replacement of muscle with fat and fibrotic tissue, we performed differential expression analysis between non-cancer controls and cachectic PDAC patients, and between non-cancer controls and non-cachectic PDAC patients. PDAC patients receiving Stage IV diagnosis were excluded from analyses. Due to the inflammatory nature of chronic pancreatitis, patients diagnosed with chronic pancreatitis were not included in the non-cancer control group and were excluded from analyses.\"\n", "\n", "Sample Characteristics Dictionary:\n", "{0: ['diagnosis: mucinous cystadenoma', 'diagnosis: squamoid cyst', 'diagnosis: IPMN', 'diagnosis: bile duct injury', 'diagnosis: choledocholithiasis s/p cholecystectomy', 'diagnosis: common bile duct stricture', 'diagnosis: cholecystitis', 'diagnosis: bile duct injury and acute pancreatic necrosis', 'diagnosis: open cholecystectomy', 'diagnosis: acute pancreatic necrosis', 'diagnosis: sclerosing cholangitis', 'diagnosis: stricture of choledochojejunostomy', 'diagnosis: common bile duct injury', 'diagnosis: choledochal cyst', 'diagnosis: pancreatic ductal adenocarcinoma', 'diagnosis: chronic pancreatitis'], 1: ['Sex: F', 'Sex: M'], 2: ['tnm: n/a', 'tnm: pT3N1M0', 'tnm: pT3N0M0', 'tnm: Stage IV', 'tnm: pT4N1M0', 'tnm: pT1N0M0', 'tnm: pT2N1M0'], 3: ['bw loss in 6 months prior to surgery: 0', 'bw loss in 6 months prior to surgery: 4', 'bw loss in 6 months prior to surgery: 3', 'bw loss in 6 months prior to surgery: 6.5', 'bw loss in 6 months prior to surgery: 11.1', 'bw loss in 6 months prior to surgery: 10', 'bw loss in 6 months prior to surgery: 18', 'bw loss in 6 months prior to surgery: 16', 'bw loss in 6 months prior to surgery: 12.8', 'bw loss in 6 months prior to surgery: 10.6', 'bw loss in 6 months prior to surgery: 17.8', 'bw loss in 6 months prior to surgery: 6.7', 'bw loss in 6 months prior to surgery: 16.6', 'bw loss in 6 months prior to surgery: 32.3', 'bw loss in 6 months prior to surgery: 14.6', 'bw loss in 6 months prior to surgery: 5.9', 'bw loss in 6 months prior to surgery: 9.7', 'bw loss in 6 months prior to surgery: 15.4', 'bw loss in 6 months prior to surgery: 14.7', 'bw loss in 6 months prior to surgery: 19.2', 'bw loss in 6 months prior to surgery: 11.8', 'bw loss in 6 months prior to surgery: 33.3', 'bw loss in 6 months prior to surgery: 29.4', 'bw loss in 6 months prior to surgery: n.d. (not determined)'], 4: ['age: 33', 'age: 68', 'age: 73', 'age: 49', 'age: 78', 'age: 57', 'age: 55', 'age: 50', 'age: 47', 'age: 63', 'age: 51', 'age: 69', 'age: 60', 'age: 66', 'age: 54', 'age: 64', 'age: 76', 'age: 56', 'age: 80', 'age: 79', 'age: 72', 'age: 52', 'age: 74', 'age: 77', 'age: 70', 'age: 59', 'age: 30', 'age: 45', 'age: 58'], 5: ['neoadjuvant therapy (y/n): n/a', 'neoadjuvant therapy (y/n): Y', 'neoadjuvant therapy (y/n): N'], 6: ['survival post-surgery (or days elapsed since surgery, if still alive at time of censor): n.d. (not determined)', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 187', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 178', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 170', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 268', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 220', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 1016', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 318', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 1097', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 73', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 802', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 55', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 637', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 620', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 15', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 505', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 449', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 305', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 851', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 403', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 366', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 132', 'survival post-surgery (or days elapsed since surgery, if still alive at time of censor): 367'], 7: ['tissue: rectus abdominis muscle']}\n" ] } ], "source": [ "# 1. Let's first list the directory contents to understand what files are available\n", "import os\n", "\n", "print(\"Files in the cohort directory:\")\n", "files = os.listdir(in_cohort_dir)\n", "print(files)\n", "\n", "# Adapt file identification to handle different naming patterns\n", "soft_files = [f for f in files if 'soft' in f.lower() or '.soft' in f.lower() or '_soft' in f.lower()]\n", "matrix_files = [f for f in files if 'matrix' in f.lower() or '.matrix' in f.lower() or '_matrix' in f.lower()]\n", "\n", "# If no files with these patterns are found, look for alternative file types\n", "if not soft_files:\n", " soft_files = [f for f in files if f.endswith('.txt') or f.endswith('.gz')]\n", "if not matrix_files:\n", " matrix_files = [f for f in files if f.endswith('.txt') or f.endswith('.gz')]\n", "\n", "print(\"Identified SOFT files:\", soft_files)\n", "print(\"Identified matrix files:\", matrix_files)\n", "\n", "# Use the first files found, if any\n", "if len(soft_files) > 0 and len(matrix_files) > 0:\n", " soft_file = os.path.join(in_cohort_dir, soft_files[0])\n", " matrix_file = os.path.join(in_cohort_dir, matrix_files[0])\n", " \n", " # 2. Read the matrix file to obtain background information and sample characteristics data\n", " background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n", " clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", " background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", " \n", " # 3. Obtain the sample characteristics dictionary from the clinical dataframe\n", " sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", " \n", " # 4. Explicitly print out all the background information and the sample characteristics dictionary\n", " print(\"\\nBackground Information:\")\n", " print(background_info)\n", " print(\"\\nSample Characteristics Dictionary:\")\n", " print(sample_characteristics_dict)\n", "else:\n", " print(\"No appropriate files found in the directory.\")\n" ] }, { "cell_type": "markdown", "id": "85d33185", "metadata": {}, "source": [ "### Step 2: Dataset Analysis and Clinical Feature Extraction" ] }, { "cell_type": "code", "execution_count": 3, "id": "b9e33127", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T04:28:45.785567Z", "iopub.status.busy": "2025-03-25T04:28:45.785451Z", "iopub.status.idle": "2025-03-25T04:28:45.796915Z", "shell.execute_reply": "2025-03-25T04:28:45.796484Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "{'GSM3743555': [0.0, 33.0, 0.0], 'GSM3743556': [0.0, 68.0, 0.0], 'GSM3743557': [0.0, 73.0, 0.0], 'GSM3743558': [0.0, 49.0, 0.0], 'GSM3743559': [0.0, 78.0, 0.0], 'GSM3743560': [0.0, 57.0, 0.0], 'GSM3743561': [0.0, 55.0, 0.0], 'GSM3743562': [0.0, 50.0, 0.0], 'GSM3743563': [0.0, 47.0, 0.0], 'GSM3743564': [0.0, 63.0, 0.0], 'GSM3743565': [0.0, 51.0, 1.0], 'GSM3743566': [0.0, 50.0, 1.0], 'GSM3743567': [1.0, 69.0, 1.0], 'GSM3743568': [0.0, 50.0, 1.0], 'GSM3743569': [1.0, 60.0, 0.0], 'GSM3743570': [1.0, 68.0, 0.0], 'GSM3743571': [1.0, 66.0, 1.0], 'GSM3743572': [1.0, 54.0, 1.0], 'GSM3743573': [1.0, 64.0, 0.0], 'GSM3743574': [1.0, 76.0, 0.0], 'GSM3743575': [1.0, 68.0, 0.0], 'GSM3743576': [1.0, 73.0, 1.0], 'GSM3743577': [1.0, 56.0, 0.0], 'GSM3743578': [1.0, 80.0, 0.0], 'GSM3743579': [1.0, 68.0, 0.0], 'GSM3743580': [1.0, 79.0, 0.0], 'GSM3743581': [1.0, 72.0, 1.0], 'GSM3743582': [1.0, 52.0, 0.0], 'GSM3743583': [1.0, 74.0, 1.0], 'GSM3743584': [1.0, 74.0, 0.0], 'GSM3743585': [1.0, 55.0, 1.0], 'GSM3743586': [1.0, 56.0, 1.0], 'GSM3743587': [1.0, 77.0, 0.0], 'GSM3743588': [0.0, 70.0, 1.0], 'GSM3743589': [0.0, 70.0, 1.0], 'GSM3743590': [0.0, 63.0, 1.0], 'GSM3743591': [0.0, 59.0, 0.0], 'GSM3743592': [0.0, 74.0, 1.0], 'GSM3743593': [nan, 30.0, 1.0], 'GSM3743594': [nan, 51.0, 1.0], 'GSM3743595': [nan, 55.0, 1.0], 'GSM3743596': [nan, 55.0, 0.0], 'GSM3743597': [nan, 45.0, 0.0], 'GSM3743598': [nan, 58.0, 0.0], 'GSM3743599': [nan, 50.0, 1.0], 'GSM3743600': [nan, 54.0, 1.0]}\n" ] } ], "source": [ "# 1. Gene Expression Data Availability\n", "# Based on the background information, this dataset contains gene expression data from\n", "# skeletal muscle (rectus abdominis) biopsies. Not miRNA or methylation data.\n", "is_gene_available = True\n", "\n", "# 2. Variable Availability and Data Type Conversion\n", "# 2.1 Data Availability\n", "\n", "# For trait (Underweight):\n", "# Looking at the data, row 3 contains \"bw loss in 6 months prior to surgery\" which can be used to determine underweight status\n", "# The background mentions \"Cachexia was defined as a body weight loss of >5% during the 6 months prior to surgery\"\n", "trait_row = 3\n", "\n", "# For age:\n", "# Row 4 contains age information\n", "age_row = 4\n", "\n", "# For gender:\n", "# Row 1 contains sex information\n", "gender_row = 1\n", "\n", "# 2.2 Data Type Conversion\n", "\n", "def convert_trait(value):\n", " \"\"\"\n", " Convert body weight loss value to Underweight status (binary)\n", " Cachexia/Underweight defined as body weight loss >5% in 6 months prior to surgery\n", " \"\"\"\n", " # Extract the value after the colon\n", " if ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " \n", " if value == 'n.d. (not determined)':\n", " return None\n", " \n", " try:\n", " # Convert to float and determine if >5% (cachexia/underweight)\n", " bw_loss = float(value)\n", " # According to the background info, >5% weight loss = cachectic/underweight\n", " return 1 if bw_loss > 5 else 0\n", " except:\n", " return None\n", "\n", "def convert_age(value):\n", " \"\"\"Convert age value to continuous numeric value\"\"\"\n", " if ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " \n", " try:\n", " return float(value)\n", " except:\n", " return None\n", "\n", "def convert_gender(value):\n", " \"\"\"Convert sex to binary (0=female, 1=male)\"\"\"\n", " if ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " \n", " if value.upper() == 'F':\n", " return 0\n", " elif value.upper() == 'M':\n", " return 1\n", " else:\n", " return None\n", "\n", "# 3. Save Metadata\n", "# Determine trait data availability\n", "is_trait_available = trait_row is not None\n", "validate_and_save_cohort_info(\n", " is_final=False,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", ")\n", "\n", "# 4. Clinical Feature Extraction\n", "if trait_row is not None:\n", " # The clinical_data is already available from the previous step\n", " # Extract clinical features using the provided clinical_data\n", " selected_clinical_df = geo_select_clinical_features(\n", " clinical_df=clinical_data,\n", " trait=trait,\n", " trait_row=trait_row,\n", " convert_trait=convert_trait,\n", " age_row=age_row,\n", " convert_age=convert_age,\n", " gender_row=gender_row,\n", " convert_gender=convert_gender\n", " )\n", " \n", " # Preview the processed clinical data\n", " print(preview_df(selected_clinical_df))\n", " \n", " # Save the clinical data to CSV\n", " os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n", " selected_clinical_df.to_csv(out_clinical_data_file)\n" ] }, { "cell_type": "markdown", "id": "d0d4311e", "metadata": {}, "source": [ "### Step 3: Gene Data Extraction" ] }, { "cell_type": "code", "execution_count": 4, "id": "403a24c1", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T04:28:45.798396Z", "iopub.status.busy": "2025-03-25T04:28:45.798275Z", "iopub.status.idle": "2025-03-25T04:28:45.936223Z", "shell.execute_reply": "2025-03-25T04:28:45.935679Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "First 20 gene/probe identifiers:\n", "Index(['100009613_at', '100009676_at', '10000_at', '10001_at', '10002_at',\n", " '100033413_at', '100033422_at', '100033423_at', '100033424_at',\n", " '100033425_at', '100033426_at', '100033427_at', '100033428_at',\n", " '100033430_at', '100033431_at', '100033432_at', '100033434_at',\n", " '100033435_at', '100033436_at', '100033437_at'],\n", " dtype='object', name='ID')\n", "\n", "Gene expression data shape: (23786, 46)\n" ] } ], "source": [ "# Use the helper function to get the proper file paths\n", "soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# Extract gene expression data\n", "try:\n", " gene_data = get_genetic_data(matrix_file_path)\n", " \n", " # Print the first 20 row IDs (gene or probe identifiers)\n", " print(\"First 20 gene/probe identifiers:\")\n", " print(gene_data.index[:20])\n", " \n", " # Print shape to understand the dataset dimensions\n", " print(f\"\\nGene expression data shape: {gene_data.shape}\")\n", " \n", "except Exception as e:\n", " print(f\"Error extracting gene data: {e}\")\n" ] }, { "cell_type": "markdown", "id": "fe833edc", "metadata": {}, "source": [ "### Step 4: Gene Identifier Review" ] }, { "cell_type": "code", "execution_count": 5, "id": "1bdf78e3", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T04:28:45.937693Z", "iopub.status.busy": "2025-03-25T04:28:45.937577Z", "iopub.status.idle": "2025-03-25T04:28:45.939760Z", "shell.execute_reply": "2025-03-25T04:28:45.939390Z" } }, "outputs": [], "source": [ "# Reviewing the gene identifiers\n", "# The identifiers like '100009613_at', '10000_at' appear to be probe IDs from a microarray\n", "# They have a numeric component followed by '_at' which is a typical format for Affymetrix probe IDs\n", "# These are not standard human gene symbols and would need to be mapped to gene symbols\n", "\n", "requires_gene_mapping = True\n" ] }, { "cell_type": "markdown", "id": "528a5a60", "metadata": {}, "source": [ "### Step 5: Gene Annotation" ] }, { "cell_type": "code", "execution_count": 6, "id": "f410b1f5", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T04:28:45.941108Z", "iopub.status.busy": "2025-03-25T04:28:45.940996Z", "iopub.status.idle": "2025-03-25T04:28:47.123705Z", "shell.execute_reply": "2025-03-25T04:28:47.123100Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Gene annotation preview:\n", "{'ID': ['1_at', '10_at', '100_at', '1000_at', '10000_at'], 'ORF': ['1', '10', '100', '1000', '10000']}\n" ] } ], "source": [ "# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.\n", "try:\n", " # Use the correct variable name from previous steps\n", " gene_annotation = get_gene_annotation(soft_file_path)\n", " \n", " # 2. Preview the gene annotation dataframe\n", " print(\"Gene annotation preview:\")\n", " print(preview_df(gene_annotation))\n", " \n", "except UnicodeDecodeError as e:\n", " print(f\"Unicode decoding error: {e}\")\n", " print(\"Trying alternative approach...\")\n", " \n", " # Read the file with Latin-1 encoding which is more permissive\n", " import gzip\n", " import pandas as pd\n", " \n", " # Manually read the file line by line with error handling\n", " data_lines = []\n", " with gzip.open(soft_file_path, 'rb') as f:\n", " for line in f:\n", " # Skip lines starting with prefixes we want to filter out\n", " line_str = line.decode('latin-1')\n", " if not line_str.startswith('^') and not line_str.startswith('!') and not line_str.startswith('#'):\n", " data_lines.append(line_str)\n", " \n", " # Create dataframe from collected lines\n", " if data_lines:\n", " gene_data_str = '\\n'.join(data_lines)\n", " gene_annotation = pd.read_csv(pd.io.common.StringIO(gene_data_str), sep='\\t', low_memory=False)\n", " print(\"Gene annotation preview (alternative method):\")\n", " print(preview_df(gene_annotation))\n", " else:\n", " print(\"No valid gene annotation data found after filtering.\")\n", " gene_annotation = pd.DataFrame()\n", " \n", "except Exception as e:\n", " print(f\"Error extracting gene annotation data: {e}\")\n", " gene_annotation = pd.DataFrame()\n" ] }, { "cell_type": "markdown", "id": "c424b388", "metadata": {}, "source": [ "### Step 6: Gene Identifier Mapping" ] }, { "cell_type": "code", "execution_count": 7, "id": "ed0dc9de", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T04:28:47.125582Z", "iopub.status.busy": "2025-03-25T04:28:47.125453Z", "iopub.status.idle": "2025-03-25T04:28:48.100804Z", "shell.execute_reply": "2025-03-25T04:28:48.100169Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found platform ID: GPL17930\n", "\n", "Looking for gene symbols in platform data...\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Error extracting platform data: Error tokenizing data. C error: Expected 1 fields in line 4, saw 2\n", "\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "Saved gene expression data to ../../output/preprocess/Underweight/gene_data/GSE130563.csv\n" ] } ], "source": [ "# 1. Analyze the gene identifiers and gene annotation data\n", "# First, let's check more thoroughly for gene symbol information in the SOFT file\n", "\n", "# Look for gene symbols in the SOFT file\n", "gene_symbols_found = False\n", "platform_id = None\n", "\n", "try:\n", " # Check first few lines of the SOFT file for platform information\n", " with gzip.open(soft_file_path, 'rt') as f:\n", " for i, line in enumerate(f):\n", " if \"^PLATFORM\" in line:\n", " # Extract platform ID\n", " platform_id = line.strip().split(\"=\")[1]\n", " print(f\"Found platform ID: {platform_id}\")\n", " if \"Gene symbol\" in line or \"gene_symbol\" in line or \"SYMBOL\" in line:\n", " print(f\"Found potential gene symbol reference at line {i}: {line.strip()}\")\n", " gene_symbols_found = True\n", " if i > 200: # Check first 200 lines\n", " break\n", "except Exception as e:\n", " print(f\"Error reading SOFT file: {e}\")\n", "\n", "# If gene symbols weren't found in annotation, let's extract platform annotation data\n", "if not gene_symbols_found:\n", " print(\"\\nLooking for gene symbols in platform data...\")\n", " \n", " try:\n", " # Read the SOFT file to extract platform data\n", " platform_lines = []\n", " in_platform = False\n", " with gzip.open(soft_file_path, 'rt') as f:\n", " for line in f:\n", " if line.startswith('^PLATFORM'):\n", " in_platform = True\n", " elif line.startswith('^'):\n", " in_platform = False\n", " if in_platform and line.startswith('!Platform_table_begin'):\n", " platform_lines = []\n", " continue\n", " if in_platform and line.startswith('!Platform_table_end'):\n", " break\n", " if in_platform and platform_lines is not None and not line.startswith('!'):\n", " platform_lines.append(line.strip())\n", " \n", " if platform_lines:\n", " # Create a DataFrame from the platform data\n", " platform_data = pd.read_csv(io.StringIO('\\n'.join(platform_lines)), sep='\\t')\n", " print(\"Platform data columns:\", platform_data.columns.tolist())\n", " \n", " # Look for ID and gene symbol columns\n", " id_col = next((col for col in platform_data.columns if 'ID' in col or 'id' in col), None)\n", " gene_symbol_cols = [col for col in platform_data.columns if 'symbol' in col.lower() or 'gene' in col.lower()]\n", " \n", " if id_col and gene_symbol_cols:\n", " print(f\"Found ID column: {id_col}\")\n", " print(f\"Found potential gene symbol columns: {gene_symbol_cols}\")\n", " \n", " # Preview the potential gene symbol column contents\n", " for col in gene_symbol_cols:\n", " print(f\"\\nSample values from {col}:\")\n", " print(platform_data[col].head(5).tolist())\n", " \n", " # Choose the most appropriate gene symbol column\n", " gene_symbol_col = gene_symbol_cols[0]\n", " \n", " # Create a mapping DataFrame\n", " mapping_df = platform_data[[id_col, gene_symbol_col]].copy()\n", " mapping_df.columns = ['ID', 'Gene']\n", " \n", " print(\"\\nGene mapping dataframe preview:\")\n", " print(preview_df(mapping_df))\n", " \n", " # 3. Apply gene mapping to convert probe-level measurements to gene expression\n", " gene_data = apply_gene_mapping(gene_data, mapping_df)\n", " \n", " print(\"\\nFirst few rows of mapped gene expression data:\")\n", " print(preview_df(gene_data))\n", " \n", " # Print shape to see how many unique genes we have after mapping\n", " print(f\"\\nGene expression data shape after mapping: {gene_data.shape}\")\n", " else:\n", " print(\"Could not find appropriate ID and gene symbol columns in platform data.\")\n", " except Exception as e:\n", " print(f\"Error extracting platform data: {e}\")\n", "\n", "# If we still don't have gene symbols, use IDs as symbols and proceed\n", "if 'gene_data' not in locals() or gene_data.empty:\n", " print(\"\\nUsing original gene identifiers and extracting gene symbols from them...\")\n", " \n", " # Extract gene symbols from the probe IDs themselves\n", " # Assuming probe IDs follow a pattern like GENE_at (e.g., BRCA1_at)\n", " probe_ids = gene_data.index.tolist()\n", " gene_symbols = []\n", " \n", " for probe in probe_ids:\n", " # Try to extract gene symbol part from probe ID\n", " if '_at' in probe:\n", " symbol = probe.split('_at')[0]\n", " gene_symbols.append(symbol)\n", " else:\n", " gene_symbols.append(probe)\n", " \n", " # Create mapping DataFrame\n", " mapping_df = pd.DataFrame({\n", " 'ID': probe_ids,\n", " 'Gene': gene_symbols\n", " })\n", " \n", " print(\"\\nGene mapping dataframe preview (from probe IDs):\")\n", " print(preview_df(mapping_df))\n", " \n", " # Apply gene mapping\n", " gene_data = apply_gene_mapping(gene_data, mapping_df)\n", " \n", " print(\"\\nFirst few rows of mapped gene expression data:\")\n", " print(preview_df(gene_data))\n", " \n", " # Print shape to see how many unique genes we have after mapping\n", " print(f\"\\nGene expression data shape after mapping: {gene_data.shape}\")\n", "\n", "# Save the gene expression data to CSV\n", "if not gene_data.empty:\n", " os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", " gene_data.to_csv(out_gene_data_file)\n", " print(f\"\\nSaved gene expression data to {out_gene_data_file}\")\n", "else:\n", " print(\"\\nNo gene expression data to save.\")\n" ] }, { "cell_type": "markdown", "id": "b726f621", "metadata": {}, "source": [ "### Step 7: Data Normalization and Linking" ] }, { "cell_type": "code", "execution_count": 8, "id": "e6779aac", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T04:28:48.102660Z", "iopub.status.busy": "2025-03-25T04:28:48.102520Z", "iopub.status.idle": "2025-03-25T04:29:00.737927Z", "shell.execute_reply": "2025-03-25T04:29:00.737453Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Gene data shape after manual mapping: (23786, 46)\n", "First few gene symbols after manual mapping: ['1', '10', '100', '1000', '10000', '100009613', '100009676', '10001', '10002', '10003']\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Mapped gene data saved to ../../output/preprocess/Underweight/gene_data/GSE130563.csv\n", "Clinical features extracted:\n", "{'GSM3743555': [0.0, 33.0, 0.0], 'GSM3743556': [0.0, 68.0, 0.0], 'GSM3743557': [0.0, 73.0, 0.0], 'GSM3743558': [0.0, 49.0, 0.0], 'GSM3743559': [0.0, 78.0, 0.0], 'GSM3743560': [0.0, 57.0, 0.0], 'GSM3743561': [0.0, 55.0, 0.0], 'GSM3743562': [0.0, 50.0, 0.0], 'GSM3743563': [0.0, 47.0, 0.0], 'GSM3743564': [0.0, 63.0, 0.0], 'GSM3743565': [0.0, 51.0, 1.0], 'GSM3743566': [0.0, 50.0, 1.0], 'GSM3743567': [1.0, 69.0, 1.0], 'GSM3743568': [0.0, 50.0, 1.0], 'GSM3743569': [1.0, 60.0, 0.0], 'GSM3743570': [1.0, 68.0, 0.0], 'GSM3743571': [1.0, 66.0, 1.0], 'GSM3743572': [1.0, 54.0, 1.0], 'GSM3743573': [1.0, 64.0, 0.0], 'GSM3743574': [1.0, 76.0, 0.0], 'GSM3743575': [1.0, 68.0, 0.0], 'GSM3743576': [1.0, 73.0, 1.0], 'GSM3743577': [1.0, 56.0, 0.0], 'GSM3743578': [1.0, 80.0, 0.0], 'GSM3743579': [1.0, 68.0, 0.0], 'GSM3743580': [1.0, 79.0, 0.0], 'GSM3743581': [1.0, 72.0, 1.0], 'GSM3743582': [1.0, 52.0, 0.0], 'GSM3743583': [1.0, 74.0, 1.0], 'GSM3743584': [1.0, 74.0, 0.0], 'GSM3743585': [1.0, 55.0, 1.0], 'GSM3743586': [1.0, 56.0, 1.0], 'GSM3743587': [1.0, 77.0, 0.0], 'GSM3743588': [0.0, 70.0, 1.0], 'GSM3743589': [0.0, 70.0, 1.0], 'GSM3743590': [0.0, 63.0, 1.0], 'GSM3743591': [0.0, 59.0, 0.0], 'GSM3743592': [0.0, 74.0, 1.0], 'GSM3743593': [nan, 30.0, 1.0], 'GSM3743594': [nan, 51.0, 1.0], 'GSM3743595': [nan, 55.0, 1.0], 'GSM3743596': [nan, 55.0, 0.0], 'GSM3743597': [nan, 45.0, 0.0], 'GSM3743598': [nan, 58.0, 0.0], 'GSM3743599': [nan, 50.0, 1.0], 'GSM3743600': [nan, 54.0, 1.0]}\n", "Clinical data saved to ../../output/preprocess/Underweight/clinical_data/GSE130563.csv\n", "Linked data shape: (46, 23789)\n", "Linked data preview (first 5 columns):\n", " Underweight Age Gender 1 10\n", "GSM3743555 0.0 33.0 0.0 3.125861 1.056466\n", "GSM3743556 0.0 68.0 0.0 3.080549 1.210227\n", "GSM3743557 0.0 73.0 0.0 3.291881 1.358551\n", "GSM3743558 0.0 49.0 0.0 2.988209 1.276230\n", "GSM3743559 0.0 78.0 0.0 3.317610 1.075299\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Linked data shape after handling missing values: (38, 23789)\n", "For the feature 'Underweight', the least common label is '0.0' with 18 occurrences. This represents 47.37% of the dataset.\n", "The distribution of the feature 'Underweight' in this dataset is fine.\n", "\n", "Quartiles for 'Age':\n", " 25%: 55.0\n", " 50% (Median): 65.0\n", " 75%: 72.75\n", "Min: 33.0\n", "Max: 80.0\n", "The distribution of the feature 'Age' in this dataset is fine.\n", "\n", "For the feature 'Gender', the least common label is '1.0' with 15 occurrences. This represents 39.47% of the dataset.\n", "The distribution of the feature 'Gender' in this dataset is fine.\n", "\n", "Is trait biased: False\n", "A new JSON file was created at: ../../output/preprocess/Underweight/cohort_info.json\n", "Data quality check result: Usable\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Linked data saved to ../../output/preprocess/Underweight/GSE130563.csv\n" ] } ], "source": [ "# 1. Let's first check what's happening with the gene data\n", "# Instead of normalizing, which is causing an empty dataframe, \n", "# we'll use extract_human_gene_symbols to get symbols from probe IDs\n", "\n", "# Create a better mapping approach\n", "import re\n", "\n", "# Parse probe IDs to potential gene symbols\n", "# Example: \"100009613_at\" -> \"100009613\"\n", "mapping_dict = {}\n", "for probe_id in gene_data.index:\n", " # For Affymetrix probe IDs, we'll extract the numeric part \n", " # This isn't ideal but better than dropping all data\n", " if '_at' in probe_id:\n", " # Just remove the '_at' suffix\n", " potential_symbol = probe_id.split('_at')[0]\n", " mapping_dict[probe_id] = potential_symbol\n", " else:\n", " mapping_dict[probe_id] = probe_id\n", "\n", "# Create new gene data with these mappings\n", "new_gene_data = gene_data.copy()\n", "new_gene_data.index = [mapping_dict.get(idx, idx) for idx in new_gene_data.index]\n", "\n", "# Group by the new indices to handle duplicates\n", "new_gene_data = new_gene_data.groupby(level=0).mean()\n", "\n", "print(f\"Gene data shape after manual mapping: {new_gene_data.shape}\")\n", "print(f\"First few gene symbols after manual mapping: {list(new_gene_data.index[:10])}\")\n", "\n", "# Save this version of gene data without normalization\n", "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", "new_gene_data.to_csv(out_gene_data_file)\n", "print(f\"Mapped gene data saved to {out_gene_data_file}\")\n", "\n", "# 2. Extract clinical features using the functions defined in Step 2\n", "clinical_features = geo_select_clinical_features(\n", " clinical_data, \n", " trait=trait,\n", " trait_row=trait_row,\n", " convert_trait=convert_trait,\n", " age_row=age_row,\n", " convert_age=convert_age,\n", " gender_row=gender_row,\n", " convert_gender=convert_gender\n", ")\n", "\n", "print(\"Clinical features extracted:\")\n", "print(preview_df(clinical_features))\n", "\n", "# Save the clinical data\n", "os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n", "clinical_features.to_csv(out_clinical_data_file)\n", "print(f\"Clinical data saved to {out_clinical_data_file}\")\n", "\n", "# 3. Link clinical and genetic data\n", "linked_data = geo_link_clinical_genetic_data(clinical_features, new_gene_data)\n", "print(f\"Linked data shape: {linked_data.shape}\")\n", "print(\"Linked data preview (first 5 columns):\")\n", "sample_cols = list(linked_data.columns[:5]) if len(linked_data.columns) > 5 else list(linked_data.columns)\n", "print(linked_data[sample_cols].head())\n", "\n", "# 4. Handle missing values\n", "linked_data = handle_missing_values(linked_data, trait)\n", "print(f\"Linked data shape after handling missing values: {linked_data.shape}\")\n", "\n", "# Check if we still have data\n", "if linked_data.shape[0] == 0:\n", " print(\"WARNING: No samples left after handling missing values.\")\n", " is_trait_biased = True # Force as biased since we can't properly evaluate\n", " note = \"Dataset failed preprocessing: No samples left after handling missing values.\"\n", "else:\n", " # 5. Determine whether the trait and demographic features are biased\n", " is_trait_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)\n", " print(f\"Is trait biased: {is_trait_biased}\")\n", " note = \"This dataset contains gene expression data from rectus abdominis muscle biopsies, focusing on cachexia in pancreatic cancer patients.\"\n", "\n", "# 6. Conduct quality check and save the cohort information\n", "is_usable = validate_and_save_cohort_info(\n", " is_final=True, \n", " cohort=cohort, \n", " info_path=json_path, \n", " is_gene_available=True, \n", " is_trait_available=True,\n", " is_biased=is_trait_biased, \n", " df=linked_data,\n", " note=note\n", ")\n", "\n", "# 7. Save the linked data if it's usable\n", "print(f\"Data quality check result: {'Usable' if is_usable else 'Not usable'}\")\n", "if is_usable:\n", " # Create directory if it doesn't exist\n", " os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n", " linked_data.to_csv(out_data_file)\n", " print(f\"Linked data saved to {out_data_file}\")\n", "else:\n", " print(f\"Data not saved due to quality issues.\")" ] } ], "metadata": { "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.16" } }, "nbformat": 4, "nbformat_minor": 5 }