{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "5b8717cf", "metadata": {}, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n", "\n", "# Path Configuration\n", "from tools.preprocess import *\n", "\n", "# Processing context\n", "trait = \"Height\"\n", "cohort = \"GSE131835\"\n", "\n", "# Input paths\n", "in_trait_dir = \"../../input/GEO/Height\"\n", "in_cohort_dir = \"../../input/GEO/Height/GSE131835\"\n", "\n", "# Output paths\n", "out_data_file = \"../../output/preprocess/Height/GSE131835.csv\"\n", "out_gene_data_file = \"../../output/preprocess/Height/gene_data/GSE131835.csv\"\n", "out_clinical_data_file = \"../../output/preprocess/Height/clinical_data/GSE131835.csv\"\n", "json_path = \"../../output/preprocess/Height/cohort_info.json\"\n" ] }, { "cell_type": "markdown", "id": "03ab6c0c", "metadata": {}, "source": [ "### Step 1: Initial Data Loading" ] }, { "cell_type": "code", "execution_count": null, "id": "13683a3d", "metadata": {}, "outputs": [], "source": [ "from tools.preprocess import *\n", "# 1. Identify the paths to the SOFT file and the matrix file\n", "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# 2. Read the matrix file to obtain background information and sample characteristics data\n", "background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n", "clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", "background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", "\n", "# 3. Obtain the sample characteristics dictionary from the clinical dataframe\n", "sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", "\n", "# 4. Explicitly print out all the background information and the sample characteristics dictionary\n", "print(\"Background Information:\")\n", "print(background_info)\n", "print(\"Sample Characteristics Dictionary:\")\n", "print(sample_characteristics_dict)\n" ] }, { "cell_type": "markdown", "id": "86769e3d", "metadata": {}, "source": [ "### Step 2: Dataset Analysis and Clinical Feature Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "5613373d", "metadata": {}, "outputs": [], "source": [ "# 1. Gene Expression Data Availability\n", "# Based on the background information, this dataset contains gene expression data from adipose tissues\n", "# The description mentions using Affymetrix Clariom S Microarray to analyze gene expression\n", "# It's not just miRNA or methylation data, so gene expression is available\n", "is_gene_available = True\n", "\n", "# 2. 
Variable Availability and Data Type Conversion\n", "# 2.1 Data Availability\n", "\n", "# Height data is available in the row 5 as 'height(cm): XXX'\n", "trait_row = 5 # height is available in row 5\n", "\n", "# Age data is available in row 3 as 'age: XX'\n", "age_row = 3 # age is available in row 3\n", "\n", "# Gender/Sex data is available in row 2 as 'Sex: Male/Female' \n", "gender_row = 2 # gender is available in row 2\n", "\n", "# 2.2 Data Type Conversion Functions\n", "\n", "def convert_trait(value):\n", " \"\"\"Convert height values to numeric (continuous) format.\"\"\"\n", " try:\n", " # Extract the number after the 'height(cm):' prefix\n", " if \":\" in value:\n", " height_str = value.split(\":\", 1)[1].strip()\n", " return float(height_str)\n", " else:\n", " return None\n", " except (ValueError, IndexError):\n", " return None\n", "\n", "def convert_age(value):\n", " \"\"\"Convert age values to numeric (continuous) format.\"\"\"\n", " try:\n", " # Extract the number after the 'age:' prefix\n", " if \":\" in value:\n", " age_str = value.split(\":\", 1)[1].strip()\n", " return float(age_str)\n", " else:\n", " return None\n", " except (ValueError, IndexError):\n", " return None\n", "\n", "def convert_gender(value):\n", " \"\"\"Convert gender to binary format (0 for female, 1 for male).\"\"\"\n", " try:\n", " if \":\" in value:\n", " gender_str = value.split(\":\", 1)[1].strip().lower()\n", " if \"female\" in gender_str:\n", " return 0\n", " elif \"male\" in gender_str:\n", " return 1\n", " else:\n", " return None\n", " else:\n", " return None\n", " except (ValueError, IndexError):\n", " return None\n", "\n", "# 3. Save Metadata - Initial Filtering\n", "# Trait data is available since trait_row is not None\n", "is_trait_available = trait_row is not None\n", "\n", "# Validate and save cohort info (initial filtering)\n", "validate_and_save_cohort_info(\n", " is_final=False,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", ")\n", "\n", "# 4. 
Clinical Feature Extraction\n",
 "# Only proceed if trait_row is not None, which it is in this case\n",
 "if trait_row is not None:\n",
 "    # Create a sample characteristics dataframe in a format that works with geo_select_clinical_features\n",
 "    # Create a dictionary with the sample characteristics data\n",
 "    sample_chars = {0: ['tissue: Visceral', 'tissue: SubCut'],\n",
 "                    1: ['group: CWS', 'group: CWL', 'group: CONTROL', 'group: CONTROl'],\n",
 "                    2: ['Sex: Male', 'Sex: Female'],\n",
 "                    3: ['age: 51', 'age: 64', 'age: 62', 'age: 78', 'age: 47', 'age: 59', 'age: 57', 'age: 58',\n",
 "                        'age: 53', 'age: 49', 'age: 54', 'age: 60', 'age: 56', 'age: 41', 'age: 76', 'age: 81',\n",
 "                        'age: 48', 'age: 65', 'age: 68', 'age: 72'],\n",
 "                    4: ['tumour: Oesophageal adenocarcinoma', 'tumour: Oesophageal SCC', 'tumour: Gastric adenocarcinoma',\n",
 "                        'tumour: N/A', 'tumour: Gastric'],\n",
 "                    5: ['height(cm): 178', 'height(cm): 170', 'height(cm): 166', 'height(cm): 160', 'height(cm): 180',\n",
 "                        'height(cm): 163', 'height(cm): 183', 'height(cm): 172', 'height(cm): 169', 'height(cm): 158',\n",
 "                        'height(cm): 173', 'height(cm): 193', 'height(cm): 152', 'height(cm): 167', 'height(cm): 168',\n",
 "                        'height(cm): 177', 'height(cm): 165', 'height(cm): 179', 'height(cm): 190']}\n",
 "\n",
 "    # Create sample IDs (columns) equal to the maximum number of samples\n",
 "    max_samples = max(len(values) for values in sample_chars.values())\n",
 "    sample_ids = [f\"Sample_{i+1}\" for i in range(max_samples)]\n",
 "\n",
 "    # Create a DataFrame with NaN values\n",
 "    clinical_data = pd.DataFrame(index=range(max(sample_chars.keys()) + 1), columns=sample_ids)\n",
 "\n",
 "    # Fill the DataFrame with sample characteristics data\n",
 "    for row_idx, row_values in sample_chars.items():\n",
 "        for col_idx, value in enumerate(row_values):\n",
 "            if col_idx < len(sample_ids):\n",
 "                clinical_data.iloc[row_idx, col_idx] = value\n",
 "\n",
 "    # Extract clinical features\n",
 "    selected_clinical_df = geo_select_clinical_features(\n",
 "        clinical_df=clinical_data,\n",
 "        trait=trait,\n",
 "        trait_row=trait_row,\n",
 "        convert_trait=convert_trait,\n",
 "        age_row=age_row,\n",
 "        convert_age=convert_age,\n",
 "        gender_row=gender_row,\n",
 "        convert_gender=convert_gender\n",
 "    )\n",
 "\n",
 "    # Preview the dataframe\n",
 "    preview = preview_df(selected_clinical_df)\n",
 "    print(\"Clinical Data Preview:\")\n",
 "    print(pd.DataFrame(preview))\n",
 "\n",
 "    # Save the processed clinical data\n",
 "    os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n",
 "    selected_clinical_df.to_csv(out_clinical_data_file, index=False)\n",
 "    print(f\"Clinical data saved to {out_clinical_data_file}\")\n" ] },
 { "cell_type": "markdown", "id": "28193d69", "metadata": {}, "source": [ "### Step 3: Dataset Analysis and Clinical Feature Extraction" ] },
 { "cell_type": "code", "execution_count": null, "id": "21d5dad6", "metadata": {}, "outputs": [], "source": [
 "# Analyzing dataset files and structure\n",
 "import os\n",
 "\n",
 "# List files in the cohort directory to understand what's available\n",
 "print(\"Files in cohort directory:\")\n",
 "try:\n",
 "    cohort_files = os.listdir(in_cohort_dir)\n",
 "    for file in cohort_files:\n",
 "        print(f\"- {file}\")\n",
 "except Exception as e:\n",
 "    print(f\"Error accessing directory: {e}\")\n",
 "\n",
 "# Check for common GEO file patterns\n",
 "soft_file = None\n",
 "matrix_file = None\n",
 "family_file = None\n",
 "for file in os.listdir(in_cohort_dir) if os.path.exists(in_cohort_dir) else []:\n",
 "    if file.endswith(\".soft\") or 
file.endswith(\".soft.gz\"):\n", " soft_file = os.path.join(in_cohort_dir, file)\n", " elif file.endswith(\"_family.soft.gz\") or file.endswith(\"_family.soft\"):\n", " family_file = os.path.join(in_cohort_dir, file)\n", " elif file.endswith(\"_matrix.txt\") or file.endswith(\"_matrix.txt.gz\"):\n", " matrix_file = os.path.join(in_cohort_dir, file)\n", "\n", "print(f\"SOFT file: {soft_file}\")\n", "print(f\"Family file: {family_file}\")\n", "print(f\"Matrix file: {matrix_file}\")\n", "\n", "# Load the sample characteristics from the appropriate file\n", "if soft_file and os.path.exists(soft_file):\n", " # Parse SOFT file to get sample characteristics\n", " with open(soft_file, 'r') as f:\n", " lines = f.readlines()\n", " \n", " # Extract sample characteristics\n", " sample_chars = []\n", " current_section = None\n", " for line in lines:\n", " if line.startswith(\"!Sample_\"):\n", " key = line.split(\"=\")[0].strip().replace(\"!Sample_\", \"\")\n", " value = line.split(\"=\")[1].strip() if \"=\" in line else \"\"\n", " if key == \"table_begin\":\n", " current_section = \"sample_table\"\n", " sample_chars = []\n", " elif key == \"table_end\":\n", " current_section = None\n", " elif current_section == \"sample_table\":\n", " sample_chars.append(line.strip())\n", " \n", " # Create DataFrame from sample characteristics\n", " if sample_chars:\n", " import io\n", " sample_table = io.StringIO(\"\\n\".join(sample_chars))\n", " clinical_data = pd.read_csv(sample_table, sep=\"\\t\")\n", " else:\n", " clinical_data = pd.DataFrame()\n", "else:\n", " # Try to find sample characteristics in other files\n", " sample_chars_file = None\n", " for file in os.listdir(in_cohort_dir) if os.path.exists(in_cohort_dir) else []:\n", " if \"sample\" in file.lower() and \"char\" in file.lower():\n", " sample_chars_file = os.path.join(in_cohort_dir, file)\n", " break\n", " \n", " if sample_chars_file and os.path.exists(sample_chars_file):\n", " clinical_data = pd.read_csv(sample_chars_file)\n", " else:\n", " # Last resort - look for any CSV file that might contain clinical data\n", " csv_files = [f for f in os.listdir(in_cohort_dir) if f.endswith('.csv')] if os.path.exists(in_cohort_dir) else []\n", " if csv_files:\n", " clinical_data = pd.read_csv(os.path.join(in_cohort_dir, csv_files[0]))\n", " else:\n", " clinical_data = pd.DataFrame()\n", "\n", "# Display sample characteristics to make informed decisions\n", "print(\"\\nSample characteristics data shape:\", clinical_data.shape)\n", "print(\"Sample characteristics preview:\")\n", "print(clinical_data.head())\n", "\n", "# Get unique values for each row to identify relevant variables\n", "unique_values = {}\n", "for i in range(len(clinical_data)):\n", " if i < clinical_data.shape[0]:\n", " values = set(clinical_data.iloc[i, 1:].dropna().unique())\n", " unique_values[i] = values\n", " print(f\"Row {i}: {values}\")\n", "\n", "# 1. Gene Expression Data Availability\n", "# Assuming gene expression data is available (can be overridden if evidence suggests otherwise)\n", "is_gene_available = True\n", "\n", "# 2. 
Variable Availability and Data Type Conversion\n", "# 2.1 Identify rows for trait, age, and gender\n", "trait_row = None\n", "age_row = None\n", "gender_row = None\n", "\n", "# Check each row to find trait, age, and gender information\n", "for i in unique_values:\n", " values_str = ' '.join([str(v) for v in unique_values[i]])\n", " row_data_str = ' '.join([str(x) for x in clinical_data.iloc[i, :].values if pd.notna(x)])\n", " \n", " # Looking for height information\n", " if any(h in row_data_str.lower() for h in ['height', 'cm', 'meter', 'tall', 'stature']):\n", " trait_row = i\n", " print(f\"Found Height information in row {i}\")\n", " \n", " # Looking for age information\n", " if any(a in row_data_str.lower() for a in ['age', 'year', 'yrs', 'yo']):\n", " age_row = i\n", " print(f\"Found Age information in row {i}\")\n", " \n", " # Looking for gender information\n", " if any(g in row_data_str.lower() for g in ['gender', 'sex', 'male', 'female']):\n", " gender_row = i\n", " print(f\"Found Gender information in row {i}\")\n", "\n", "# 2.2 Define conversion functions for each variable\n", "def extract_value(s):\n", " \"\"\"Extract value after colon if present.\"\"\"\n", " if isinstance(s, str) and ':' in s:\n", " return s.split(':', 1)[1].strip()\n", " return s\n", "\n", "def convert_trait(value):\n", " \"\"\"Convert height value to float (continuous).\"\"\"\n", " if pd.isna(value):\n", " return None\n", " \n", " value = extract_value(value)\n", " if isinstance(value, str):\n", " # Extract numeric part from the string\n", " try:\n", " import re\n", " nums = re.findall(r'\\d+\\.?\\d*', value)\n", " if nums:\n", " return float(nums[0])\n", " except:\n", " pass\n", " \n", " try:\n", " return float(value)\n", " except:\n", " return None\n", "\n", "def convert_age(value):\n", " \"\"\"Convert age value to float (continuous).\"\"\"\n", " if pd.isna(value):\n", " return None\n", " \n", " value = extract_value(value)\n", " if isinstance(value, str):\n", " # Extract numeric part from the string\n", " try:\n", " import re\n", " nums = re.findall(r'\\d+\\.?\\d*', value)\n", " if nums:\n", " return float(nums[0])\n", " except:\n", " pass\n", " \n", " try:\n", " return float(value)\n", " except:\n", " return None\n", "\n", "def convert_gender(value):\n", " \"\"\"Convert gender to binary (0 for female, 1 for male).\"\"\"\n", " if pd.isna(value):\n", " return None\n", " \n", " value = extract_value(value)\n", " if isinstance(value, str):\n", " value = value.lower()\n", " if any(f in value for f in ['female', 'f', 'woman', 'girl']):\n", " return 0\n", " elif any(m in value for m in ['male', 'm', 'man', 'boy']):\n", " return 1\n", " return None\n", "\n", "# 3. Save metadata about data availability\n", "is_trait_available = trait_row is not None\n", "validate_and_save_cohort_info(\n", " is_final=False, \n", " cohort=cohort, \n", " info_path=json_path, \n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", ")\n", "\n", "# 4. 
Extract and save clinical features if trait data is available\n",
 "if trait_row is not None:\n",
 "    # Extract clinical features\n",
 "    selected_clinical_df = geo_select_clinical_features(\n",
 "        clinical_df=clinical_data,\n",
 "        trait=trait,\n",
 "        trait_row=trait_row,\n",
 "        convert_trait=convert_trait,\n",
 "        age_row=age_row,\n",
 "        convert_age=convert_age,\n",
 "        gender_row=gender_row,\n",
 "        convert_gender=convert_gender\n",
 "    )\n",
 "\n",
 "    # Preview and save the extracted clinical features\n",
 "    print(\"Clinical Data Preview:\")\n",
 "    print(preview_df(selected_clinical_df))\n",
 "    os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n",
 "    selected_clinical_df.to_csv(out_clinical_data_file, index=False)\n",
 "    print(f\"Clinical data saved to {out_clinical_data_file}\")\n" ] },
 { "cell_type": "markdown", "id": "14d7a3ed", "metadata": {}, "source": [ "### Step 4: Gene Data Extraction" ] },
 { "cell_type": "code", "execution_count": null, "id": "eb4606d2", "metadata": {}, "outputs": [], "source": [
 "# 1. Get the file paths for the SOFT file and matrix file\n",
 "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n",
 "\n",
 "# 2. First, let's examine the structure of the matrix file to understand its format\n",
 "import gzip\n",
 "\n",
 "# Peek at the first few lines of the file to understand its structure\n",
 "with gzip.open(matrix_file, 'rt') as file:\n",
 "    # Read first 100 lines to find the header structure\n",
 "    for i, line in enumerate(file):\n",
 "        if '!series_matrix_table_begin' in line:\n",
 "            print(f\"Found data marker at line {i}\")\n",
 "            # Read the next line which should be the header\n",
 "            header_line = next(file)\n",
 "            print(f\"Header line: {header_line.strip()}\")\n",
 "            # And the first data line\n",
 "            first_data_line = next(file)\n",
 "            print(f\"First data line: {first_data_line.strip()}\")\n",
 "            break\n",
 "        if i > 100:  # Limit search to first 100 lines\n",
 "            print(\"Matrix table marker not found in first 100 lines\")\n",
 "            break\n",
 "\n",
 "# 3. Now try to get the genetic data with better error handling\n",
 "try:\n",
 "    gene_data = get_genetic_data(matrix_file)\n",
 "    print(gene_data.index[:20])\n",
 "except KeyError as e:\n",
 "    print(f\"KeyError: {e}\")\n",
 "\n",
 "    # Alternative approach: manually extract the data\n",
 "    print(\"\\nTrying alternative approach to read the gene data:\")\n",
 "    with gzip.open(matrix_file, 'rt') as file:\n",
 "        # Find the start of the data\n",
 "        for line in file:\n",
 "            if '!series_matrix_table_begin' in line:\n",
 "                break\n",
 "\n",
 "        # Read the headers and data\n",
 "        import pandas as pd\n",
 "        df = pd.read_csv(file, sep='\\t', index_col=0)\n",
 "        print(f\"Column names: {df.columns[:5]}\")\n",
 "        print(f\"First 20 row IDs: {df.index[:20]}\")\n",
 "        gene_data = df\n" ] },
 { "cell_type": "markdown", "id": "ee780b33", "metadata": {}, "source": [ "### Step 5: Gene Identifier Review" ] },
 { "cell_type": "code", "execution_count": null, "id": "a2bbe313", "metadata": {}, "outputs": [], "source": [
 "# Examining the gene identifiers in the dataset\n",
 "# The identifiers follow the format \"ENSG00000000003_at\", \"ENSG00000000005_at\", etc.\n",
 "# ENSG identifiers are Ensembl gene IDs, not standard human gene symbols\n",
 "# They need to be mapped to official gene symbols for better interpretability\n",
 "\n",
 "requires_gene_mapping = True\n" ] },
 { "cell_type": "markdown", "id": "0e4e7ce4", "metadata": {}, "source": [ "### Step 6: Gene Annotation" ] },
 { "cell_type": "code", "execution_count": null, "id": "b5dd22c7", "metadata": {}, "outputs": [], "source": [ "# 1. 
Let's first examine the structure of the SOFT file before trying to parse it\n", "import gzip\n", "\n", "# Look at the first few lines of the SOFT file to understand its structure\n", "print(\"Examining SOFT file structure:\")\n", "try:\n", " with gzip.open(soft_file, 'rt') as file:\n", " # Read first 20 lines to understand the file structure\n", " for i, line in enumerate(file):\n", " if i < 20:\n", " print(f\"Line {i}: {line.strip()}\")\n", " else:\n", " break\n", "except Exception as e:\n", " print(f\"Error reading SOFT file: {e}\")\n", "\n", "# 2. Now let's try a more robust approach to extract the gene annotation\n", "# Instead of using the library function which failed, we'll implement a custom approach\n", "try:\n", " # First, look for the platform section which contains gene annotation\n", " platform_data = []\n", " with gzip.open(soft_file, 'rt') as file:\n", " in_platform_section = False\n", " for line in file:\n", " if line.startswith('^PLATFORM'):\n", " in_platform_section = True\n", " continue\n", " if in_platform_section and line.startswith('!platform_table_begin'):\n", " # Next line should be the header\n", " header = next(file).strip()\n", " platform_data.append(header)\n", " # Read until the end of the platform table\n", " for table_line in file:\n", " if table_line.startswith('!platform_table_end'):\n", " break\n", " platform_data.append(table_line.strip())\n", " break\n", " \n", " # If we found platform data, convert it to a DataFrame\n", " if platform_data:\n", " import pandas as pd\n", " import io\n", " platform_text = '\\n'.join(platform_data)\n", " gene_annotation = pd.read_csv(io.StringIO(platform_text), delimiter='\\t', \n", " low_memory=False, on_bad_lines='skip')\n", " print(\"\\nGene annotation preview:\")\n", " print(preview_df(gene_annotation))\n", " else:\n", " print(\"Could not find platform table in SOFT file\")\n", " \n", " # Try an alternative approach - extract mapping from other sections\n", " with gzip.open(soft_file, 'rt') as file:\n", " for line in file:\n", " if 'ANNOTATION information' in line or 'annotation information' in line:\n", " print(f\"Found annotation information: {line.strip()}\")\n", " if line.startswith('!Platform_title') or line.startswith('!platform_title'):\n", " print(f\"Platform title: {line.strip()}\")\n", " \n", "except Exception as e:\n", " print(f\"Error processing gene annotation: {e}\")\n" ] }, { "cell_type": "markdown", "id": "ea803bcd", "metadata": {}, "source": [ "### Step 7: Gene Identifier Mapping" ] }, { "cell_type": "code", "execution_count": null, "id": "71f22fa6", "metadata": {}, "outputs": [], "source": [ "# 1. After analyzing the gene identifiers and gene annotation data:\n", "# - In gene_data, the IDs are like \"ENSG00000000003_at\" \n", "# - In gene_annotation, the matching column is \"ID\" which has the same format\n", "# - The gene symbols are stored in the \"ORF\" column\n", "\n", "# 2. Create gene mapping dataframe with the two relevant columns\n", "gene_mapping = gene_annotation[['ID', 'ORF']]\n", "gene_mapping = gene_mapping.rename(columns={'ORF': 'Gene'})\n", "gene_mapping = gene_mapping.dropna()\n", "\n", "# 3. 
Apply gene mapping to convert probe-level measurements to gene expression data\n", "# This handles the many-to-many relationship as specified\n", "gene_data = apply_gene_mapping(gene_data, gene_mapping)\n", "\n", "# Preview the gene expression data after mapping\n", "print(\"\\nGene expression data preview after mapping:\")\n", "print(gene_data.shape)\n", "print(gene_data.index[:10]) # Show first 10 gene symbols" ] } ], "metadata": {}, "nbformat": 4, "nbformat_minor": 5 }