{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "335bd1a5", "metadata": {}, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n", "\n", "# Path Configuration\n", "from tools.preprocess import *\n", "\n", "# Processing context\n", "trait = \"Vitamin_D_Levels\"\n", "cohort = \"GSE118723\"\n", "\n", "# Input paths\n", "in_trait_dir = \"../../input/GEO/Vitamin_D_Levels\"\n", "in_cohort_dir = \"../../input/GEO/Vitamin_D_Levels/GSE118723\"\n", "\n", "# Output paths\n", "out_data_file = \"../../output/preprocess/Vitamin_D_Levels/GSE118723.csv\"\n", "out_gene_data_file = \"../../output/preprocess/Vitamin_D_Levels/gene_data/GSE118723.csv\"\n", "out_clinical_data_file = \"../../output/preprocess/Vitamin_D_Levels/clinical_data/GSE118723.csv\"\n", "json_path = \"../../output/preprocess/Vitamin_D_Levels/cohort_info.json\"\n" ] }, { "cell_type": "markdown", "id": "33624701", "metadata": {}, "source": [ "### Step 1: Initial Data Loading" ] }, { "cell_type": "code", "execution_count": null, "id": "ef59048c", "metadata": {}, "outputs": [], "source": [ "# 1. Let's first list the directory contents to understand what files are available\n", "import os\n", "\n", "print(\"Files in the cohort directory:\")\n", "files = os.listdir(in_cohort_dir)\n", "print(files)\n", "\n", "# Adapt file identification to handle different naming patterns\n", "soft_files = [f for f in files if 'soft' in f.lower() or '.soft' in f.lower() or '_soft' in f.lower()]\n", "matrix_files = [f for f in files if 'matrix' in f.lower() or '.matrix' in f.lower() or '_matrix' in f.lower()]\n", "\n", "# If no files with these patterns are found, look for alternative file types\n", "if not soft_files:\n", " soft_files = [f for f in files if f.endswith('.txt') or f.endswith('.gz')]\n", "if not matrix_files:\n", " matrix_files = [f for f in files if f.endswith('.txt') or f.endswith('.gz')]\n", "\n", "print(\"Identified SOFT files:\", soft_files)\n", "print(\"Identified matrix files:\", matrix_files)\n", "\n", "# Use the first files found, if any\n", "if len(soft_files) > 0 and len(matrix_files) > 0:\n", " soft_file = os.path.join(in_cohort_dir, soft_files[0])\n", " matrix_file = os.path.join(in_cohort_dir, matrix_files[0])\n", " \n", " # 2. Read the matrix file to obtain background information and sample characteristics data\n", " background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n", " clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", " background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", " \n", " # 3. Obtain the sample characteristics dictionary from the clinical dataframe\n", " sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", " \n", " # 4. 
Explicitly print out all the background information and the sample characteristics dictionary\n", " print(\"\\nBackground Information:\")\n", " print(background_info)\n", " print(\"\\nSample Characteristics Dictionary:\")\n", " print(sample_characteristics_dict)\n", "else:\n", " print(\"No appropriate files found in the directory.\")\n" ] }, { "cell_type": "markdown", "id": "369b4cf7", "metadata": {}, "source": [ "### Step 2: Dataset Analysis and Clinical Feature Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "ef559e7a", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import numpy as np\n", "import os\n", "import re\n", "\n", "# 1. Determine if gene expression data is available\n", "# Based on the background information, this study collected single-cell RNA-seq data\n", "# which contains gene expression information, so gene expression data is available.\n", "is_gene_available = True\n", "\n", "# 2. Variable availability and data type conversion\n", "# 2.1 Identify keys for trait, age, and gender\n", "\n", "# For trait (Vitamin D Levels): \n", "# Looking at the sample characteristics, there's no direct mention of Vitamin D levels\n", "# The study focuses on induced pluripotent stem cells and gene expression variation\n", "trait_row = None # No Vitamin D data available\n", "\n", "# For age:\n", "# No age information is provided in the sample characteristics\n", "age_row = None\n", "\n", "# For gender:\n", "# No gender information is provided in the sample characteristics\n", "gender_row = None\n", "\n", "# 2.2 Define conversion functions\n", "def convert_trait(value):\n", " \"\"\"\n", " Convert trait values to appropriate data type.\n", " Since trait data is not available, this function is just a placeholder.\n", " \"\"\"\n", " if value is None or pd.isna(value):\n", " return None\n", " \n", " # Extract the value after the colon if present\n", " if isinstance(value, str) and ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " \n", " try:\n", " # Convert to float assuming it's continuous\n", " return float(value)\n", " except (ValueError, TypeError):\n", " return None\n", "\n", "# Age conversion function (not used but defined for completeness)\n", "def convert_age(value):\n", " \"\"\"\n", " Convert age values to appropriate data type.\n", " Since age data is not available, this function is just a placeholder.\n", " \"\"\"\n", " if value is None or pd.isna(value):\n", " return None\n", " \n", " # Extract the value after the colon if present\n", " if isinstance(value, str) and ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " \n", " try:\n", " # Convert to float assuming age is continuous\n", " return float(value)\n", " except (ValueError, TypeError):\n", " return None\n", "\n", "# Gender conversion function (not used but defined for completeness)\n", "def convert_gender(value):\n", " \"\"\"\n", " Convert gender values to appropriate data type.\n", " Since gender data is not available, this function is just a placeholder.\n", " \"\"\"\n", " if value is None or pd.isna(value):\n", " return None\n", " \n", " # Extract the value after the colon if present\n", " if isinstance(value, str) and ':' in value:\n", " value = value.split(':', 1)[1].strip().lower()\n", " \n", " # Binary coding: female=0, male=1\n", " if 'female' in value:\n", " return 0\n", " elif 'male' in value:\n", " return 1\n", " else:\n", " return None\n", "\n", "# 3. 
Save metadata\n", "# Determine trait availability\n", "is_trait_available = trait_row is not None\n", "\n", "# Save initial filtering information\n", "validate_and_save_cohort_info(\n", " is_final=False,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", ")\n", "\n", "# 4. Clinical Feature Extraction\n", "# Since trait_row is None, we skip the clinical feature extraction step\n" ] }, { "cell_type": "markdown", "id": "43db2fbf", "metadata": {}, "source": [ "### Step 3: Gene Data Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "c6202918", "metadata": {}, "outputs": [], "source": [ "# Use the helper function to get the proper file paths\n", "soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# First check how many lines we have after the header to understand the data structure\n", "import gzip\n", "marker_found = False\n", "with gzip.open(matrix_file_path, 'rt') as file:\n", " for i, line in enumerate(file):\n", " if \"!series_matrix_table_begin\" in line:\n", " marker_found = True\n", " print(f\"Found marker at line {i}\")\n", " \n", " # Read the header line\n", " header_line = next(file)\n", " print(f\"Header line contains {len(header_line.strip().split('\\t'))} columns\")\n", " \n", " # Try to read the next 3 lines to see the data structure\n", " print(\"\\nFirst 3 data rows after header:\")\n", " for j in range(3):\n", " next_line = next(file, None)\n", " if next_line:\n", " cols = next_line.strip().split('\\t')\n", " print(f\"Row {j+1}: ID={cols[0]}, {len(cols)} columns total\")\n", " if j == 0: # Print a few sample values from first row to verify data format\n", " print(f\"Sample values: {cols[1:5]}...\")\n", " else:\n", " print(f\"No data at row {j+1}\")\n", " break\n", "\n", "# Attempt to read data with pandas directly\n", "try:\n", " # Based on the debugging, we'll read the file directly focusing on the data part\n", " with gzip.open(matrix_file_path, 'rt') as file:\n", " # Skip until we find the marker\n", " marker_line_num = 0\n", " for i, line in enumerate(file):\n", " if \"!series_matrix_table_begin\" in line:\n", " marker_line_num = i\n", " break\n", " \n", " # Now read the data with pandas, skipping to right after the marker line\n", " gene_data = pd.read_csv(\n", " matrix_file_path, \n", " compression='gzip', \n", " skiprows=marker_line_num+1, # +1 to skip the marker itself\n", " sep='\\t', \n", " index_col=0,\n", " low_memory=False, # Avoid mixed type inference errors\n", " header=0, # First row is header\n", " nrows=50, # Read just a subset first to diagnose\n", " comment='!', # Skip lines starting with !\n", " quotechar='\"' # Handle quotes properly\n", " )\n", " \n", " # Check if we have data\n", " if gene_data.shape[0] > 0:\n", " print(f\"\\nSuccessfully read gene data: {gene_data.shape} (showing first 50 rows)\")\n", " print(f\"First 5 row IDs (gene/probe identifiers):\")\n", " print(gene_data.index[:5])\n", " else:\n", " print(\"No data rows found in gene expression matrix\")\n", " \n", " # If all went well, now read the entire dataset\n", " if gene_data.shape[0] > 0:\n", " gene_data = pd.read_csv(\n", " matrix_file_path, \n", " compression='gzip', \n", " skiprows=marker_line_num+1,\n", " sep='\\t', \n", " index_col=0,\n", " low_memory=False,\n", " header=0,\n", " comment='!',\n", " quotechar='\"'\n", " )\n", " print(f\"\\nFull gene expression data shape: {gene_data.shape}\")\n", " \n", " # Save the gene expression data to a 
file\n", " if gene_data.shape[0] > 0:\n", " os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", " gene_data.to_csv(out_gene_data_file)\n", " print(f\"Gene expression data saved to {out_gene_data_file}\")\n", " \n", "except Exception as e:\n", " print(f\"Error reading gene data: {e}\")\n", " gene_data = pd.DataFrame() # Create empty DataFrame in case of failure\n", "\n", "# If still unsuccessful, display the file structure in more detail\n", "if gene_data.shape[0] == 0:\n", " print(\"\\nDetailed inspection of file structure:\")\n", " with gzip.open(matrix_file_path, 'rt') as file:\n", " # Skip to the marker\n", " for line in file:\n", " if \"!series_matrix_table_begin\" in line:\n", " break\n", " \n", " # Read the header\n", " header = next(file).strip()\n", " print(f\"Header: {header[:100]}...\")\n", " \n", " # Try to read the first 5 data lines and print raw content\n", " print(\"\\nRaw content of first 5 data lines:\")\n", " for i in range(5):\n", " line = next(file, None)\n", " if line:\n", " print(f\"Line {i+1} (first 100 chars): {line.strip()[:100]}...\")\n", " print(f\"Line {i+1} length: {len(line)}\")\n", " else:\n", " print(f\"No content at line {i+1}\")\n" ] }, { "cell_type": "markdown", "id": "70baf7cc", "metadata": {}, "source": [ "### Step 4: Gene Data Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "ec3e7c37", "metadata": {}, "outputs": [], "source": [ "# Use the helper function to get the proper file paths\n", "soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# Detailed inspection of the file structure\n", "print(\"Examining file structure to locate gene expression data:\")\n", "section_markers = {\n", " \"series_begin\": 0,\n", " \"series_end\": 0,\n", " \"platform_begin\": 0,\n", " \"platform_end\": 0,\n", " \"series_matrix_begin\": 0,\n", " \"series_matrix_end\": 0\n", "}\n", "\n", "with gzip.open(matrix_file_path, 'rt') as file:\n", " for i, line in enumerate(file):\n", " if i < 100 or i % 1000 == 0: # Print first 100 lines and then every 1000th line\n", " print(f\"Line {i}: {line[:50].strip()}...\")\n", " \n", " # Track section markers\n", " if \"!Series_table_begin\" in line:\n", " section_markers[\"series_begin\"] = i\n", " elif \"!Series_table_end\" in line:\n", " section_markers[\"series_end\"] = i\n", " elif \"!Platform_table_begin\" in line:\n", " section_markers[\"platform_begin\"] = i\n", " elif \"!Platform_table_end\" in line:\n", " section_markers[\"platform_end\"] = i\n", " elif \"!series_matrix_table_begin\" in line:\n", " section_markers[\"series_matrix_begin\"] = i\n", " elif \"!series_matrix_table_end\" in line:\n", " section_markers[\"series_matrix_end\"] = i\n", " \n", " # Stop after we've found all markers or read enough of the file\n", " if all(v > 0 for v in section_markers.values()) or i > 10000:\n", " break\n", "\n", "print(\"\\nSection markers found:\")\n", "for marker, line_num in section_markers.items():\n", " print(f\"{marker}: line {line_num}\")\n", "\n", "# Try reading from the SOFT file instead, which might contain the gene data\n", "print(\"\\nAttempting to read gene data from SOFT file:\")\n", "try:\n", " # First inspect the SOFT file structure\n", " with gzip.open(soft_file_path, 'rt') as file:\n", " for i, line in enumerate(file):\n", " if i < 20: # Print first 20 lines\n", " print(f\"SOFT file line {i}: {line[:50].strip()}...\")\n", " if i >= 20:\n", " break\n", " \n", " # Try to extract gene annotations from the SOFT file\n", " gene_metadata = 
get_gene_annotation(soft_file_path)\n", " print(f\"\\nGene metadata shape: {gene_metadata.shape}\")\n", " \n", " if gene_metadata.shape[0] > 0:\n", " print(\"First 5 rows of gene metadata:\")\n", " print(gene_metadata.head())\n", " \n", " # Check if this contains expression data\n", " if any('value' in col.lower() or 'expr' in col.lower() for col in gene_metadata.columns):\n", " print(\"\\nFound potential expression data in gene metadata\")\n", " gene_data = gene_metadata\n", " else:\n", " print(\"\\nNo expression data columns found in gene metadata\")\n", " gene_data = pd.DataFrame()\n", " else:\n", " print(\"No gene metadata found in SOFT file\")\n", " gene_data = pd.DataFrame()\n", " \n", " # If we found data, try to identify expression columns and save them\n", " if gene_data.shape[0] > 0 and gene_data.shape[1] > 1:\n", " if 'ID' in gene_data.columns:\n", " gene_data = gene_data.set_index('ID')\n", " \n", " # Print the first 20 row IDs\n", " print(\"\\nFirst 20 gene/probe identifiers:\")\n", " print(gene_data.index[:20])\n", " \n", " # Print shape to understand the dataset dimensions\n", " print(f\"\\nGene expression data shape: {gene_data.shape}\")\n", " \n", " # Save the gene data\n", " os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", " gene_data.to_csv(out_gene_data_file)\n", " print(f\"Gene data saved to {out_gene_data_file}\")\n", " else:\n", " print(\"\\nNo usable gene expression data found in either file\")\n", " \n", "except Exception as e:\n", " print(f\"Error extracting data from SOFT file: {e}\")\n", " gene_data = pd.DataFrame()\n", "\n", "# If we still don't have any gene data, check for other files in the directory\n", "if gene_data.shape[0] == 0:\n", " print(\"\\nSearching for alternative data files in the directory:\")\n", " all_files = os.listdir(in_cohort_dir)\n", " for file in all_files:\n", " print(f\" {file}\")\n", " \n", " print(\"\\nThis dataset may require accessing supplementary files or raw data from GEO.\")\n", " print(\"The gene expression data doesn't appear to be in the standard format.\")\n" ] }, { "cell_type": "markdown", "id": "1ec3cebc", "metadata": {}, "source": [ "### Step 5: Gene Identifier Review" ] }, { "cell_type": "code", "execution_count": null, "id": "d3bc2cae", "metadata": {}, "outputs": [], "source": [ "# Based on the provided information, I need to determine if gene identifier mapping is required.\n", "# The output shows the SOFT file structure, but there's no clear view of the gene identifiers.\n", "# The GEO dataset GSE118723 appears to be RNA-Seq data from human iPSCs.\n", "\n", "# Let's try to find and examine the supplementary files which might contain the gene expression data\n", "import os\n", "import gzip\n", "import pandas as pd\n", "\n", "# List all files in the directory\n", "print(\"Available files in the directory:\")\n", "for file in os.listdir(in_cohort_dir):\n", " print(file)\n", "\n", "# Check for expression data in supplementary files\n", "supp_files = [f for f in os.listdir(in_cohort_dir) if 'genes' in f.lower() or 'count' in f.lower() or 'expr' in f.lower()]\n", "if supp_files:\n", " print(\"\\nExamining potential gene expression files:\")\n", " for file in supp_files:\n", " file_path = os.path.join(in_cohort_dir, file)\n", " print(f\"\\nFile: {file}\")\n", " \n", " # Try to read the file and examine the first few lines\n", " try:\n", " if file.endswith('.gz'):\n", " with gzip.open(file_path, 'rt') as f:\n", " for i, line in enumerate(f):\n", " if i < 5: # Print only first 5 lines\n", " print(f\"Line 
{i}: {line.strip()}\")\n",
"                        else:\n",
"                            break\n",
"            else:\n",
"                with open(file_path, 'r') as f:\n",
"                    for i, line in enumerate(f):\n",
"                        if i < 5:  # Print only first 5 lines\n",
"                            print(f\"Line {i}: {line.strip()}\")\n",
"                        else:\n",
"                            break\n",
"        except Exception as e:\n",
"            print(f\"Error reading file: {e}\")\n",
"\n",
"# Since we couldn't get a clear view of the gene identifiers from the available information,\n",
"# I'll need to make an informed decision based on the dataset description.\n",
"# The background information describes single-cell RNA-seq of human induced pluripotent stem cells,\n",
"# so the row identifiers are most likely Ensembl gene IDs or gene symbols.\n",
"\n",
"# For RNA-Seq data processed with standard pipelines (e.g., Subread/featureCounts), Ensembl gene IDs are commonly used.\n",
"# Therefore, gene identifier mapping is likely required if the IDs are not human gene symbols.\n",
"\n",
"requires_gene_mapping = True\n"
] }, { "cell_type": "markdown", "id": "88274ff1", "metadata": {}, "source": [ "### Step 6: Gene Annotation" ] }, { "cell_type": "code", "execution_count": null, "id": "5c2f3b25", "metadata": {}, "outputs": [], "source": [
"# Initialize variables outside of try block\n",
"# Get the file paths using the library function\n",
"soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)\n",
"platform_info = []\n",
"gene_data = pd.DataFrame()  # Empty DataFrame in case it wasn't defined in previous steps\n",
"\n",
"try:\n",
"    # Extract gene annotation using the library function\n",
"    print(\"Extracting gene annotation from SOFT file...\")\n",
"    gene_annotation = get_gene_annotation(soft_file_path)\n",
"    \n",
"    # Display column names of the annotation dataframe\n",
"    print(\"\\nGene annotation columns:\")\n",
"    print(gene_annotation.columns.tolist())\n",
"    \n",
"    # Preview the annotation dataframe\n",
"    print(\"\\nGene annotation preview:\")\n",
"    annotation_preview = preview_df(gene_annotation)\n",
"    print(annotation_preview)\n",
"    \n",
"    # Check if we have annotation data\n",
"    if gene_annotation.shape[0] > 0:\n",
"        # Analyze the SOFT file to identify the data type and mapping information\n",
"        with gzip.open(soft_file_path, 'rt', encoding='latin-1') as f:\n",
"            for line in f:\n",
"                if line.startswith(\"!Platform_title\") or line.startswith(\"!Series_title\") or \"description\" in line.lower():\n",
"                    platform_info.append(line.strip())\n",
"        \n",
"        print(\"\\nPlatform information:\")\n",
"        for line in platform_info:\n",
"            print(line)\n",
"        \n",
"        # Look for gene symbol column\n",
"        gene_symbol_candidates = [col for col in gene_annotation.columns if 'gene' in col.lower() or 'symbol' in col.lower() or 'name' in col.lower()]\n",
"        print(f\"\\nPotential gene symbol columns: {gene_symbol_candidates}\")\n",
"    else:\n",
"        print(\"No gene annotation found in the SOFT file.\")\n",
"\n",
"except Exception as e:\n",
"    print(f\"Error analyzing gene annotation data: {e}\")\n",
"    gene_annotation = pd.DataFrame()\n",
"\n",
"# Based on platform_info, determine if this is really gene expression data\n",
"is_gene_expression = False\n",
"for info in platform_info:\n",
"    if 'expression' in info.lower() or 'transcript' in info.lower() or 'mrna' in info.lower():\n", 
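"        # If any platform/series title or description line mentions an expression-related term,\n",
"        # flag the dataset as likely gene expression data (simple keyword heuristic).\n",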
" is_gene_expression = True\n", " break\n", "\n", "print(f\"\\nIs this dataset likely to contain gene expression data based on platform info? {is_gene_expression}\")\n", "\n", "# If this isn't gene expression data, we need to update our metadata\n", "if not is_gene_expression and platform_info: # Only update if we found platform info\n", " print(\"\\nNOTE: Based on our analysis, this dataset doesn't appear to contain gene expression data.\")\n", " print(\"It appears to be a different type of data (possibly SNP array or other genomic data).\")\n", " # Update is_gene_available for metadata\n", " is_gene_available = False\n", " \n", " # Save the updated metadata\n", " validate_and_save_cohort_info(\n", " is_final=False,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", " )" ] } ], "metadata": {}, "nbformat": 4, "nbformat_minor": 5 }