{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "dd6f1b25", "metadata": {}, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n", "\n", "# Path Configuration\n", "from tools.preprocess import *\n", "\n", "# Processing context\n", "trait = \"Sickle_Cell_Anemia\"\n", "cohort = \"GSE84633\"\n", "\n", "# Input paths\n", "in_trait_dir = \"../../input/GEO/Sickle_Cell_Anemia\"\n", "in_cohort_dir = \"../../input/GEO/Sickle_Cell_Anemia/GSE84633\"\n", "\n", "# Output paths\n", "out_data_file = \"../../output/preprocess/Sickle_Cell_Anemia/GSE84633.csv\"\n", "out_gene_data_file = \"../../output/preprocess/Sickle_Cell_Anemia/gene_data/GSE84633.csv\"\n", "out_clinical_data_file = \"../../output/preprocess/Sickle_Cell_Anemia/clinical_data/GSE84633.csv\"\n", "json_path = \"../../output/preprocess/Sickle_Cell_Anemia/cohort_info.json\"\n" ] }, { "cell_type": "markdown", "id": "be7f2d3d", "metadata": {}, "source": [ "### Step 1: Initial Data Loading" ] }, { "cell_type": "code", "execution_count": null, "id": "2aa3e4fb", "metadata": {}, "outputs": [], "source": [ "from tools.preprocess import *\n", "# 1. Identify the paths to the SOFT file and the matrix file\n", "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# 2. Read the matrix file to obtain background information and sample characteristics data\n", "background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n", "clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", "background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", "\n", "# 3. Obtain the sample characteristics dictionary from the clinical dataframe\n", "sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", "\n", "# 4. Explicitly print out all the background information and the sample characteristics dictionary\n", "print(\"Background Information:\")\n", "print(background_info)\n", "print(\"Sample Characteristics Dictionary:\")\n", "print(sample_characteristics_dict)\n" ] }, { "cell_type": "markdown", "id": "7f8e1a4a", "metadata": {}, "source": [ "### Step 2: Dataset Analysis and Clinical Feature Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "a998c0bd", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import os\n", "import json\n", "from typing import Callable, Dict, Any, Optional\n", "\n", "# 1. Assess gene expression data availability\n", "is_gene_available = True # Dataset appears to be gene expression data from PBMCs\n", "\n", "# 2. Variable availability and conversion functions\n", "# 2.1. Trait availability\n", "trait_row = 2 # From the characteristics dictionary, disease info is at key 2\n", "\n", "# Define conversion functions\n", "def convert_trait(value):\n", " if value is None:\n", " return None\n", " \n", " # Extract the value after the colon\n", " if ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " \n", " # Convert to binary (1 for sickle cell disease, 0 for control)\n", " if 'sickle cell disease' in value.lower():\n", " return 1\n", " elif 'control' in value.lower() or 'healthy' in value.lower():\n", " return 0\n", " else:\n", " return None\n", "\n", "# 2.2. 
Age availability\n", "age_row = None # Age data not available in the sample characteristics\n", "\n", "def convert_age(value):\n", " # This function won't be used since age data is not available\n", " if value is None:\n", " return None\n", " \n", " if ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " \n", " try:\n", " return float(value)\n", " except:\n", " return None\n", "\n", "# 2.3. Gender availability\n", "gender_row = None # Gender data not available in the sample characteristics\n", "\n", "def convert_gender(value):\n", " # This function won't be used since gender data is not available\n", " if value is None:\n", " return None\n", " \n", " if ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " \n", " value = value.lower()\n", " if 'female' in value or 'f' == value:\n", " return 0\n", " elif 'male' in value or 'm' == value:\n", " return 1\n", " else:\n", " return None\n", "\n", "# 3. Save metadata with initial filtering\n", "is_trait_available = trait_row is not None\n", "validate_and_save_cohort_info(\n", " is_final=False,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", ")\n", "\n", "# 4. Clinical feature extraction (if trait data is available)\n", "if trait_row is not None:\n", " try:\n", " # Extract clinical features - assuming clinical_data variable exists from previous step\n", " selected_clinical_df = geo_select_clinical_features(\n", " clinical_df=clinical_data,\n", " trait=trait,\n", " trait_row=trait_row,\n", " convert_trait=convert_trait,\n", " age_row=age_row,\n", " convert_age=convert_age if age_row is not None else None,\n", " gender_row=gender_row,\n", " convert_gender=convert_gender if gender_row is not None else None\n", " )\n", " \n", " # Preview the extracted clinical data\n", " print(\"Preview of extracted clinical data:\")\n", " print(preview_df(selected_clinical_df))\n", " \n", " # Create directory if it doesn't exist\n", " os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n", " \n", " # Save clinical data to CSV\n", " selected_clinical_df.to_csv(out_clinical_data_file, index=False)\n", " print(f\"Clinical data saved to {out_clinical_data_file}\")\n", " except Exception as e:\n", " print(f\"Error extracting clinical features: {e}\")\n", "else:\n", " print(\"No trait data available, skipping clinical feature extraction.\")\n" ] }, { "cell_type": "markdown", "id": "b443f474", "metadata": {}, "source": [ "### Step 3: Gene Data Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "993157f8", "metadata": {}, "outputs": [], "source": [ "# 1. Get the file paths for the SOFT file and matrix file\n", "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# 2. 
First, let's examine the structure of the matrix file to understand its format\n", "import gzip\n", "\n", "# Peek at the first few lines of the file to understand its structure\n", "with gzip.open(matrix_file, 'rt') as file:\n", " # Read first 100 lines to find the header structure\n", " for i, line in enumerate(file):\n", " if '!series_matrix_table_begin' in line:\n", " print(f\"Found data marker at line {i}\")\n", " # Read the next line which should be the header\n", " header_line = next(file)\n", " print(f\"Header line: {header_line.strip()}\")\n", " # And the first data line\n", " first_data_line = next(file)\n", " print(f\"First data line: {first_data_line.strip()}\")\n", " break\n", " if i > 100: # Limit search to first 100 lines\n", " print(\"Matrix table marker not found in first 100 lines\")\n", " break\n", "\n", "# 3. Now try to get the genetic data with better error handling\n", "try:\n", " gene_data = get_genetic_data(matrix_file)\n", " print(gene_data.index[:20])\n", "except KeyError as e:\n", " print(f\"KeyError: {e}\")\n", " \n", " # Alternative approach: manually extract the data\n", " print(\"\\nTrying alternative approach to read the gene data:\")\n", " with gzip.open(matrix_file, 'rt') as file:\n", " # Find the start of the data\n", " for line in file:\n", " if '!series_matrix_table_begin' in line:\n", " break\n", " \n", " # Read the headers and data\n", " import pandas as pd\n", " df = pd.read_csv(file, sep='\\t', index_col=0)\n", " print(f\"Column names: {df.columns[:5]}\")\n", " print(f\"First 20 row IDs: {df.index[:20]}\")\n", " gene_data = df\n" ] }, { "cell_type": "markdown", "id": "626dc9c5", "metadata": {}, "source": [ "### Step 4: Gene Identifier Review" ] }, { "cell_type": "code", "execution_count": null, "id": "dfdba276", "metadata": {}, "outputs": [], "source": [ "# Examining the gene identifiers from the previous output\n", "# These look like numeric identifiers (e.g., 2315554, 2315633), not standard human gene symbols\n", "# Standard human gene symbols would be alphanumeric like BRCA1, TP53, etc.\n", "# These numeric IDs are likely probe IDs that need mapping to gene symbols\n", "\n", "requires_gene_mapping = True\n" ] }, { "cell_type": "markdown", "id": "08c99b99", "metadata": {}, "source": [ "### Step 5: Gene Annotation" ] }, { "cell_type": "code", "execution_count": null, "id": "379f9a6f", "metadata": {}, "outputs": [], "source": [ "# 1. Let's first examine the structure of the SOFT file before trying to parse it\n", "import gzip\n", "\n", "# Look at the first few lines of the SOFT file to understand its structure\n", "print(\"Examining SOFT file structure:\")\n", "try:\n", " with gzip.open(soft_file, 'rt') as file:\n", " # Read first 20 lines to understand the file structure\n", " for i, line in enumerate(file):\n", " if i < 20:\n", " print(f\"Line {i}: {line.strip()}\")\n", " else:\n", " break\n", "except Exception as e:\n", " print(f\"Error reading SOFT file: {e}\")\n", "\n", "# 2. 
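Before the custom parse below, a quick hedged check of the SOFT layout it relies on:\n",
"# a '^PLATFORM' entity whose annotation table is wrapped in '!platform_table_begin' /\n",
"# '!platform_table_end'. Zero counts would mean this layout assumption does not hold here.\n",
"try:\n",
"    marker_counts = {'^PLATFORM': 0, '!platform_table_begin': 0, '!platform_table_end': 0}\n",
"    with gzip.open(soft_file, 'rt') as file:\n",
"        for line in file:\n",
"            for marker in marker_counts:\n",
"                if line.startswith(marker):\n",
"                    marker_counts[marker] += 1\n",
"    print(f\"SOFT structural marker counts: {marker_counts}\")\n",
"except Exception as e:\n",
"    print(f\"Error while counting SOFT markers: {e}\")\n",
"\n",
"# 2. 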
Now let's try a more robust approach to extract the gene annotation\n", "# Instead of using the library function which failed, we'll implement a custom approach\n", "try:\n", " # First, look for the platform section which contains gene annotation\n", " platform_data = []\n", " with gzip.open(soft_file, 'rt') as file:\n", " in_platform_section = False\n", " for line in file:\n", " if line.startswith('^PLATFORM'):\n", " in_platform_section = True\n", " continue\n", " if in_platform_section and line.startswith('!platform_table_begin'):\n", " # Next line should be the header\n", " header = next(file).strip()\n", " platform_data.append(header)\n", " # Read until the end of the platform table\n", " for table_line in file:\n", " if table_line.startswith('!platform_table_end'):\n", " break\n", " platform_data.append(table_line.strip())\n", " break\n", " \n", " # If we found platform data, convert it to a DataFrame\n", " if platform_data:\n", " import pandas as pd\n", " import io\n", " platform_text = '\\n'.join(platform_data)\n", " gene_annotation = pd.read_csv(io.StringIO(platform_text), delimiter='\\t', \n", " low_memory=False, on_bad_lines='skip')\n", " print(\"\\nGene annotation preview:\")\n", " print(preview_df(gene_annotation))\n", " else:\n", " print(\"Could not find platform table in SOFT file\")\n", " \n", " # Try an alternative approach - extract mapping from other sections\n", " with gzip.open(soft_file, 'rt') as file:\n", " for line in file:\n", " if 'ANNOTATION information' in line or 'annotation information' in line:\n", " print(f\"Found annotation information: {line.strip()}\")\n", " if line.startswith('!Platform_title') or line.startswith('!platform_title'):\n", " print(f\"Platform title: {line.strip()}\")\n", " \n", "except Exception as e:\n", " print(f\"Error processing gene annotation: {e}\")\n" ] }, { "cell_type": "markdown", "id": "9ce4a1cd", "metadata": {}, "source": [ "### Step 6: Gene Identifier Mapping" ] }, { "cell_type": "code", "execution_count": null, "id": "0d7bb96e", "metadata": {}, "outputs": [], "source": [ "# 1. Determine which columns in the gene annotation contain our probe IDs and gene symbols\n", "# From the previous output, we can see:\n", "# - 'ID' column in gene_annotation contains probe IDs like 2315100, matching what we saw in gene_data\n", "# - 'gene_assignment' column contains gene symbol information\n", "\n", "# Define the columns for mapping\n", "probe_id_column = 'ID'\n", "gene_symbol_column = 'gene_assignment'\n", "\n", "# 2. 
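Before building the mapping, an illustrative aside on the 'gene_assignment' format\n",
"# this step assumes: Affymetrix-style strings are typically '///'-separated per\n",
"# transcript and '//'-delimited within each transcript. The string below is made up\n",
"# for demonstration (not read from this platform file), and the simple split is only\n",
"# a sketch; the extract_human_gene_symbols helper used further down may parse these\n",
"# strings differently.\n",
"_demo_assignment = 'NM_000518 // HBB // hemoglobin subunit beta // 11p15.4 // 3043 /// NM_000558 // HBA1 // hemoglobin subunit alpha 1 // 16p13.3 // 3039'\n",
"_demo_symbols = [t.split('//')[1].strip() for t in _demo_assignment.split('///') if '//' in t]\n",
"print('Illustrative symbols parsed from the demo string:', _demo_symbols)\n",
"\n",
"# 2. 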
Create a mapping dataframe\n",
"mapping_df = gene_annotation[[probe_id_column, gene_symbol_column]].copy()\n",
"mapping_df = mapping_df.dropna() # Remove rows with missing gene symbols\n",
"mapping_df = mapping_df.rename(columns={gene_symbol_column: 'Gene'}).astype({probe_id_column: 'str'})\n",
"\n",
"# First let's inspect some examples of the gene assignment strings\n",
"print(\"Example gene assignments:\")\n",
"for i in range(3):\n",
"    if i < len(mapping_df) and isinstance(mapping_df.iloc[i]['Gene'], str):\n",
"        print(f\"Example {i+1}: {mapping_df.iloc[i]['Gene'][:200]}...\")\n",
"\n",
"# Apply the extract_human_gene_symbols function to get gene symbols\n",
"mapping_df['Gene'] = mapping_df['Gene'].apply(extract_human_gene_symbols)\n",
"\n",
"# Remove rows with empty gene lists\n",
"mapping_df = mapping_df[mapping_df['Gene'].apply(len) > 0]\n",
"\n",
"# Preview the mapping dataframe after extraction\n",
"print(\"\\nGene mapping preview after extraction:\")\n",
"print(mapping_df.head(10))\n",
"print(f\"Total mappings with gene symbols: {len(mapping_df)}\")\n",
"\n",
"# 3. Apply gene mapping to convert probe measurements to gene expression\n",
"# Convert probe IDs in gene_data to strings to match the mapping\n",
"gene_data.index = gene_data.index.astype(str)\n",
"\n",
"# Check overlap between datasets\n",
"common_probes = set(gene_data.index) & set(mapping_df['ID'])\n",
"print(f\"\\nNumber of common probes between expression data and mapping: {len(common_probes)}\")\n",
"\n",
"# Apply the mapping to get gene expression values\n",
"gene_data_mapped = apply_gene_mapping(gene_data, mapping_df)\n",
"\n",
"# Preview the mapped gene expression data\n",
"print(\"\\nGene expression data after mapping:\")\n",
"print(f\"Shape: {gene_data_mapped.shape}\")\n",
"if not gene_data_mapped.empty:\n",
"    print(f\"First few gene symbols: {gene_data_mapped.index[:5]}\")\n",
"    print(f\"Number of genes: {len(gene_data_mapped)}\")\n",
"else:\n",
"    print(\"Warning: No gene symbols were mapped successfully.\")\n",
"    # Nothing to normalize when the mapped dataframe is empty; check that the\n",
"    # annotation 'ID' values match the probe IDs in gene_data before proceeding.\n",
"\n",
"# Save the gene expression data\n",
"os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n",
"gene_data_mapped.to_csv(out_gene_data_file)\n",
"print(f\"\\nGene expression data saved to {out_gene_data_file}\")\n"
] }, { "cell_type": "markdown", "id": "f8954bd2", "metadata": {}, "source": [ "### Step 7: Preliminary Data Linking (Probe-Level)" ] }, { "cell_type": "code", "execution_count": null, "id": "c7f33553", "metadata": {}, "outputs": [], "source": [
"# 1. Inspect the original probe-level gene_data (the probe x sample expression matrix) before linking\n",
"gene_data_index = gene_data.index.tolist()\n",
"print(f\"Original gene_data shape: {gene_data.shape}\")\n",
"print(f\"First 10 probes: {gene_data_index[:10]}\")\n",
"\n",
"# Check if original gene_data is available and well-formed\n",
"if gene_data.shape[0] == 0 or gene_data.shape[1] == 0:\n",
"    print(\"WARNING: Original gene_data is empty. Using original matrix file to get gene data again.\")\n",
"    # Try to reload gene data from matrix file\n",
"    with gzip.open(matrix_file, 'rt') as file:\n",
"        for line in file:\n",
"            if '!series_matrix_table_begin' in line:\n",
"                break\n",
"        # The next line is the ID_REF header, which read_csv consumes as column names;\n",
"        # comment='!' drops the trailing '!series_matrix_table_end' marker row\n",
"        import pandas as pd\n",
"        gene_data = pd.read_csv(file, sep='\\t', index_col=0, comment='!')\n",
"        print(f\"Reloaded gene_data shape: {gene_data.shape}\")\n",
"\n",
"# Count the mappable genes ('Gene' holds lists of symbols after extraction, so explode before grouping)\n",
"mapping_count = mapping_df.explode('Gene').groupby('Gene').size().sort_values(ascending=False)\n",
"print(f\"Top 10 mapped gene symbols: {mapping_count.head(10)}\")\n",
"\n",
"# Try another approach for mapping: keep the original probe IDs if mapping fails\n",
"# This means we'll use probe IDs as substitutes for gene symbols\n",
"print(\"\\nAttempting to create linked data using probe IDs...\")\n",
"\n",
"# 2. Link clinical and expression data\n",
"clinical_features = pd.read_csv(out_clinical_data_file)\n",
"clinical_features = clinical_features.set_index(clinical_features.columns[0]) # Set first column (feature names) as index\n",
"\n",
"# Transpose gene_data for linking (so samples are rows)\n",
"gene_data_t = gene_data.T\n",
"print(f\"Transposed gene_data shape: {gene_data_t.shape}\")\n",
"\n",
"# Link the data\n",
"linked_data = pd.concat([clinical_features.T, gene_data_t], axis=1)\n",
"print(f\"Linked data shape with probe IDs: {linked_data.shape}\")\n",
"\n",
"# Check for missing values\n",
"print(\"\\nMissing values summary:\")\n",
"trait_missing = linked_data[trait].isna().sum()\n",
"print(f\" Trait ({trait}) missing: {trait_missing} out of {len(linked_data)}\")\n",
"\n",
"# Handle missing values\n",
"if linked_data.shape[0] > 0:\n",
"    # Get gene columns (all columns except trait, Age, Gender)\n",
"    covariate_cols = [trait]\n",
"    if 'Age' in linked_data.columns:\n",
"        covariate_cols.append('Age')\n",
"    if 'Gender' in linked_data.columns:\n",
"        covariate_cols.append('Gender')\n",
"    \n",
"    gene_cols = [col for col in linked_data.columns if col not in covariate_cols]\n",
"    \n",
"    # Print missing value statistics before cleaning\n",
"    gene_missing_pct = linked_data[gene_cols].isna().mean()\n",
"    print(f\" Genes with >20% missing: {sum(gene_missing_pct > 0.2)} out of {len(gene_cols)}\")\n",
"    \n",
"    sample_missing_pct = linked_data[gene_cols].isna().mean(axis=1)\n",
"    print(f\" Samples with >5% missing genes: {sum(sample_missing_pct > 0.05)} out of {len(linked_data)}\")\n",
"    \n",
"    # Apply missing value handling\n",
"    cleaned_data = handle_missing_values(linked_data, trait)\n",
"    print(f\"Data shape after handling missing values: {cleaned_data.shape}\")\n",
"    \n",
"    # Evaluate bias in trait and demographic features\n",
"    if len(cleaned_data) > 0:\n",
"        is_trait_biased, cleaned_data = judge_and_remove_biased_features(cleaned_data, trait)\n",
"        \n",
"        # Final validation and save\n",
"        is_usable = validate_and_save_cohort_info(\n",
"            is_final=True, \n",
"            cohort=cohort, \n",
"            info_path=json_path, \n",
"            is_gene_available=True, \n",
"            is_trait_available=True, \n",
"            is_biased=is_trait_biased, \n",
"            df=cleaned_data,\n",
"            note=f\"Dataset contains only {trait} patients with no controls. Used probe IDs instead of gene symbols.\"\n",
"        )\n",
"        \n",
"        # Save if usable\n",
"        if is_usable and len(cleaned_data) > 0:\n",
"            os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n",
"            cleaned_data.to_csv(out_data_file)\n",
"            print(f\"Linked data saved to {out_data_file}\")\n",
"        else:\n",
"            print(\"Data was determined to be biased or empty and was not saved\")\n",
"    else:\n",
"        print(\"No data remains after handling missing values.\")\n",
"        # Record cohort info\n",
"        validate_and_save_cohort_info(\n",
"            is_final=True, \n",
"            cohort=cohort, \n",
"            info_path=json_path, \n",
"            is_gene_available=True, \n",
"            is_trait_available=True, \n",
"            is_biased=True, \n",
"            df=pd.DataFrame(),\n",
"            note=\"Dataset produced an empty dataframe after handling missing values.\"\n",
"        )\n",
"else:\n",
"    print(\"Linked data is empty.\")\n",
"    # Record cohort info\n",
"    validate_and_save_cohort_info(\n",
"        is_final=True, \n",
"        cohort=cohort, \n",
"        info_path=json_path, \n",
"        is_gene_available=is_gene_available, \n",
"        is_trait_available=is_trait_available, \n",
"        is_biased=True, \n",
"        df=pd.DataFrame(),\n",
"        note=\"Failed to create valid linked data with gene expression values.\"\n",
"    )\n"
] }, { "cell_type": "markdown", "id": "4b7e5dd9", "metadata": {}, "source": [ "### Step 8: Data Normalization and Linking" ] }, { "cell_type": "code", "execution_count": null, "id": "8d27acd0", "metadata": {}, "outputs": [], "source": [
"# 1. Normalize gene symbols in the gene expression data\n",
"# Use the symbol-level data produced by the mapping in Step 6, not the probe-level matrix\n",
"normalized_gene_data = normalize_gene_symbols_in_index(gene_data_mapped)\n",
"print(f\"Normalized gene data shape: {normalized_gene_data.shape}\")\n",
"print(\"First few genes with their expression values after normalization:\")\n",
"print(normalized_gene_data.head())\n",
"\n",
"# Save the normalized gene data\n",
"os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n",
"normalized_gene_data.to_csv(out_gene_data_file)\n",
"print(f\"Normalized gene data saved to {out_gene_data_file}\")\n",
"\n",
"# 2. Check if trait data is available before proceeding with clinical data extraction\n",
"if trait_row is None:\n",
"    print(\"Trait row is None. 
Cannot extract trait information from clinical data.\")\n", " # Create an empty dataframe for clinical features\n", " clinical_features = pd.DataFrame()\n", " \n", " # Create an empty dataframe for linked data\n", " linked_data = pd.DataFrame()\n", " \n", " # Validate and save cohort info\n", " validate_and_save_cohort_info(\n", " is_final=True, \n", " cohort=cohort, \n", " info_path=json_path, \n", " is_gene_available=True, \n", " is_trait_available=False, # Trait data is not available\n", " is_biased=True, # Not applicable but required\n", " df=pd.DataFrame(), # Empty dataframe\n", " note=f\"Dataset contains gene expression data but lacks clear trait indicators for {trait} status.\"\n", " )\n", " print(\"Data was determined to be unusable due to missing trait indicators and was not saved\")\n", "else:\n", " try:\n", " # Get the file paths for the matrix file to extract clinical data\n", " _, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", " \n", " # Get raw clinical data from the matrix file\n", " _, clinical_raw = get_background_and_clinical_data(matrix_file)\n", " \n", " # Verify clinical data structure\n", " print(\"Raw clinical data shape:\", clinical_raw.shape)\n", " \n", " # Extract clinical features using the defined conversion functions\n", " clinical_features = geo_select_clinical_features(\n", " clinical_df=clinical_raw,\n", " trait=trait,\n", " trait_row=trait_row,\n", " convert_trait=convert_trait,\n", " age_row=age_row,\n", " convert_age=convert_age,\n", " gender_row=gender_row,\n", " convert_gender=convert_gender\n", " )\n", " \n", " print(\"Clinical features:\")\n", " print(clinical_features)\n", " \n", " # Save clinical features to file\n", " os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n", " clinical_features.to_csv(out_clinical_data_file)\n", " print(f\"Clinical features saved to {out_clinical_data_file}\")\n", " \n", " # 3. Link clinical and genetic data\n", " linked_data = geo_link_clinical_genetic_data(clinical_features, normalized_gene_data)\n", " print(f\"Linked data shape: {linked_data.shape}\")\n", " print(\"Linked data preview (first 5 rows, first 5 columns):\")\n", " print(linked_data.iloc[:5, :5])\n", " \n", " # 4. Handle missing values\n", " print(\"Missing values before handling:\")\n", " print(f\" Trait ({trait}) missing: {linked_data[trait].isna().sum()} out of {len(linked_data)}\")\n", " if 'Age' in linked_data.columns:\n", " print(f\" Age missing: {linked_data['Age'].isna().sum()} out of {len(linked_data)}\")\n", " if 'Gender' in linked_data.columns:\n", " print(f\" Gender missing: {linked_data['Gender'].isna().sum()} out of {len(linked_data)}\")\n", " \n", " gene_cols = [col for col in linked_data.columns if col not in [trait, 'Age', 'Gender']]\n", " print(f\" Genes with >20% missing: {sum(linked_data[gene_cols].isna().mean() > 0.2)}\")\n", " print(f\" Samples with >5% missing genes: {sum(linked_data[gene_cols].isna().mean(axis=1) > 0.05)}\")\n", " \n", " cleaned_data = handle_missing_values(linked_data, trait)\n", " print(f\"Data shape after handling missing values: {cleaned_data.shape}\")\n", " \n", " # 5. Evaluate bias in trait and demographic features\n", " is_trait_biased = False\n", " if len(cleaned_data) > 0:\n", " trait_biased, cleaned_data = judge_and_remove_biased_features(cleaned_data, trait)\n", " is_trait_biased = trait_biased\n", " else:\n", " print(\"No data remains after handling missing values.\")\n", " is_trait_biased = True\n", " \n", " # 6. 
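Final validation comes next; before it, a quick hedged look at the case/control\n",
"        # balance that the bias check above evaluated (this assumes the trait column is\n",
"        # binary-coded 0/1, as produced by convert_trait):\n",
"        if len(cleaned_data) > 0 and trait in cleaned_data.columns:\n",
"            print(f\"{trait} value counts before final validation:\")\n",
"            print(cleaned_data[trait].value_counts(dropna=False))\n",
"        \n",
"        # 6. 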
Final validation and save\n", " is_usable = validate_and_save_cohort_info(\n", " is_final=True, \n", " cohort=cohort, \n", " info_path=json_path, \n", " is_gene_available=True, \n", " is_trait_available=True, \n", " is_biased=is_trait_biased, \n", " df=cleaned_data,\n", " note=f\"Dataset contains only {trait} patients with no healthy controls, making it unsuitable for case-control analysis.\"\n", " )\n", " \n", " # 7. Save if usable\n", " if is_usable and len(cleaned_data) > 0:\n", " os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n", " cleaned_data.to_csv(out_data_file)\n", " print(f\"Linked data saved to {out_data_file}\")\n", " else:\n", " print(\"Data was determined to be unusable or empty and was not saved\")\n", " \n", " except Exception as e:\n", " print(f\"Error processing data: {e}\")\n", " # Handle the error case by still recording cohort info\n", " validate_and_save_cohort_info(\n", " is_final=True, \n", " cohort=cohort, \n", " info_path=json_path, \n", " is_gene_available=True, \n", " is_trait_available=False, # Mark as not available due to processing issues\n", " is_biased=True, \n", " df=pd.DataFrame(), # Empty dataframe\n", " note=f\"Error processing data for {trait}: {str(e)}\"\n", " )\n", " print(\"Data was determined to be unusable and was not saved\")" ] } ], "metadata": {}, "nbformat": 4, "nbformat_minor": 5 }