{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "87a07464", "metadata": {}, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n", "\n", "# Path Configuration\n", "from tools.preprocess import *\n", "\n", "# Processing context\n", "trait = \"Thymoma\"\n", "cohort = \"GSE29695\"\n", "\n", "# Input paths\n", "in_trait_dir = \"../../input/GEO/Thymoma\"\n", "in_cohort_dir = \"../../input/GEO/Thymoma/GSE29695\"\n", "\n", "# Output paths\n", "out_data_file = \"../../output/preprocess/Thymoma/GSE29695.csv\"\n", "out_gene_data_file = \"../../output/preprocess/Thymoma/gene_data/GSE29695.csv\"\n", "out_clinical_data_file = \"../../output/preprocess/Thymoma/clinical_data/GSE29695.csv\"\n", "json_path = \"../../output/preprocess/Thymoma/cohort_info.json\"\n" ] }, { "cell_type": "markdown", "id": "5247850e", "metadata": {}, "source": [ "### Step 1: Initial Data Loading" ] }, { "cell_type": "code", "execution_count": null, "id": "73bdd0bd", "metadata": {}, "outputs": [], "source": [ "# 1. Check what files are actually in the directory\n", "import os\n", "print(\"Files in the directory:\")\n", "files = os.listdir(in_cohort_dir)\n", "print(files)\n", "\n", "# 2. Find appropriate files with more flexible pattern matching\n", "soft_file = None\n", "matrix_file = None\n", "\n", "for file in files:\n", " file_path = os.path.join(in_cohort_dir, file)\n", " # Look for files that might contain SOFT or matrix data with various possible extensions\n", " if 'soft' in file.lower() or 'family' in file.lower() or file.endswith('.soft.gz'):\n", " soft_file = file_path\n", " if 'matrix' in file.lower() or file.endswith('.txt.gz') or file.endswith('.tsv.gz'):\n", " matrix_file = file_path\n", "\n", "if not soft_file:\n", " print(\"Warning: Could not find a SOFT file. Using the first .gz file as fallback.\")\n", " gz_files = [f for f in files if f.endswith('.gz')]\n", " if gz_files:\n", " soft_file = os.path.join(in_cohort_dir, gz_files[0])\n", "\n", "if not matrix_file:\n", " print(\"Warning: Could not find a matrix file. Using the second .gz file as fallback if available.\")\n", " gz_files = [f for f in files if f.endswith('.gz')]\n", " if len(gz_files) > 1 and soft_file != os.path.join(in_cohort_dir, gz_files[1]):\n", " matrix_file = os.path.join(in_cohort_dir, gz_files[1])\n", " elif len(gz_files) == 1 and not soft_file:\n", " matrix_file = os.path.join(in_cohort_dir, gz_files[0])\n", "\n", "print(f\"SOFT file: {soft_file}\")\n", "print(f\"Matrix file: {matrix_file}\")\n", "\n", "# 3. 
Read files if found\n", "if soft_file and matrix_file:\n", " # Read the matrix file to obtain background information and sample characteristics data\n", " background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n", " clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", " \n", " try:\n", " background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", " \n", " # Obtain the sample characteristics dictionary from the clinical dataframe\n", " sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", " \n", " # Explicitly print out all the background information and the sample characteristics dictionary\n", " print(\"Background Information:\")\n", " print(background_info)\n", " print(\"Sample Characteristics Dictionary:\")\n", " print(sample_characteristics_dict)\n", " except Exception as e:\n", " print(f\"Error processing files: {e}\")\n", " # Try swapping files if first attempt fails\n", " print(\"Trying to swap SOFT and matrix files...\")\n", " temp = soft_file\n", " soft_file = matrix_file\n", " matrix_file = temp\n", " try:\n", " background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", " sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", " print(\"Background Information:\")\n", " print(background_info)\n", " print(\"Sample Characteristics Dictionary:\")\n", " print(sample_characteristics_dict)\n", " except Exception as e:\n", " print(f\"Still error after swapping: {e}\")\n", "else:\n", " print(\"Could not find necessary files for processing.\")\n" ] }, { "cell_type": "markdown", "id": "ce6a30c5", "metadata": {}, "source": [ "### Step 2: Dataset Analysis and Clinical Feature Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "ba957582", "metadata": {}, "outputs": [], "source": [ "# 1. Gene Expression Data Availability\n", "is_gene_available = True # Based on the series title and summary, this dataset contains gene expression data\n", "\n", "# 2. Variable Availability and Data Type Conversion\n", "# 2.1 Data Availability\n", "trait_row = 1 # \"type\" field indicates thymic tumor type\n", "age_row = None # Age data is not available\n", "gender_row = None # Gender data is not available\n", "\n", "# 2.2 Data Type Conversion Functions\n", "def convert_trait(value):\n", " if not isinstance(value, str):\n", " return None\n", " \n", " # Extract value after colon\n", " if ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " \n", " # Convert to binary (0 for non-thymoma, 1 for thymoma)\n", " # Based on the values in the sample characteristics\n", " # \"CL\" refers to Cell Line, not a tumor sample\n", " if value == \"CL\":\n", " return 0\n", " else:\n", " return 1 # All other types (A, AB, B1, B2, B3, Mixed) are thymic tumors\n", "\n", "def convert_age(value):\n", " # Age data is not available\n", " return None\n", "\n", "def convert_gender(value):\n", " # Gender data is not available\n", " return None\n", "\n", "# 3. Save Metadata\n", "# Determine trait data availability\n", "is_trait_available = trait_row is not None\n", "validate_and_save_cohort_info(\n", " is_final=False,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", ")\n", "\n", "# 4. 
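Clinical Feature Extraction\n",
"# Quick illustrative check of the conversion logic before extraction (a hedged addition, not part of the\n",
"# original pipeline); the example values come from the sample characteristics printed in Step 1.\n",
"for example_value in ['type: CL', 'type: B2', 'type: Mixed AB']:\n",
"    print(example_value, '->', convert_trait(example_value))\n",
"\n",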
"if trait_row is not None:\n",
" try:\n",
" # The per-sample characteristics table was already loaded into clinical_data in Step 1.\n",
" # The unique values per characteristics row (from the Step 1 output) are kept here for reference:\n",
" sample_chars_dict = {0: ['tissue: Fresh Frozen Human Tumors', 'tissue: Cell Line'], \n",
" 1: ['type: B1', 'type: Mixed AB', 'type: CL', 'type: B2', 'type: B3', \n",
" 'type: AB', 'type: A/B', 'type: B1/B2', 'type: A'], \n",
" 2: ['category: GII', 'category: GI', 'category: CL', 'category: GIII'], \n",
" 3: ['batch group: BATCH 1', 'batch group: BATCH 2', 'batch group: BATCH 3'], \n",
" 4: ['stage i/ii, iii/iv, or na = not applicable/unknown: III_IV', \n",
" 'stage i/ii, iii/iv, or na = not applicable/unknown: NA', \n",
" 'stage i/ii, iii/iv, or na = not applicable/unknown: I_II'], \n",
" 5: ['relapse no, yes, or na = not applicable/unknown: NA', \n",
" 'relapse no, yes, or na = not applicable/unknown: NO', \n",
" 'relapse no, yes, or na = not applicable/unknown: YES'], \n",
" 6: ['metastasis no, yes, or na = not applicable/unknown: NA', \n",
" 'metastasis no, yes, or na = not applicable/unknown: YES', \n",
" 'metastasis no, yes, or na = not applicable/unknown: NO']}\n",
" \n",
" # Extract clinical features from the per-sample clinical data loaded in Step 1\n",
" # (rebuilding a DataFrame from the unique-values dictionary would lose the per-sample columns)\n",
" selected_clinical_df = geo_select_clinical_features(\n",
" clinical_df=clinical_data,\n",
" trait=trait,\n",
" trait_row=trait_row,\n",
" convert_trait=convert_trait,\n",
" age_row=age_row,\n",
" convert_age=convert_age,\n",
" gender_row=gender_row,\n",
" convert_gender=convert_gender\n",
" )\n",
" \n",
" # Preview the dataframe\n",
" preview = preview_df(selected_clinical_df)\n",
" print(\"Preview of selected clinical features:\")\n",
" print(preview)\n",
" \n",
" # Save to CSV\n",
" os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n",
" selected_clinical_df.to_csv(out_clinical_data_file)\n",
" print(f\"Clinical data saved to {out_clinical_data_file}\")\n",
" except Exception as e:\n",
" print(f\"Error processing clinical data: {e}\")\n",
" # If an error occurs, record that trait data could not be processed for this cohort\n",
" is_trait_available = False\n",
" validate_and_save_cohort_info(\n",
" is_final=False,\n",
" cohort=cohort,\n",
" info_path=json_path,\n",
" is_gene_available=is_gene_available,\n",
" is_trait_available=is_trait_available\n",
" )\n"
] }, { "cell_type": "markdown", "id": "e835e6b0", "metadata": {}, "source": [ "### Step 3: Gene Data Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "e6bcc1a8", "metadata": {}, "outputs": [], "source": [
"# 1. First get the path to the soft and matrix files\n",
"soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n",
"\n",
"# 2. Looking more carefully at the background information\n",
"# The series may be a SuperSeries, in which case the matrix file would not contain direct gene expression data\n",
"# Investigate the SOFT file for subseries references before attempting extraction\n",
"print(\"This appears to be a SuperSeries. Looking at the SOFT file to find potential subseries:\")\n",
"\n",
"# Open the SOFT file to try to identify subseries\n",
"with gzip.open(soft_file, 'rt') as f:\n",
" subseries_lines = []\n",
" for i, line in enumerate(f):\n",
" if 'Series_relation' in line and 'SuperSeries of' in line:\n",
" subseries_lines.append(line.strip())\n",
" if i > 1000: # Limit search to first 1000 lines\n",
" break\n",
"\n",
"# Display the subseries found\n",
"if subseries_lines:\n",
" print(\"Found potential subseries references:\")\n",
" for line in subseries_lines:\n",
" print(line)\n",
"else:\n",
" print(\"No subseries references found in the first 1000 lines of the SOFT file.\")\n",
"\n",
"# Attempt gene data extraction; if this is truly a SuperSeries, the matrix file may not contain expression values\n",
"try:\n",
" gene_data = get_genetic_data(matrix_file)\n",
" print(\"\\nGene data extraction result:\")\n",
" print(\"Number of rows:\", len(gene_data))\n",
" print(\"First 20 gene/probe identifiers:\")\n",
" print(gene_data.index[:20])\n",
"except Exception as e:\n",
" print(f\"Error extracting gene data: {e}\")\n",
" print(\"The extraction error suggests this may be a SuperSeries without direct gene expression data.\")\n"
] }, { "cell_type": "markdown", "id": "5361d145", "metadata": {}, "source": [ "### Step 4: Gene Identifier Review" ] }, { "cell_type": "code", "execution_count": null, "id": "c51bedac", "metadata": {}, "outputs": [], "source": [
"# Examining the gene identifiers, I can see these are Illumina probe IDs (starting with \"ILMN_\")\n",
"# These are not human gene symbols and will need to be mapped to proper gene symbols\n",
"\n",
"requires_gene_mapping = True\n"
] }, { "cell_type": "markdown", "id": "13305594", "metadata": {}, "source": [ "### Step 5: Gene Annotation" ] }, { "cell_type": "code", "execution_count": null, "id": "48f077bf", "metadata": {}, "outputs": [], "source": [
"# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.\n",
"gene_annotation = get_gene_annotation(soft_file)\n",
"\n",
"# 2. Use the 'preview_df' function from the library to preview the data and print out the results.\n",
"print(\"Gene annotation preview:\")\n",
"print(preview_df(gene_annotation))\n"
] }, { "cell_type": "markdown", "id": "2ff46b59", "metadata": {}, "source": [ "### Step 6: Gene Identifier Mapping" ] }, { "cell_type": "code", "execution_count": null, "id": "7fec723a", "metadata": {}, "outputs": [], "source": [
"# 1. Observe the gene expression identifiers and annotation data to identify corresponding columns\n",
"# Based on examination of the gene annotation preview:\n",
"# - The 'ID' or 'Probe_Id' column in the annotation contains the Illumina probe IDs (ILMN_*)\n",
"# - The 'Symbol' column contains the human gene symbols\n",
"\n",
"# Looking at the gene identifiers in the gene expression data, they match the 'ID' column in the annotation\n",
"# The ID column is the one we want to use for mapping\n",
"\n",
"# 2. Get a gene mapping dataframe by extracting the gene identifier and symbol columns\n",
"gene_mapping = get_gene_mapping(gene_annotation, prob_col='ID', gene_col='Symbol')\n",
"\n",
"# Print the first few rows of the mapping to verify\n",
"print(\"Gene mapping preview:\")\n",
"print(gene_mapping.head())\n",
"\n",
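"# Conceptual illustration of many-to-one probe-to-symbol aggregation on a tiny toy frame (a hedged sketch:\n",
"# the probe IDs are made up and mean aggregation is only an assumption; apply_gene_mapping below performs\n",
"# the library's actual probe-to-gene conversion).\n",
"toy_expr = pd.DataFrame({'S1': [1.0, 3.0], 'S2': [2.0, 4.0]}, index=['ILMN_0001', 'ILMN_0002'])\n",
"toy_map = pd.Series({'ILMN_0001': 'GAPDH', 'ILMN_0002': 'GAPDH'}, name='Gene')\n",
"print(toy_expr.groupby(toy_map).mean())\n",
"\n",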
"# 3. Apply the gene mapping to convert probe-level measurements to gene expression data\n",
"gene_data = apply_gene_mapping(gene_data, gene_mapping)\n",
"\n",
"# Print information about the gene expression data\n",
"print(\"\\nGene expression data after mapping:\")\n",
"print(\"Number of genes:\", len(gene_data))\n",
"print(\"First 5 gene symbols:\", gene_data.index[:5].tolist())\n",
"print(\"Number of samples:\", gene_data.shape[1])\n",
"\n",
"# Save the gene expression data\n",
"os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n",
"gene_data.to_csv(out_gene_data_file)\n",
"print(f\"Gene expression data saved to {out_gene_data_file}\")\n"
] }, { "cell_type": "markdown", "id": "9510e25e", "metadata": {}, "source": [ "### Step 7: Data Normalization and Linking" ] }, { "cell_type": "code", "execution_count": null, "id": "051f5f7a", "metadata": {}, "outputs": [], "source": [
"# 1. Normalize gene symbols in the gene expression data\n",
"gene_data = normalize_gene_symbols_in_index(gene_data)\n",
"print(f\"Shape of gene data after normalization: {gene_data.shape}\")\n",
"print(f\"Gene data column names (sample IDs): {gene_data.columns.tolist()[:5]}...\") # Show first 5 sample IDs\n",
"\n",
"# 2. Load the clinical data that was saved in step 2\n",
"clinical_df = pd.read_csv(out_clinical_data_file, index_col=0)\n",
"print(\"Clinical data loaded from file:\")\n",
"print(clinical_df)\n",
"\n",
"# Examine the matrix file to get the actual sample IDs\n",
"soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n",
"try:\n",
" # Read the first few lines of the matrix file to find sample IDs\n",
" with gzip.open(matrix_file, 'rt') as f:\n",
" for i, line in enumerate(f):\n",
" if '!Sample_geo_accession' in line:\n",
" sample_ids = line.strip().split('\\t')[1:]\n",
" sample_ids = [s.strip('\"') for s in sample_ids] # Strip the quotes GEO places around accessions\n",
" print(f\"Sample IDs from matrix file: {sample_ids[:5]}...\")\n",
" break\n",
" if i > 100: # Limit search to first 100 lines\n",
" break\n",
" \n",
" # Create a new clinical dataframe with the correct sample IDs as columns\n",
" new_clinical_df = pd.DataFrame(index=[trait], columns=sample_ids)\n",
" \n",
" # Fill in the values. Cell lines cannot be identified from the GSM accessions alone, so every GEO sample\n",
" # defaults to 1 here; Step 8 rebuilds the clinical data from the sample characteristics, where cell lines\n",
" # are labelled \"CL\" and can be assigned 0.\n",
" for col in new_clinical_df.columns:\n",
" if \"GSM\" in col: # This is a GEO sample ID\n",
" new_clinical_df.loc[trait, col] = 1 # Default: 1 for all samples at this stage\n",
" \n",
" # Print the new clinical dataframe for verification\n",
" print(\"New clinical dataframe with correct sample IDs:\")\n",
" print(new_clinical_df)\n",
" \n",
" # Save the updated clinical data\n",
" new_clinical_df.to_csv(out_clinical_data_file)\n",
" clinical_df = new_clinical_df\n",
" \n",
"except Exception as e:\n",
" print(f\"Error accessing sample IDs from matrix file: {e}\")\n",
" # If we can't get sample IDs, create a simple dataframe with gene_data columns\n",
" clinical_df = pd.DataFrame(index=[trait], columns=gene_data.columns)\n",
" clinical_df.loc[trait, :] = 1 # Set all samples as having the trait\n",
" clinical_df.to_csv(out_clinical_data_file)\n",
" print(\"Created simple clinical dataframe using gene data column names:\")\n",
" print(clinical_df)\n",
"\n",
"# 3. Link clinical and genetic data using the library helper (the same linking approach as in Step 8), so\n",
"# that samples become rows and the trait becomes a column before missing-value handling\n",
"linked_data = geo_link_clinical_genetic_data(clinical_df, gene_data)\n",
"print(f\"Shape of linked data before cleaning: {linked_data.shape}\")\n",
"\n",
"# 4. Handle missing values in the linked data\n",
"linked_data_cleaned = handle_missing_values(linked_data, trait)\n",
"print(f\"Shape of linked data after handling missing values: {linked_data_cleaned.shape}\")\n",
"\n",
"# 5. Check if the trait and demographic features are biased\n",
"is_trait_biased, unbiased_linked_data = judge_and_remove_biased_features(linked_data_cleaned, trait)\n",
"\n",
"# 6. Validate the dataset and save cohort information\n",
"note = \"Dataset contains gene expression data from thymic tumors. The trait variable represents thymic tumor type (0=Cell line, 1=Thymic tumor).\"\n",
"is_usable = validate_and_save_cohort_info(\n",
" is_final=True,\n",
" cohort=cohort,\n",
" info_path=json_path,\n",
" is_gene_available=True,\n",
" is_trait_available=True,\n",
" is_biased=is_trait_biased,\n",
" df=unbiased_linked_data,\n",
" note=note\n",
")\n",
"\n",
"# 7. Save the linked data if it's usable\n",
"if is_usable:\n",
" os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n",
" unbiased_linked_data.to_csv(out_data_file)\n",
" print(f\"Saved processed linked data to {out_data_file}\")\n",
"else:\n",
" print(\"Dataset validation failed. Final linked data not saved.\")\n"
] }, { "cell_type": "markdown", "id": "77a9ef4e", "metadata": {}, "source": [ "### Step 8: Data Normalization and Linking with Rebuilt Clinical Data" ] }, { "cell_type": "code", "execution_count": null, "id": "108ff59c", "metadata": {}, "outputs": [], "source": [
"# 1. Normalize gene symbols in the gene expression data\n",
"gene_data = normalize_gene_symbols_in_index(gene_data)\n",
"print(f\"Shape of gene data after normalization: {gene_data.shape}\")\n",
"print(f\"First few gene symbols: {gene_data.index[:5].tolist()}\")\n",
"print(f\"Sample IDs in gene data: {gene_data.columns[:5].tolist()}...\")\n",
"\n",
"# Save the normalized gene data\n",
"os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n",
"gene_data.to_csv(out_gene_data_file)\n",
"print(f\"Normalized gene data saved to {out_gene_data_file}\")\n",
"\n",
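"# Hedged sanity check (not part of the original pipeline): report how many sample IDs are shared between\n",
"# the normalized gene data and the current clinical table, before the clinical data is rebuilt below.\n",
"shared_samples = set(gene_data.columns) & set(clinical_df.columns)\n",
"print(f\"Samples shared by gene data and current clinical data: {len(shared_samples)} of {gene_data.shape[1]}\")\n",
"\n",
"# 2. 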
Reload clinical data and properly process it for this Thymoma dataset\n", "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "background_info, clinical_data = get_background_and_clinical_data(matrix_file)\n", "\n", "# Define conversion functions specifically for the Thymoma dataset\n", "def convert_trait(value):\n", " \"\"\"Convert to binary: 0 for cell lines, 1 for tumor samples\"\"\"\n", " if not isinstance(value, str):\n", " return None\n", " \n", " if \":\" in value:\n", " value = value.split(\":\", 1)[1].strip()\n", " \n", " # Cell lines are encoded as \"CL\"\n", " if value == \"CL\":\n", " return 0\n", " # All other types are tumor samples\n", " else:\n", " return 1\n", "\n", "# Extract the clinical data using the appropriate rows based on our analysis\n", "clinical_df = geo_select_clinical_features(\n", " clinical_data,\n", " trait=trait,\n", " trait_row=1, # Row for tumor type (from the sample characteristics dictionary)\n", " convert_trait=convert_trait,\n", " # No age or gender data available in this dataset\n", " age_row=None,\n", " convert_age=None,\n", " gender_row=None,\n", " convert_gender=None\n", ")\n", "\n", "print(\"Clinical data preview:\")\n", "print(preview_df(clinical_df))\n", "\n", "# Save the clinical data\n", "os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n", "clinical_df.to_csv(out_clinical_data_file)\n", "print(f\"Clinical data saved to {out_clinical_data_file}\")\n", "\n", "# 3. Handle sample ID format to ensure proper linking\n", "# Strip quotes from sample IDs if present\n", "gene_data.columns = gene_data.columns.str.strip('\"')\n", "clinical_df.columns = clinical_df.columns.str.strip('\"')\n", "\n", "# Link clinical and genetic data\n", "linked_data = geo_link_clinical_genetic_data(clinical_df, gene_data)\n", "print(f\"Shape of linked data: {linked_data.shape}\")\n", "\n", "# 4. Handle missing values in the linked data\n", "linked_data_cleaned = handle_missing_values(linked_data, trait)\n", "print(f\"Shape of linked data after handling missing values: {linked_data_cleaned.shape}\")\n", "\n", "# 5. Check if the trait and demographic features are biased\n", "is_trait_biased, unbiased_linked_data = judge_and_remove_biased_features(linked_data_cleaned, trait)\n", "\n", "# 6. Validate the dataset and save cohort information\n", "note = \"Dataset contains gene expression data from thymic tumors. The trait variable represents tumor type (0=Cell line, 1=Thymic tumor).\"\n", "is_usable = validate_and_save_cohort_info(\n", " is_final=True,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=True,\n", " is_trait_available=True,\n", " is_biased=is_trait_biased,\n", " df=unbiased_linked_data,\n", " note=note\n", ")\n", "\n", "# 7. Save the linked data if it's usable\n", "if is_usable:\n", " os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n", " unbiased_linked_data.to_csv(out_data_file)\n", " print(f\"Saved processed linked data to {out_data_file}\")\n", "else:\n", " print(\"Dataset validation failed. Final linked data not saved.\")" ] } ], "metadata": {}, "nbformat": 4, "nbformat_minor": 5 }