{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "a39c3019", "metadata": {}, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n", "\n", "# Path Configuration\n", "from tools.preprocess import *\n", "\n", "# Processing context\n", "trait = \"X-Linked_Lymphoproliferative_Syndrome\"\n", "cohort = \"GSE180394\"\n", "\n", "# Input paths\n", "in_trait_dir = \"../../input/GEO/X-Linked_Lymphoproliferative_Syndrome\"\n", "in_cohort_dir = \"../../input/GEO/X-Linked_Lymphoproliferative_Syndrome/GSE180394\"\n", "\n", "# Output paths\n", "out_data_file = \"../../output/preprocess/X-Linked_Lymphoproliferative_Syndrome/GSE180394.csv\"\n", "out_gene_data_file = \"../../output/preprocess/X-Linked_Lymphoproliferative_Syndrome/gene_data/GSE180394.csv\"\n", "out_clinical_data_file = \"../../output/preprocess/X-Linked_Lymphoproliferative_Syndrome/clinical_data/GSE180394.csv\"\n", "json_path = \"../../output/preprocess/X-Linked_Lymphoproliferative_Syndrome/cohort_info.json\"\n" ] }, { "cell_type": "markdown", "id": "01f827ee", "metadata": {}, "source": [ "### Step 1: Initial Data Loading" ] }, { "cell_type": "code", "execution_count": null, "id": "7fcf513f", "metadata": {}, "outputs": [], "source": [ "# 1. Let's first list the directory contents to understand what files are available\n", "import os\n", "\n", "print(\"Files in the cohort directory:\")\n", "files = os.listdir(in_cohort_dir)\n", "print(files)\n", "\n", "# Adapt file identification to handle different naming patterns\n", "soft_files = [f for f in files if 'soft' in f.lower() or '.soft' in f.lower() or '_soft' in f.lower()]\n", "matrix_files = [f for f in files if 'matrix' in f.lower() or '.matrix' in f.lower() or '_matrix' in f.lower()]\n", "\n", "# If no files with these patterns are found, look for alternative file types\n", "if not soft_files:\n", " soft_files = [f for f in files if f.endswith('.txt') or f.endswith('.gz')]\n", "if not matrix_files:\n", " matrix_files = [f for f in files if f.endswith('.txt') or f.endswith('.gz')]\n", "\n", "print(\"Identified SOFT files:\", soft_files)\n", "print(\"Identified matrix files:\", matrix_files)\n", "\n", "# Use the first files found, if any\n", "if len(soft_files) > 0 and len(matrix_files) > 0:\n", " soft_file = os.path.join(in_cohort_dir, soft_files[0])\n", " matrix_file = os.path.join(in_cohort_dir, matrix_files[0])\n", " \n", " # 2. Read the matrix file to obtain background information and sample characteristics data\n", " background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n", " clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", " background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", " \n", " # 3. Obtain the sample characteristics dictionary from the clinical dataframe\n", " sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", " \n", " # 4. 
Explicitly print out all the background information and the sample characteristics dictionary\n", " print(\"\\nBackground Information:\")\n", " print(background_info)\n", " print(\"\\nSample Characteristics Dictionary:\")\n", " print(sample_characteristics_dict)\n", "else:\n", " print(\"No appropriate files found in the directory.\")\n" ] }, { "cell_type": "markdown", "id": "2921f0e7", "metadata": {}, "source": [ "### Step 2: Dataset Analysis and Clinical Feature Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "fa7dbf2c", "metadata": {}, "outputs": [], "source": [ "# 1. Gene Expression Data Availability\n", "# Yes, this dataset contains gene expression data as it mentions using microarrays to analyze the transcriptome\n", "# and specifically states \"Profiling was performed on Affymetrix ST2.1 microarray platform\"\n", "is_gene_available = True\n", "\n", "# 2. Variable Availability and Data Type Conversion\n", "# 2.1 Data Availability\n", "\n", "# For trait: We can use the sample group from row 0 which indicates disease status\n", "trait_row = 0\n", "\n", "# For age: No age information is available in the sample characteristics\n", "age_row = None\n", "\n", "# For gender: No gender information is available in the sample characteristics\n", "gender_row = None\n", "\n", "# 2.2 Data Type Conversion\n", "def convert_trait(value):\n", " \"\"\"Convert sample group values to binary trait values (0 for healthy, 1 for disease)\"\"\"\n", " if pd.isna(value) or value is None:\n", " return None\n", " \n", " # Extract the value after the colon if present\n", " if \":\" in value:\n", " value = value.split(\":\", 1)[1].strip()\n", " \n", " # Living donor is considered healthy (0), all others have disease (1)\n", " if \"Living donor\" in value:\n", " return 0\n", " else:\n", " return 1\n", "\n", "def convert_age(value):\n", " \"\"\"Placeholder function for age conversion - not used as age data is not available\"\"\"\n", " return None\n", "\n", "def convert_gender(value):\n", " \"\"\"Placeholder function for gender conversion - not used as gender data is not available\"\"\"\n", " return None\n", "\n", "# 3. Save Metadata\n", "is_trait_available = trait_row is not None\n", "validate_and_save_cohort_info(\n", " is_final=False,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", ")\n", "\n", "# 4. 
Clinical Feature Extraction\n",
    "if trait_row is not None:\n",
    "    # We need to properly load the clinical data first from the matrix file\n",
    "    try:\n",
    "        # Assume clinical_data is already loaded from a previous step;\n",
    "        # if it's not available, we'll handle the exception\n",
    "        if 'clinical_data' in locals() or 'clinical_data' in globals():\n",
    "            # Use the geo_select_clinical_features function to extract clinical features\n",
    "            clinical_features = geo_select_clinical_features(\n",
    "                clinical_df=clinical_data,\n",
    "                trait=trait,\n",
    "                trait_row=trait_row,\n",
    "                convert_trait=convert_trait,\n",
    "                age_row=age_row,\n",
    "                convert_age=convert_age,\n",
    "                gender_row=gender_row,\n",
    "                convert_gender=convert_gender\n",
    "            )\n",
    "\n",
    "            # Preview the extracted clinical features\n",
    "            preview = preview_df(clinical_features)\n",
    "            print(\"Preview of clinical features:\")\n",
    "            print(preview)\n",
    "\n",
    "            # Save the clinical features to the specified output file.\n",
    "            # Keep the index so the feature (trait) name is preserved and the file\n",
    "            # can be reloaded later with index_col=0.\n",
    "            os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n",
    "            clinical_features.to_csv(out_clinical_data_file)\n",
    "            print(f\"Clinical features saved to {out_clinical_data_file}\")\n",
    "        else:\n",
    "            print(\"Clinical data not available from previous step. Skipping clinical feature extraction.\")\n",
    "    except Exception as e:\n",
    "        print(f\"Error in clinical feature extraction: {e}\")\n",
    "        print(\"Skipping clinical feature extraction.\")\n" ] }, { "cell_type": "markdown", "id": "1e854694", "metadata": {}, "source": [ "### Step 3: Gene Data Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "6d04e225", "metadata": {}, "outputs": [], "source": [
    "# Use the helper function to get the proper file paths\n",
    "soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)\n",
    "\n",
    "# Extract gene expression data\n",
    "try:\n",
    "    gene_data = get_genetic_data(matrix_file_path)\n",
    "\n",
    "    # Print the first 20 row IDs (gene or probe identifiers)\n",
    "    print(\"First 20 gene/probe identifiers:\")\n",
    "    print(gene_data.index[:20])\n",
    "\n",
    "    # Print shape to understand the dataset dimensions\n",
    "    print(f\"\\nGene expression data shape: {gene_data.shape}\")\n",
    "\n",
    "except Exception as e:\n",
    "    print(f\"Error extracting gene data: {e}\")\n" ] }, { "cell_type": "markdown", "id": "b43c7451", "metadata": {}, "source": [ "### Step 4: Gene Identifier Review" ] }, { "cell_type": "code", "execution_count": null, "id": "4d5c968d", "metadata": {}, "outputs": [], "source": [
    "# Analyzing gene identifiers\n",
    "# These identifiers have the format [number]_at\n",
    "# They appear to be Affymetrix probe IDs, not standard human gene symbols\n",
    "# The \"_at\" suffix is characteristic of Affymetrix microarray probe identifiers\n",
    "# These will need to be mapped to standard gene symbols for biological interpretation\n",
    "\n",
    "requires_gene_mapping = True\n" ] }, { "cell_type": "markdown", "id": "55303a19", "metadata": {}, "source": [ "### Step 5: Gene Annotation" ] }, { "cell_type": "code", "execution_count": null, "id": "4483d819", "metadata": {}, "outputs": [], "source": [ "# 1. 
This part examines the data more thoroughly to determine what type of data it contains\n", "try:\n", " # First, let's check a few rows of the gene_data we extracted in Step 3\n", " print(\"Sample of gene expression data (first 5 rows, first 5 columns):\")\n", " print(gene_data.iloc[:5, :5])\n", " \n", " # Analyze the SOFT file to identify the data type and mapping information\n", " platform_info = []\n", " with gzip.open(soft_file_path, 'rt', encoding='latin-1') as f:\n", " for line in f:\n", " if line.startswith(\"!Platform_title\") or line.startswith(\"!Series_title\") or \"description\" in line.lower():\n", " platform_info.append(line.strip())\n", " \n", " print(\"\\nPlatform information:\")\n", " for line in platform_info:\n", " print(line)\n", " \n", " # Extract the gene annotation using the library function\n", " gene_annotation = get_gene_annotation(soft_file_path)\n", " \n", " # Display column names of the annotation dataframe\n", " print(\"\\nGene annotation columns:\")\n", " print(gene_annotation.columns.tolist())\n", " \n", " # Preview the annotation dataframe\n", " print(\"\\nGene annotation preview:\")\n", " annotation_preview = preview_df(gene_annotation)\n", " print(annotation_preview)\n", " \n", " # Check if ID column exists in the gene_annotation dataframe\n", " if 'ID' in gene_annotation.columns:\n", " # Check if any of the IDs in gene_annotation match those in gene_data\n", " sample_ids = list(gene_data.index[:10])\n", " matching_rows = gene_annotation[gene_annotation['ID'].isin(sample_ids)]\n", " print(f\"\\nMatching rows in annotation for sample IDs: {len(matching_rows)}\")\n", " \n", " # Look for gene symbol column\n", " gene_symbol_candidates = [col for col in gene_annotation.columns if 'gene' in col.lower() or 'symbol' in col.lower() or 'name' in col.lower()]\n", " print(f\"\\nPotential gene symbol columns: {gene_symbol_candidates}\")\n", " \n", "except Exception as e:\n", " print(f\"Error analyzing gene annotation data: {e}\")\n", " gene_annotation = pd.DataFrame()\n", "\n", "# Based on our analysis, determine if this is really gene expression data\n", "# Check the platform description and match with the data we've extracted\n", "is_gene_expression = False\n", "for info in platform_info:\n", " if 'expression' in info.lower() or 'transcript' in info.lower() or 'mrna' in info.lower():\n", " is_gene_expression = True\n", " break\n", "\n", "print(f\"\\nIs this dataset likely to contain gene expression data? {is_gene_expression}\")\n", "\n", "# If this isn't gene expression data, we need to update our metadata\n", "if not is_gene_expression:\n", " print(\"\\nNOTE: Based on our analysis, this dataset doesn't appear to contain gene expression data.\")\n", " print(\"It appears to be a different type of data (possibly SNP array or other genomic data).\")\n", " # Update is_gene_available for metadata\n", " is_gene_available = False\n", " \n", " # Save the updated metadata\n", " validate_and_save_cohort_info(\n", " is_final=False,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", " )\n" ] }, { "cell_type": "markdown", "id": "c83d319d", "metadata": {}, "source": [ "### Step 6: Gene Identifier Mapping" ] }, { "cell_type": "code", "execution_count": null, "id": "32a45c1e", "metadata": {}, "outputs": [], "source": [ "# 1. 
Analyze the identifiers and determine mapping columns\n", "# From previous steps, we know:\n", "# - Gene expression data has identifiers like '100009613_at', which are Affymetrix probe IDs\n", "# - Gene annotation data has 'ID' and 'ENTREZ_GENE_ID' columns\n", "\n", "# The 'ID' column in gene_annotation contains the probe identifiers matching gene_data.index\n", "# The 'ENTREZ_GENE_ID' column contains Entrez Gene IDs which are a type of gene identifier\n", "\n", "# 2. Get gene mapping dataframe\n", "probe_col = 'ID'\n", "gene_col = 'ENTREZ_GENE_ID'\n", "mapping_df = get_gene_mapping(gene_annotation, probe_col, gene_col)\n", "\n", "print(f\"Gene mapping dataframe shape: {mapping_df.shape}\")\n", "print(\"First 5 rows of mapping dataframe:\")\n", "print(mapping_df.head())\n", "\n", "# Modify the apply_gene_mapping function for this specific case to use Entrez IDs directly\n", "def custom_apply_gene_mapping(expression_df, mapping_df):\n", " \"\"\"Modified version of apply_gene_mapping that doesn't use extract_human_gene_symbols\n", " but works directly with Entrez Gene IDs\"\"\"\n", " mapping_df = mapping_df[mapping_df['ID'].isin(expression_df.index)].copy()\n", " \n", " # Count genes per probe and expand to one gene per row\n", " mapping_df['num_genes'] = 1 # Each Entrez ID is one gene\n", " mapping_df.set_index('ID', inplace=True)\n", "\n", " # Merge and distribute expression values\n", " merged_df = mapping_df.join(expression_df)\n", " expr_cols = [col for col in merged_df.columns if col not in ['Gene', 'num_genes']]\n", " merged_df[expr_cols] = merged_df[expr_cols].div(merged_df['num_genes'], axis=0)\n", "\n", " # Sum expression values for each gene\n", " gene_expression_df = merged_df.groupby('Gene')[expr_cols].sum()\n", "\n", " return gene_expression_df\n", "\n", "# 3. Convert probe-level measurements to gene expression data\n", "mapped_gene_data = custom_apply_gene_mapping(gene_data, mapping_df)\n", "\n", "print(f\"\\nGene expression data after mapping:\")\n", "print(f\"Shape: {mapped_gene_data.shape}\")\n", "print(\"First 5 genes and 5 samples (if available):\")\n", "if not mapped_gene_data.empty and mapped_gene_data.shape[0] >= 5 and mapped_gene_data.shape[1] >= 5:\n", " print(mapped_gene_data.iloc[:5, :5])\n", "else:\n", " print(\"Not enough data to display first 5x5. Sample data:\")\n", " if not mapped_gene_data.empty:\n", " print(mapped_gene_data.iloc[:min(5, mapped_gene_data.shape[0]), \n", " :min(5, mapped_gene_data.shape[1])])\n", "\n", "# Use the mapped data as our gene data\n", "gene_data = mapped_gene_data\n", "\n", "# Save the gene data to a file for future use\n", "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", "gene_data.to_csv(out_gene_data_file)\n", "print(f\"Gene expression data saved to {out_gene_data_file}\")\n" ] }, { "cell_type": "markdown", "id": "2d2e3c88", "metadata": {}, "source": [ "### Step 7: Data Normalization and Linking" ] }, { "cell_type": "code", "execution_count": null, "id": "62648d1e", "metadata": {}, "outputs": [], "source": [ "# 1. 
Normalize gene symbols in the obtained gene expression data\n",
    "import re  # used below for extracting GSM sample IDs (may also be provided by tools.preprocess)\n",
    "\n",
    "try:\n",
    "    # Now let's normalize the gene data using the provided function\n",
    "    normalized_gene_data = normalize_gene_symbols_in_index(gene_data)\n",
    "    if normalized_gene_data.empty:\n",
    "        # The mapping step produced Entrez Gene IDs rather than symbols, so symbol\n",
    "        # normalization may drop every row; in that case keep the unnormalized data.\n",
    "        print(\"Normalization returned an empty dataframe; keeping the unnormalized gene data.\")\n",
    "        normalized_gene_data = gene_data\n",
    "    print(f\"Gene data shape after normalization: {normalized_gene_data.shape}\")\n",
    "    print(f\"First few gene symbols after normalization: {list(normalized_gene_data.index[:10])}\")\n",
    "\n",
    "    # Save the normalized gene data\n",
    "    os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n",
    "    normalized_gene_data.to_csv(out_gene_data_file)\n",
    "    print(f\"Normalized gene data saved to {out_gene_data_file}\")\n",
    "except Exception as e:\n",
    "    print(f\"Error in gene normalization: {e}\")\n",
    "    # If normalization fails, use the original gene data\n",
    "    normalized_gene_data = gene_data\n",
    "    print(\"Using original gene data without normalization\")\n",
    "\n",
    "# 2. Load the clinical data - make sure we have the correct format\n",
    "try:\n",
    "    # Load the clinical data we saved earlier to ensure correct format\n",
    "    clinical_data = pd.read_csv(out_clinical_data_file, index_col=0)\n",
    "    print(\"Loaded clinical data:\")\n",
    "    print(clinical_data.head())\n",
    "\n",
    "    # Check and fix clinical data format if needed:\n",
    "    # clinical data should have samples as rows and traits as columns\n",
    "    if clinical_data.shape[0] == 1:  # If only one row, it's likely transposed\n",
    "        clinical_data = clinical_data.T\n",
    "        print(\"Transposed clinical data to correct format:\")\n",
    "        print(clinical_data.head())\n",
    "except Exception as e:\n",
    "    print(f\"Error loading clinical data: {e}\")\n",
    "    # If loading fails, recreate the clinical features from the raw sample\n",
    "    # characteristics loaded in Step 1, which are still in scope as clinical_data\n",
    "    clinical_data = geo_select_clinical_features(\n",
    "        clinical_df=clinical_data,\n",
    "        trait=trait,\n",
    "        trait_row=trait_row,\n",
    "        convert_trait=convert_trait,\n",
    "        age_row=age_row,\n",
    "        convert_age=convert_age,\n",
    "        gender_row=gender_row,\n",
    "        convert_gender=convert_gender\n",
    "    ).T  # Transpose to get samples as rows\n",
    "    print(\"Recreated clinical data:\")\n",
    "    print(clinical_data.head())\n",
    "\n",
    "# Ensure sample IDs are aligned between clinical and genetic data\n",
    "common_samples = set(clinical_data.index).intersection(normalized_gene_data.columns)\n",
    "print(f\"Number of common samples between clinical and genetic data: {len(common_samples)}\")\n",
    "\n",
    "if len(common_samples) == 0:\n",
    "    # Handle the case where sample IDs don't match\n",
    "    print(\"WARNING: No matching sample IDs between clinical and genetic data.\")\n",
    "    print(\"Clinical data index:\", clinical_data.index.tolist())\n",
    "    print(\"Gene data columns:\", list(normalized_gene_data.columns[:5]) + [\"...\"])\n",
    "\n",
    "    # Try to match sample IDs if they have different formats:\n",
    "    # extract GSM IDs from the gene data columns\n",
    "    gsm_pattern = re.compile(r'GSM\\d+')\n",
    "    gene_samples = []\n",
    "    for col in normalized_gene_data.columns:\n",
    "        match = gsm_pattern.search(str(col))\n",
    "        if match:\n",
    "            gene_samples.append(match.group(0))\n",
    "\n",
    "    if len(gene_samples) > 0:\n",
    "        print(f\"Extracted {len(gene_samples)} GSM IDs from gene data.\")\n",
    "        normalized_gene_data.columns = gene_samples\n",
    "\n",
    "        # Re-check the overlap after renaming; the clinical feature table produced by\n",
    "        # geo_select_clinical_features should already be indexed by GSM accessions,\n",
    "        # so we do not fabricate trait labels here.\n",
    "        common_samples = set(clinical_data.index).intersection(normalized_gene_data.columns)\n",
    "        print(f\"Common samples after renaming gene data columns: {len(common_samples)}\")\n",
    "        if len(common_samples) == 0:\n",
    "            print(\"WARNING: Sample IDs still do not match; downstream linking may yield an empty dataset.\")\n",
    "\n",
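    "# Optional sanity check (added sketch): report the trait distribution before linking.\n",
    "# This assumes clinical_data now has samples as rows and a column named by `trait`;\n",
    "# if that assumption does not hold, it simply reports the columns that are present.\n",
    "if trait in clinical_data.columns:\n",
    "    print(\"Trait value counts in the clinical data:\")\n",
    "    print(clinical_data[trait].value_counts(dropna=False))\n",
    "else:\n",
    "    print(f\"Note: no '{trait}' column found; clinical data columns: {list(clinical_data.columns)}\")\n",
    "\n",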
    "# 3. Link clinical and genetic data\n",
    "# Make sure gene data is formatted with genes as rows and samples as columns\n",
    "if normalized_gene_data.index.name != 'Gene':\n",
    "    normalized_gene_data.index.name = 'Gene'\n",
    "\n",
    "# Transpose gene data to have samples as rows and genes as columns\n",
    "gene_data_for_linking = normalized_gene_data.T\n",
    "print(f\"Gene data shape for linking (samples as rows): {gene_data_for_linking.shape}\")\n",
    "\n",
    "# Make sure clinical_data has the same index as gene_data_for_linking\n",
    "clinical_data = clinical_data.loc[clinical_data.index.isin(gene_data_for_linking.index)]\n",
    "gene_data_for_linking = gene_data_for_linking.loc[gene_data_for_linking.index.isin(clinical_data.index)]\n",
    "\n",
    "# Now link by concatenating horizontally\n",
    "linked_data = pd.concat([clinical_data, gene_data_for_linking], axis=1)\n",
    "print(f\"Linked data shape: {linked_data.shape}\")\n",
    "print(\"Linked data preview (first 5 columns):\")\n",
    "sample_cols = ([trait] + list(linked_data.columns[1:5])) if len(linked_data.columns) > 5 else list(linked_data.columns)\n",
    "print(linked_data[sample_cols].head())\n",
    "\n",
    "# 4. Handle missing values\n",
    "linked_data = handle_missing_values(linked_data, trait)\n",
    "print(f\"Linked data shape after handling missing values: {linked_data.shape}\")\n",
    "\n",
    "# Check if we still have data\n",
    "if linked_data.shape[0] == 0 or linked_data.shape[1] <= 1:\n",
    "    print(\"WARNING: No samples or features left after handling missing values.\")\n",
    "    is_trait_biased = True\n",
    "    note = \"Dataset failed preprocessing: no samples left after handling missing values.\"\n",
    "else:\n",
    "    # 5. Determine whether the trait and demographic features are biased\n",
    "    is_trait_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)\n",
    "    print(f\"Is trait biased: {is_trait_biased}\")\n",
    "    note = f\"Gene expression dataset processed for {trait}; samples are labeled 0 for living-donor controls and 1 for disease samples based on the sample group annotation.\"\n",
    "\n",
    "# 6. Conduct quality check and save the cohort information\n",
    "is_usable = validate_and_save_cohort_info(\n",
    "    is_final=True,\n",
    "    cohort=cohort,\n",
    "    info_path=json_path,\n",
    "    is_gene_available=is_gene_available,\n",
    "    is_trait_available=is_trait_available,\n",
    "    is_biased=is_trait_biased,\n",
    "    df=linked_data,\n",
    "    note=note\n",
    ")\n",
    "\n",
    "# 7. Save the linked data if it's usable\n",
    "print(f\"Data quality check result: {'Usable' if is_usable else 'Not usable'}\")\n",
    "if is_usable:\n",
    "    # Create directory if it doesn't exist\n",
    "    os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n",
    "    linked_data.to_csv(out_data_file)\n",
    "    print(f\"Linked data saved to {out_data_file}\")\n",
    "else:\n",
    "    print(\"Data not saved due to quality issues.\")" ] } ], "metadata": {}, "nbformat": 4, "nbformat_minor": 5 }