{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "2a30030c", "metadata": {}, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n", "\n", "# Path Configuration\n", "from tools.preprocess import *\n", "\n", "# Processing context\n", "trait = \"Head_and_Neck_Cancer\"\n", "cohort = \"GSE244580\"\n", "\n", "# Input paths\n", "in_trait_dir = \"../../input/GEO/Head_and_Neck_Cancer\"\n", "in_cohort_dir = \"../../input/GEO/Head_and_Neck_Cancer/GSE244580\"\n", "\n", "# Output paths\n", "out_data_file = \"../../output/preprocess/Head_and_Neck_Cancer/GSE244580.csv\"\n", "out_gene_data_file = \"../../output/preprocess/Head_and_Neck_Cancer/gene_data/GSE244580.csv\"\n", "out_clinical_data_file = \"../../output/preprocess/Head_and_Neck_Cancer/clinical_data/GSE244580.csv\"\n", "json_path = \"../../output/preprocess/Head_and_Neck_Cancer/cohort_info.json\"\n" ] }, { "cell_type": "markdown", "id": "da91cb37", "metadata": {}, "source": [ "### Step 1: Initial Data Loading" ] }, { "cell_type": "code", "execution_count": null, "id": "9fa919a0", "metadata": {}, "outputs": [], "source": [ "from tools.preprocess import *\n", "# 1. Identify the paths to the SOFT file and the matrix file\n", "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# 2. Read the matrix file to obtain background information and sample characteristics data\n", "background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n", "clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", "background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", "\n", "# 3. Obtain the sample characteristics dictionary from the clinical dataframe\n", "sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", "\n", "# 4. Explicitly print out all the background information and the sample characteristics dictionary\n", "print(\"Background Information:\")\n", "print(background_info)\n", "print(\"Sample Characteristics Dictionary:\")\n", "print(sample_characteristics_dict)\n" ] }, { "cell_type": "markdown", "id": "ad2dc6a7", "metadata": {}, "source": [ "### Step 2: Dataset Analysis and Clinical Feature Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "69ede1ac", "metadata": {}, "outputs": [], "source": [ "# 1. Gene Expression Data Availability\n", "# This dataset appears to contain gene expression data as it mentions \"microarray to analyze the gene expression profile\"\n", "is_gene_available = True\n", "\n", "# 2. 
Variable Availability and Data Type Conversion\n", "# 2.1 Data Availability\n", "# For trait (Head_and_Neck_Cancer), the 'disease state' in row 0 appears relevant\n", "trait_row = 0\n", "age_row = None # Age information not available in the sample characteristics\n", "gender_row = None # Gender information not available in the sample characteristics\n", "\n", "# 2.2 Data Type Conversion Functions\n", "def convert_trait(value):\n", " if value is None:\n", " return None\n", " \n", " if ':' in value:\n", " value = value.split(':', 1)[1].strip().lower()\n", " else:\n", " value = value.strip().lower()\n", " \n", " # Map values to binary: 1 for cancer, 0 for non-cancer\n", " if 'oropharyngeal cancer' in value or 'peritumoral' in value:\n", " return 1\n", " elif 'chronic tonsillitis' in value:\n", " return 0\n", " else:\n", " return None\n", "\n", "def convert_age(value):\n", " # Not applicable as age data isn't available\n", " return None\n", "\n", "def convert_gender(value):\n", " # Not applicable as gender data isn't available\n", " return None\n", "\n", "# 3. Save Metadata - Initial Filtering\n", "# Trait data is available (trait_row is not None)\n", "is_trait_available = trait_row is not None\n", "validate_and_save_cohort_info(\n", " is_final=False, \n", " cohort=cohort, \n", " info_path=json_path, \n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", ")\n", "\n", "# 4. Clinical Feature Extraction (if trait data is available)\n", "if trait_row is not None:\n", " # Extract clinical features using the library function\n", " clinical_df = geo_select_clinical_features(\n", " clinical_df=clinical_data,\n", " trait=trait,\n", " trait_row=trait_row,\n", " convert_trait=convert_trait,\n", " age_row=age_row,\n", " convert_age=convert_age,\n", " gender_row=gender_row,\n", " convert_gender=convert_gender\n", " )\n", " \n", " # Preview the extracted clinical data\n", " preview = preview_df(clinical_df)\n", " print(\"Preview of clinical data:\")\n", " print(preview)\n", " \n", " # Save the clinical data to the specified file\n", " os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n", " clinical_df.to_csv(out_clinical_data_file, index=False)\n", " print(f\"Clinical data saved to {out_clinical_data_file}\")\n" ] }, { "cell_type": "markdown", "id": "08e8f278", "metadata": {}, "source": [ "### Step 3: Gene Data Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "ee4950ed", "metadata": {}, "outputs": [], "source": [ "# 1. Get the SOFT and matrix file paths again \n", "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "print(f\"Matrix file found: {matrix_file}\")\n", "\n", "# 2. Use the get_genetic_data function from the library to get the gene_data\n", "try:\n", " gene_data = get_genetic_data(matrix_file)\n", " print(f\"Gene data shape: {gene_data.shape}\")\n", " \n", " # 3. 
Print the first 20 row IDs (gene or probe identifiers)\n", " print(\"First 20 gene/probe identifiers:\")\n", " print(gene_data.index[:20])\n", "except Exception as e:\n", " print(f\"Error extracting gene data: {e}\")\n" ] }, { "cell_type": "markdown", "id": "79b6b1eb", "metadata": {}, "source": [ "### Step 4: Gene Identifier Review" ] }, { "cell_type": "code", "execution_count": null, "id": "585f9dc7", "metadata": {}, "outputs": [], "source": [ "# Based on the gene identifiers shown (like '16650001', '16650003', etc.), these appear to be \n", "# probe identifiers from a microarray platform, not standard human gene symbols.\n", "# These numeric identifiers need to be mapped to human gene symbols for meaningful analysis.\n", "\n", "requires_gene_mapping = True\n" ] }, { "cell_type": "markdown", "id": "449aa7b3", "metadata": {}, "source": [ "### Step 5: Gene Annotation" ] }, { "cell_type": "code", "execution_count": null, "id": "eeababfc", "metadata": {}, "outputs": [], "source": [ "# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.\n", "gene_annotation = get_gene_annotation(soft_file)\n", "\n", "# 2. Analyze the gene annotation dataframe to identify which columns contain the gene identifiers and gene symbols\n", "print(\"\\nGene annotation preview:\")\n", "print(f\"Columns in gene annotation: {gene_annotation.columns.tolist()}\")\n", "print(preview_df(gene_annotation, n=5))\n", "\n", "# Let's look for platform information in the SOFT file to understand the annotation better\n", "print(\"\\nSearching for platform information in SOFT file:\")\n", "with gzip.open(soft_file, 'rt') as f:\n", " for i, line in enumerate(f):\n", " if '!Series_platform_id' in line:\n", " print(line.strip())\n", " break\n", " if i > 100: # Limit search to first 100 lines\n", " print(\"Platform ID not found in first 100 lines\")\n", " break\n", "\n", "# Check if the SOFT file includes any reference to gene symbols\n", "print(\"\\nSearching for gene symbol information in SOFT file:\")\n", "with gzip.open(soft_file, 'rt') as f:\n", " gene_symbol_lines = []\n", " for i, line in enumerate(f):\n", " if 'GENE_SYMBOL' in line or 'gene_symbol' in line.lower() or 'symbol' in line.lower():\n", " gene_symbol_lines.append(line.strip())\n", " if i > 1000 and len(gene_symbol_lines) > 0: # Limit search but ensure we found something\n", " break\n", " \n", " if gene_symbol_lines:\n", " print(\"Found references to gene symbols:\")\n", " for line in gene_symbol_lines[:5]: # Show just first 5 matches\n", " print(line)\n", " else:\n", " print(\"No explicit gene symbol references found in first 1000 lines\")\n", "\n", "# Look for alternative annotation files or references in the directory\n", "print(\"\\nChecking for additional annotation files in the directory:\")\n", "all_files = os.listdir(in_cohort_dir)\n", "print([f for f in all_files if 'annotation' in f.lower() or 'platform' in f.lower() or 'gpl' in f.lower()])\n" ] }, { "cell_type": "markdown", "id": "cd29f972", "metadata": {}, "source": [ "### Step 6: Gene Identifier Mapping" ] }, { "cell_type": "code", "execution_count": null, "id": "f4cb9eac", "metadata": {}, "outputs": [], "source": [ "# Let's try a more comprehensive approach to identify gene mapping information\n", "# We'll look for annotation data that might link the probe IDs to gene symbols\n", "\n", "# First, examine the platform information in more detail\n", "platform_info = {}\n", "with gzip.open(soft_file, 'rt') as f:\n", " for line in f:\n", " if 
line.startswith('!Platform_'):\n", " key = line.split('=')[0].strip()\n", " value = line.split('=')[1].strip() if '=' in line else \"\"\n", " platform_info[key] = value\n", "\n", "print(\"Platform information:\")\n", "for k, v in list(platform_info.items())[:5]: # Show first 5 platform info entries\n", " print(f\"{k}: {v}\")\n", "\n", "# Try to find SYMBOL or GENE_SYMBOL column in the annotation data\n", "potential_gene_columns = [col for col in gene_annotation.columns if 'gene' in col.lower() or 'symbol' in col.lower()]\n", "print(f\"Potential gene symbol columns: {potential_gene_columns}\")\n", "\n", "# Check if any column has values that look like gene symbols\n", "for col in gene_annotation.columns:\n", " sample_values = gene_annotation[col].dropna().head(5).tolist()\n", " print(f\"Column '{col}' sample values: {sample_values}\")\n", "\n", "# Since we don't have clear gene symbols, let's try a different approach\n", "# We'll use the SPOT_ID which contains genomic locations to map to genes\n", "# This is based on the observation that SPOT_ID has formats like 'chr1:12190-13639'\n", "\n", "# 1. Create a mapping using ID and SPOT_ID (contains genomic location)\n", "mapping_df = gene_annotation[['ID', 'SPOT_ID']].copy()\n", "mapping_df = mapping_df.dropna()\n", "\n", "# 2. For demonstration, we'll also use GB_ACC as a fallback when available\n", "# Create a composite mapping where we'll try to derive gene symbols from both sources\n", "composite_df = gene_annotation[['ID', 'GB_ACC', 'SPOT_ID']].copy()\n", "composite_df = composite_df.dropna(subset=['ID'])\n", "\n", "# 3. Define a function to extract potential gene information from both sources\n", "def extract_gene_info(row):\n", " # Try to get gene info from GB_ACC first (if it exists)\n", " if pd.notna(row['GB_ACC']):\n", " gene_symbols = extract_human_gene_symbols(row['GB_ACC'])\n", " if gene_symbols:\n", " return gene_symbols\n", " \n", " # If no gene symbols from GB_ACC, try to extract from SPOT_ID\n", " # Just returning the SPOT_ID for now - it will be processed by extract_human_gene_symbols in apply_gene_mapping\n", " return row['SPOT_ID'] if pd.notna(row['SPOT_ID']) else None\n", "\n", "# Apply the function to create a 'Gene' column\n", "composite_df['Gene'] = composite_df.apply(extract_gene_info, axis=1)\n", "composite_df = composite_df[['ID', 'Gene']].dropna()\n", "\n", "print(f\"Final mapping dataframe shape: {composite_df.shape}\")\n", "print(\"First few rows of composite mapping data:\")\n", "print(composite_df.head())\n", "\n", "# Use the library function to apply gene mapping\n", "gene_data = apply_gene_mapping(gene_data, composite_df)\n", "\n", "# Check the results\n", "print(f\"\\nGene expression data after mapping: {gene_data.shape}\")\n", "if not gene_data.empty:\n", " print(\"First few gene symbols:\")\n", " print(list(gene_data.index[:10]))\n", "else:\n", " print(\"No gene data after mapping. 
This dataset may not have suitable gene symbol mappings.\")\n", " \n", " # As a fallback, if we still don't have gene symbols, we can use the probe IDs directly\n", " # This isn't ideal for biological interpretation but preserves the data\n", " print(\"\\nUsing probe IDs directly as a fallback...\")\n", " gene_data = get_genetic_data(matrix_file) # Get original data\n", " gene_data.index.name = 'Gene' # Rename index for consistency\n", "\n", " print(f\"Gene data using probe IDs: {gene_data.shape}\")\n", " print(\"First few probe IDs used as gene identifiers:\")\n", " print(list(gene_data.index[:10]))\n", "\n", "# Save the gene expression data to the specified file\n", "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", "gene_data.to_csv(out_gene_data_file)\n", "print(f\"Gene expression data saved to {out_gene_data_file}\")\n" ] }, { "cell_type": "markdown", "id": "e3ae45ce", "metadata": {}, "source": [ "### Step 7: Data Normalization and Linking" ] }, { "cell_type": "code", "execution_count": null, "id": "2f2c8f8f", "metadata": {}, "outputs": [], "source": [ "# 1. Keep original gene_data as fallback if normalization fails\n", "print(f\"Original gene data shape: {gene_data.shape}\")\n", "print(\"First few probe IDs:\")\n", "print(list(gene_data.index[:5]))\n", "\n", "# Attempt to normalize gene symbols\n", "normalized_gene_data = normalize_gene_symbols_in_index(gene_data)\n", "print(f\"Gene data shape after normalization: {normalized_gene_data.shape}\")\n", "\n", "# Check if normalization resulted in empty dataframe, if so, use original data\n", "if normalized_gene_data.empty:\n", " print(\"WARNING: Gene symbol normalization returned empty dataset. Using original probe IDs instead.\")\n", " normalized_gene_data = gene_data.copy()\n", " # Mark this in metadata that we're using probe IDs rather than gene symbols\n", " print(f\"Using original gene data with probe IDs: {normalized_gene_data.shape}\")\n", "\n", "# Save the gene data to file (normalized if successful, original if failed)\n", "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", "normalized_gene_data.to_csv(out_gene_data_file)\n", "print(f\"Gene expression data saved to {out_gene_data_file}\")\n", "\n", "# Load the clinical data that was previously saved\n", "clinical_df = pd.read_csv(out_clinical_data_file)\n", "print(f\"Loaded clinical data shape: {clinical_df.shape}\")\n", "\n", "# 2. 
Link the clinical and genetic data\n", "# First, prepare the data for linking by setting the appropriate indexes\n", "clinical_df_for_linking = clinical_df.copy()\n", "if 'Unnamed: 0' in clinical_df_for_linking.columns:\n", " # The first CSV column holds the feature names; use it as the index, then transpose so samples become rows\n", " clinical_df_for_linking = clinical_df_for_linking.set_index('Unnamed: 0').T\n", "else:\n", " # Create a transposed version where samples are rows\n", " clinical_df_for_linking = clinical_df.T\n", " clinical_df_for_linking.columns = [trait]\n", "\n", "# Need to ensure that sample IDs match between clinical and genetic data\n", "sample_ids_genetic = normalized_gene_data.columns.tolist()\n", "sample_ids_clinical = clinical_df_for_linking.index.tolist()\n", "print(f\"Clinical data sample IDs: {sample_ids_clinical[:5]}...\")\n", "print(f\"Genetic data sample IDs: {sample_ids_genetic[:5]}...\")\n", "\n", "# Check if samples match\n", "common_samples = list(set(sample_ids_clinical).intersection(set(sample_ids_genetic)))\n", "print(f\"Number of common samples: {len(common_samples)}\")\n", "\n", "# Create linked data: the trait column followed by the transposed expression matrix,\n", "# built with a single concat rather than inserting one column per gene (which is slow and fragments the frame)\n", "linked_data = pd.concat(\n", " [clinical_df_for_linking.loc[common_samples, [trait]],\n", " normalized_gene_data[common_samples].T],\n", " axis=1\n", ")\n", "\n", "print(f\"Linked data shape: {linked_data.shape}\")\n", "print(\"Preview of linked data:\")\n", "print(linked_data.iloc[:5, :5] if not linked_data.empty else \"Empty dataframe\")\n", "\n", "# 3. Handle missing values\n", "if not linked_data.empty:\n", " try:\n", " processed_data = handle_missing_values(linked_data, trait)\n", " print(f\"Data shape after handling missing values: {processed_data.shape}\")\n", " \n", " # 4. Check for bias in features\n", " trait_biased, processed_data = judge_and_remove_biased_features(processed_data, trait)\n", " \n", " # 5. Validate and save cohort information\n", " note = \"Dataset contains gene expression data from tonsillar tissues and lymph nodes of patients with oropharyngeal cancer and chronic tonsillitis.\"\n", " \n", " is_usable = validate_and_save_cohort_info(\n", " is_final=True,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=True,\n", " is_trait_available=True,\n", " is_biased=trait_biased,\n", " df=processed_data,\n", " note=note\n", " )\n", " \n", " # 6. Save the linked data if usable\n", " if is_usable:\n", " os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n", " processed_data.to_csv(out_data_file)\n", " print(f\"Linked data saved to {out_data_file}\")\n", " else:\n",
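" # The negative validation result has already been recorded in cohort_info.json above;\n", " # no per-cohort linked data file is written for an unusable dataset.\n", " print(\"Dataset deemed not usable for analysis due to bias or other issues. 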
No linked data file saved.\")\n", " \n", " except Exception as e:\n", " print(f\"Error during data processing: {e}\")\n", " # Still save cohort information to record the failure\n", " validate_and_save_cohort_info(\n", " is_final=True,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=True,\n", " is_trait_available=True,\n", " is_biased=True, # Mark as unusable (biased) because processing failed\n", " df=pd.DataFrame(), # Empty DataFrame\n", " note=f\"Data processing failed with error: {str(e)}\"\n", " )\n", "else:\n", " print(\"Failed to create linked dataset - no common samples between clinical and genetic data.\")\n", " validate_and_save_cohort_info(\n", " is_final=True,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=True,\n", " is_trait_available=True,\n", " is_biased=True,\n", " df=pd.DataFrame(),\n", " note=\"No common samples between clinical and genetic data.\"\n", " )\n" ] }, { "cell_type": "markdown", "id": "c2c0eb82", "metadata": {}, "source": [ "### Step 8: Data Normalization and Linking (Retry with More Robust Handling)" ] }, { "cell_type": "code", "execution_count": null, "id": "23f486b9", "metadata": {}, "outputs": [], "source": [ "# 1. Keep original gene_data as a fallback for when normalization fails\n", "print(f\"Original gene data shape: {gene_data.shape}\")\n", "print(\"First few probe IDs:\")\n", "print(list(gene_data.index[:5]))\n", "\n", "# Attempt to normalize gene symbols\n", "normalized_gene_data = normalize_gene_symbols_in_index(gene_data)\n", "print(f\"Gene data shape after normalization: {normalized_gene_data.shape}\")\n", "\n", "# Check if normalization resulted in an empty dataframe; if so, use the original data\n", "if normalized_gene_data.empty:\n", " print(\"WARNING: Gene symbol normalization returned empty dataset. Using original probe IDs instead.\")\n", " normalized_gene_data = gene_data.copy()\n", " # Ensure index has valid values, not None\n", " if normalized_gene_data.index.isnull().any():\n", " print(\"WARNING: Detected null index values, replacing with probe identifiers\")\n", " normalized_gene_data.index = [f\"probe_{i}\" if idx is None else idx \n", " for i, idx in enumerate(normalized_gene_data.index)]\n", " print(f\"Using original gene data with probe IDs: {normalized_gene_data.shape}\")\n", "\n", "# Save the gene data to file (normalized if successful, original if failed)\n", "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", "normalized_gene_data.to_csv(out_gene_data_file)\n", "print(f\"Gene expression data saved to {out_gene_data_file}\")\n", "\n",
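"# Note (assumption): sample identifiers in both tables are GEO accessions (GSM IDs)\n", "# taken from the same matrix file, so the inner join below links samples on those IDs.\n", "\n", "# 2. 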
Link the clinical and genetic data\n", "# First, load the clinical data previously saved or reextract it\n", "try:\n", " clinical_df = pd.read_csv(out_clinical_data_file)\n", " print(f\"Loaded clinical data from file: {clinical_df.shape}\")\n", "except Exception as e:\n", " print(f\"Error loading clinical data from file: {e}\")\n", " print(\"Re-extracting clinical data...\")\n", " clinical_df = geo_select_clinical_features(\n", " clinical_df=clinical_data,\n", " trait=trait,\n", " trait_row=trait_row,\n", " convert_trait=convert_trait,\n", " age_row=age_row,\n", " convert_age=convert_age,\n", " gender_row=gender_row,\n", " convert_gender=convert_gender\n", " )\n", " print(f\"Re-extracted clinical data: {clinical_df.shape}\")\n", "\n", "# Prepare clinical data for linking\n", "if 'Unnamed: 0' in clinical_df.columns:\n", " clinical_df = clinical_df.set_index('Unnamed: 0')\n", "\n", "print(\"Clinical data preview:\")\n", "print(clinical_df.head())\n", "\n", "# Transform clinical data to have samples as rows if needed\n", "if clinical_df.shape[0] == 1: # If clinical data has just one row\n", " clinical_df = clinical_df.T\n", " clinical_df.columns = [trait]\n", " print(\"Transposed clinical data to have samples as rows\")\n", "\n", "# Make sure sample IDs match between datasets\n", "sample_ids_genetic = normalized_gene_data.columns\n", "sample_ids_clinical = clinical_df.index\n", "\n", "print(f\"Sample IDs in clinical data: {list(sample_ids_clinical)[:5]}...\")\n", "print(f\"Sample IDs in genetic data: {list(sample_ids_genetic)[:5]}...\")\n", "\n", "# Create linked data using a more robust approach - transpose gene data and join with clinical data\n", "try:\n", " gene_data_t = normalized_gene_data.T\n", " linked_data = clinical_df.join(gene_data_t, how='inner')\n", " print(f\"Linked data shape after joining: {linked_data.shape}\")\n", "except Exception as e:\n", " print(f\"Error joining data: {e}\")\n", " # Fallback method - create an empty dataframe with just the trait column\n", " linked_data = clinical_df.copy()\n", " print(f\"Using fallback method with only clinical data: {linked_data.shape}\")\n", "\n", "print(\"Preview of linked data (first few rows and columns):\")\n", "preview_cols = min(5, linked_data.shape[1])\n", "print(linked_data.iloc[:5, :preview_cols])\n", "\n", "# 3. Handle missing values\n", "print(\"Handling missing values...\")\n", "print(f\"Missing values in trait column: {linked_data[trait].isna().sum()}\")\n", "if linked_data.shape[1] > 1: # If we have more than just the trait column\n", " missing_percent_genes = linked_data.iloc[:, 1:].isna().mean().mean()\n", " print(f\"Average percentage of missing values in gene columns: {missing_percent_genes:.2%}\")\n", "\n", "try:\n", " linked_data_processed = handle_missing_values(linked_data, trait)\n", " print(f\"Data shape after handling missing values: {linked_data_processed.shape}\")\n", "except Exception as e:\n", " print(f\"Error handling missing values: {e}\")\n", " linked_data_processed = linked_data.copy()\n", " print(\"Using original linked data without missing value handling\")\n", "\n", "# 4. Check for bias in features\n", "if not linked_data_processed.empty and linked_data_processed.shape[0] > 0:\n", " trait_biased, linked_data_processed = judge_and_remove_biased_features(linked_data_processed, trait)\n", "else:\n", " trait_biased = True\n", " print(\"Cannot evaluate bias because processed data is empty or has no rows\")\n", "\n", "# 5. 
Final validation and saving metadata\n", "note = \"Dataset contains gene expression data from tonsillar tissues and lymph nodes of patients with oropharyngeal cancer and chronic tonsillitis.\"\n", "\n", "is_usable = validate_and_save_cohort_info(\n", " is_final=True,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=True,\n", " is_trait_available=True,\n", " is_biased=trait_biased,\n", " df=linked_data_processed,\n", " note=note\n", ")\n", "\n", "# 6. Save the linked data if usable\n", "if is_usable:\n", " os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n", " linked_data_processed.to_csv(out_data_file)\n", " print(f\"Processed data saved to {out_data_file}\")\n", "else:\n", " print(\"Dataset deemed not usable for analysis. No linked data file saved.\")" ] } ], "metadata": {}, "nbformat": 4, "nbformat_minor": 5 }