{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "e66ff380", "metadata": {}, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n", "\n", "# Path Configuration\n", "from tools.preprocess import *\n", "\n", "# Processing context\n", "trait = \"Height\"\n", "cohort = \"GSE181339\"\n", "\n", "# Input paths\n", "in_trait_dir = \"../../input/GEO/Height\"\n", "in_cohort_dir = \"../../input/GEO/Height/GSE181339\"\n", "\n", "# Output paths\n", "out_data_file = \"../../output/preprocess/Height/GSE181339.csv\"\n", "out_gene_data_file = \"../../output/preprocess/Height/gene_data/GSE181339.csv\"\n", "out_clinical_data_file = \"../../output/preprocess/Height/clinical_data/GSE181339.csv\"\n", "json_path = \"../../output/preprocess/Height/cohort_info.json\"\n" ] }, { "cell_type": "markdown", "id": "c26737df", "metadata": {}, "source": [ "### Step 1: Initial Data Loading" ] }, { "cell_type": "code", "execution_count": null, "id": "6aa8ff0d", "metadata": {}, "outputs": [], "source": [ "from tools.preprocess import *\n", "# 1. Identify the paths to the SOFT file and the matrix file\n", "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# 2. Read the matrix file to obtain background information and sample characteristics data\n", "background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n", "clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", "background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", "\n", "# 3. Obtain the sample characteristics dictionary from the clinical dataframe\n", "sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", "\n", "# 4. 
Explicitly print out all the background information and the sample characteristics dictionary\n",
"print(\"Background Information:\")\n",
"print(background_info)\n",
"print(\"Sample Characteristics Dictionary:\")\n",
"print(sample_characteristics_dict)\n"
] },
{ "cell_type": "markdown", "id": "f981b5e2", "metadata": {}, "source": [ "### Step 2: Dataset Analysis and Clinical Feature Extraction" ] },
{ "cell_type": "code", "execution_count": null, "id": "f6cf6496", "metadata": {}, "outputs": [], "source": [
"# 1. Gene Expression Data Availability\n",
"# The series summary describes a microarray experiment, so gene expression data is present.\n",
"is_gene_available = True\n",
"\n",
"# 2. Variable Availability and Data Type Conversion\n",
"# Sample characteristics rows observed in Step 1:\n",
"#   0 = gender, 1 = weight-status group (NW / OW/OB / MONW),\n",
"#   2 = age, 3 = fasting time, 4 = timepoint.\n",
"# None of these rows records height. The weight-status group is a BMI\n",
"# category, not a height measurement, and must not be used as a proxy\n",
"# for the Height trait, so trait_row is None.\n",
"trait_row = None\n",
"age_row = 2\n",
"gender_row = 0\n",
"\n",
"def convert_trait(value):\n",
"    \"\"\"Height is not recorded in this cohort; there is nothing to convert.\"\"\"\n",
"    return None\n",
"\n",
"def convert_age(value):\n",
"    \"\"\"Convert an 'age: NN' annotation to a float, or None when unparsable.\"\"\"\n",
"    if value is None or pd.isna(value):\n",
"        return None\n",
"    # Keep only the text after the 'key:' prefix, if present\n",
"    if isinstance(value, str) and \":\" in value:\n",
"        value = value.split(\":\", 1)[1].strip()\n",
"    try:\n",
"        return float(value)\n",
"    except (ValueError, TypeError):\n",
"        return None\n",
"\n",
"def convert_gender(value):\n",
"    \"\"\"Convert a gender annotation to binary: 0 = female, 1 = male, None = unknown.\"\"\"\n",
"    if value is None or pd.isna(value):\n",
"        return None\n",
"    # Keep only the text after the 'key:' prefix, if present\n",
"    if isinstance(value, str) and \":\" in value:\n",
"        value = value.split(\":\", 1)[1].strip()\n",
"    value = str(value).lower()\n",
"    if value in (\"woman\", \"female\", \"f\"):\n",
"        return 0\n",
"    if value in (\"man\", \"male\", \"m\"):\n",
"        return 1\n",
"    return None\n",
"\n",
"# 3. Save Metadata (initial filtering on usability)\n",
"is_trait_available = trait_row is not None\n",
"validate_and_save_cohort_info(\n",
"    is_final=False,\n",
"    cohort=cohort,\n",
"    info_path=json_path,\n",
"    is_gene_available=is_gene_available,\n",
"    is_trait_available=is_trait_available\n",
")\n",
"\n",
"# 4. Clinical Feature Extraction is skipped because trait_row is None.\n",
"# NOTE: extraction must operate on the per-sample clinical_data loaded in\n",
"# Step 1; rebuilding a table from the unique-values dictionary would lose\n",
"# the sample-to-value alignment.\n",
"print(\"Height data is not available in this cohort; skipping clinical feature extraction.\")\n"
] },
{ "cell_type": "markdown", "id": "1a8f3056", "metadata": {}, "source": [ "### Step 3: Verifying Trait Availability from the Raw Files" ] },
{ "cell_type": "code", "execution_count": null, "id": "39ac46bd", "metadata": {}, "outputs": [], "source": [
"# Debug/verification step: confirm directly from the raw GEO files that no\n",
"# height-related annotation exists in the sample characteristics, supporting\n",
"# the trait_row = None decision made in Step 2.\n",
"soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n",
"\n",
"background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n",
"clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n",
"_, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n",
"\n",
"# Scan every characteristics row for a height-related keyword.\n",
"height_rows = []\n",
"for row_idx in range(len(clinical_data)):\n",
"    row_text = ' '.join(clinical_data.iloc[row_idx].astype(str)).lower()\n",
"    if 'height' in row_text:\n",
"        height_rows.append(row_idx)\n",
"\n",
"if height_rows:\n",
"    print(f\"Height-related annotation found in rows: {height_rows}\")\n",
"else:\n",
"    print(\"Confirmed: no height annotation in the sample characteristics of this cohort.\")\n"
] },
{ "cell_type": "markdown", "id": "fcbad901", "metadata": {}, "source": [ "### Step 4: Gene Data Extraction" ] },
{ "cell_type": "code", "execution_count": null, "id": "452bb99a", "metadata": {}, "outputs": [], "source": [
"# 1. Get the file paths for the SOFT file and matrix file\n",
"soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n",
"\n",
"# 2. 
First, let's examine the structure of the matrix file to understand its format\n", "import gzip\n", "\n", "# Peek at the first few lines of the file to understand its structure\n", "with gzip.open(matrix_file, 'rt') as file:\n", " # Read first 100 lines to find the header structure\n", " for i, line in enumerate(file):\n", " if '!series_matrix_table_begin' in line:\n", " print(f\"Found data marker at line {i}\")\n", " # Read the next line which should be the header\n", " header_line = next(file)\n", " print(f\"Header line: {header_line.strip()}\")\n", " # And the first data line\n", " first_data_line = next(file)\n", " print(f\"First data line: {first_data_line.strip()}\")\n", " break\n", " if i > 100: # Limit search to first 100 lines\n", " print(\"Matrix table marker not found in first 100 lines\")\n", " break\n", "\n", "# 3. Now try to get the genetic data with better error handling\n", "try:\n", " gene_data = get_genetic_data(matrix_file)\n", " print(gene_data.index[:20])\n", "except KeyError as e:\n", " print(f\"KeyError: {e}\")\n", " \n", " # Alternative approach: manually extract the data\n", " print(\"\\nTrying alternative approach to read the gene data:\")\n", " with gzip.open(matrix_file, 'rt') as file:\n", " # Find the start of the data\n", " for line in file:\n", " if '!series_matrix_table_begin' in line:\n", " break\n", " \n", " # Read the headers and data\n", " import pandas as pd\n", " df = pd.read_csv(file, sep='\\t', index_col=0)\n", " print(f\"Column names: {df.columns[:5]}\")\n", " print(f\"First 20 row IDs: {df.index[:20]}\")\n", " gene_data = df\n" ] }, { "cell_type": "markdown", "id": "28b890b3", "metadata": {}, "source": [ "### Step 5: Gene Identifier Review" ] }, { "cell_type": "code", "execution_count": null, "id": "89de7d56", "metadata": {}, "outputs": [], "source": [ "# Based on reviewing the gene identifiers in the gene expression data, I can see they are numeric \n", "# identifiers (like 7, 8, 15, 18, etc.) 
rather than human gene symbols (which would be something \n", "# like BRCA1, TP53, etc.)\n", "# \n", "# These appear to be probe IDs from a microarray platform, which need to be mapped to human gene symbols\n", "# for proper biological interpretation. The numeric format is typical of Affymetrix or similar microarray \n", "# platforms where probes are identified by numbers rather than gene names.\n", "\n", "requires_gene_mapping = True\n" ] }, { "cell_type": "markdown", "id": "bc845dcc", "metadata": {}, "source": [ "### Step 6: Gene Annotation" ] }, { "cell_type": "code", "execution_count": null, "id": "870ed6f2", "metadata": {}, "outputs": [], "source": [ "# 1. Let's first examine the structure of the SOFT file before trying to parse it\n", "import gzip\n", "\n", "# Look at the first few lines of the SOFT file to understand its structure\n", "print(\"Examining SOFT file structure:\")\n", "try:\n", " with gzip.open(soft_file, 'rt') as file:\n", " # Read first 20 lines to understand the file structure\n", " for i, line in enumerate(file):\n", " if i < 20:\n", " print(f\"Line {i}: {line.strip()}\")\n", " else:\n", " break\n", "except Exception as e:\n", " print(f\"Error reading SOFT file: {e}\")\n", "\n", "# 2. 
Now let's try a more robust approach to extract the gene annotation\n", "# Instead of using the library function which failed, we'll implement a custom approach\n", "try:\n", " # First, look for the platform section which contains gene annotation\n", " platform_data = []\n", " with gzip.open(soft_file, 'rt') as file:\n", " in_platform_section = False\n", " for line in file:\n", " if line.startswith('^PLATFORM'):\n", " in_platform_section = True\n", " continue\n", " if in_platform_section and line.startswith('!platform_table_begin'):\n", " # Next line should be the header\n", " header = next(file).strip()\n", " platform_data.append(header)\n", " # Read until the end of the platform table\n", " for table_line in file:\n", " if table_line.startswith('!platform_table_end'):\n", " break\n", " platform_data.append(table_line.strip())\n", " break\n", " \n", " # If we found platform data, convert it to a DataFrame\n", " if platform_data:\n", " import pandas as pd\n", " import io\n", " platform_text = '\\n'.join(platform_data)\n", " gene_annotation = pd.read_csv(io.StringIO(platform_text), delimiter='\\t', \n", " low_memory=False, on_bad_lines='skip')\n", " print(\"\\nGene annotation preview:\")\n", " print(preview_df(gene_annotation))\n", " else:\n", " print(\"Could not find platform table in SOFT file\")\n", " \n", " # Try an alternative approach - extract mapping from other sections\n", " with gzip.open(soft_file, 'rt') as file:\n", " for line in file:\n", " if 'ANNOTATION information' in line or 'annotation information' in line:\n", " print(f\"Found annotation information: {line.strip()}\")\n", " if line.startswith('!Platform_title') or line.startswith('!platform_title'):\n", " print(f\"Platform title: {line.strip()}\")\n", " \n", "except Exception as e:\n", " print(f\"Error processing gene annotation: {e}\")\n" ] }, { "cell_type": "markdown", "id": "06038669", "metadata": {}, "source": [ "### Step 7: Gene Identifier Mapping" ] }, { "cell_type": "code", 
"execution_count": null, "id": "f15b5960", "metadata": {}, "outputs": [], "source": [ "# Inspect the gene identifier columns in the annotation dataframe\n", "print(\"Column names in gene annotation dataframe:\")\n", "print(gene_annotation.columns)\n", "\n", "# Based on previous output, I can see that:\n", "# - The gene expression data uses numeric IDs (like 7, 8, 15...)\n", "# - The gene annotation dataframe has an 'ID' column that appears to contain similar numeric identifiers\n", "# - The 'GENE_SYMBOL' column contains the human gene symbols we need\n", "\n", "# 1. Identify the mapping columns\n", "probe_id_col = 'ID'\n", "gene_symbol_col = 'GENE_SYMBOL'\n", "\n", "# 2. Get the gene mapping dataframe by extracting the two relevant columns\n", "mapping_df = get_gene_mapping(gene_annotation, probe_id_col, gene_symbol_col)\n", "print(f\"Number of probes with gene mapping: {len(mapping_df)}\")\n", "print(f\"Sample of the mapping dataframe:\")\n", "print(mapping_df.head())\n", "\n", "# 3. Apply gene mapping to convert probe-level measurements to gene expression data\n", "gene_data = apply_gene_mapping(gene_data, mapping_df)\n", "print(f\"Dimensions of gene expression data after mapping: {gene_data.shape}\")\n", "print(f\"Sample of gene symbols and expression values:\")\n", "print(gene_data.head())\n", "\n", "# Save the gene expression data\n", "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", "gene_data.to_csv(out_gene_data_file)\n", "print(f\"Gene expression data saved to {out_gene_data_file}\")\n" ] }, { "cell_type": "markdown", "id": "8f477487", "metadata": {}, "source": [ "### Step 8: Data Normalization and Linking" ] }, { "cell_type": "code", "execution_count": null, "id": "1323b4e1", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import os\n", "\n", "# 1. 
Normalize the gene symbols of the mapped expression data from Step 7.\n",
"# NOTE: do not re-extract probe-level data here. 'gene_data' already holds\n",
"# symbol-indexed values produced by apply_gene_mapping in Step 7; running\n",
"# normalize_gene_symbols_in_index on raw numeric probe IDs would discard\n",
"# the mapping and overwrite the saved gene data file with unusable output.\n",
"normalized_gene_data = normalize_gene_symbols_in_index(gene_data)\n",
"print(f\"Gene data shape after normalization: {normalized_gene_data.shape}\")\n",
"\n",
"# Save the normalized gene data to the output file\n",
"os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n",
"normalized_gene_data.to_csv(out_gene_data_file)\n",
"print(f\"Normalized gene data saved to {out_gene_data_file}\")\n",
"\n",
"# 2. Link clinical and genetic data.\n",
"# Height is not recorded in this cohort's sample characteristics (rows hold\n",
"# gender, weight-status group, age, fasting time and timepoint only), so the\n",
"# clinical frame is a minimal one whose trait column is all-NaN.\n",
"sample_ids = normalized_gene_data.columns\n",
"minimal_clinical_df = pd.DataFrame(index=sample_ids)\n",
"minimal_clinical_df[trait] = np.nan\n",
"minimal_clinical_df.index.name = 'Sample'\n",
"\n",
"# Save this minimal clinical data for reference\n",
"os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n",
"minimal_clinical_df.to_csv(out_clinical_data_file)\n",
"print(f\"Clinical data saved to {out_clinical_data_file}\")\n",
"\n",
"# Create a linked dataset (samples as rows: clinical columns + gene columns)\n",
"linked_data = pd.concat([minimal_clinical_df, normalized_gene_data.T], axis=1)\n",
"linked_data.index.name = 'Sample'\n",
"print(f\"Linked data shape: {linked_data.shape}\")\n",
"\n",
"# 3. Missing-value handling is skipped deliberately: every trait value is\n",
"# missing, so dropping samples without a trait value would empty the dataset.\n",
"\n",
"# 4 & 5. Final validation and metadata save.\n",
"is_gene_available = len(normalized_gene_data) > 0\n",
"is_trait_available = False  # no Height measurements in this cohort\n",
"\n",
"note = (\"Dataset contains gene expression data but no Height measurements. \"\n",
"        \"This dataset is not usable for studying Height associations.\")\n",
"\n",
"# Without trait data the bias question does not apply; record False so the\n",
"# cohort is excluded for missing trait data rather than for bias.\n",
"is_biased = False\n",
"\n",
"is_usable = validate_and_save_cohort_info(\n",
"    is_final=True,\n",
"    cohort=cohort,\n",
"    info_path=json_path,\n",
"    is_gene_available=is_gene_available,\n",
"    is_trait_available=is_trait_available,\n",
"    is_biased=is_biased,\n",
"    df=linked_data,\n",
"    note=note\n",
")\n",
"\n",
"# 6. Only save the linked table when the cohort is usable (here it is not,\n",
"# because the Height trait is absent).\n",
"print(f\"Dataset usability: {is_usable}\")\n",
"if is_usable:\n",
"    os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n",
"    linked_data.to_csv(out_data_file)\n",
"    print(f\"Linked data saved to {out_data_file}\")\n",
"else:\n",
"    print(\"Dataset does not contain Height data and cannot be used for association studies.\")" ] } ], "metadata": {}, "nbformat": 4, "nbformat_minor": 5 }