{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0e7b4e6b",
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "import os\n",
    "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n",
    "\n",
    "# Path Configuration\n",
    "from tools.preprocess import *\n",
    "\n",
    "# Processing context\n",
    "trait = \"Height\"\n",
    "cohort = \"GSE71994\"\n",
    "\n",
    "# Input paths\n",
    "in_trait_dir = \"../../input/GEO/Height\"\n",
    "in_cohort_dir = \"../../input/GEO/Height/GSE71994\"\n",
    "\n",
    "# Output paths\n",
    "out_data_file = \"../../output/preprocess/Height/GSE71994.csv\"\n",
    "out_gene_data_file = \"../../output/preprocess/Height/gene_data/GSE71994.csv\"\n",
    "out_clinical_data_file = \"../../output/preprocess/Height/clinical_data/GSE71994.csv\"\n",
    "json_path = \"../../output/preprocess/Height/cohort_info.json\"\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "341bb3ce",
   "metadata": {},
   "source": [
    "### Step 1: Initial Data Loading"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2fbb862d",
   "metadata": {},
   "outputs": [],
   "source": [
    "from tools.preprocess import *\n",
    "# 1. Identify the paths to the SOFT file and the matrix file\n",
    "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n",
    "\n",
    "# 2. Read the matrix file to obtain background information and sample characteristics data\n",
    "background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n",
    "clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n",
    "background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n",
    "\n",
    "# 3. Obtain the sample characteristics dictionary from the clinical dataframe\n",
    "sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n",
    "\n",
    "# 4. Explicitly print out all the background information and the sample characteristics dictionary\n",
    "print(\"Background Information:\")\n",
    "print(background_info)\n",
    "print(\"Sample Characteristics Dictionary:\")\n",
    "print(sample_characteristics_dict)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f9fd4102",
   "metadata": {},
   "source": [
    "### Step 2: Dataset Analysis and Clinical Feature Extraction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2413c303",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Gene Expression Data Availability\n",
    "# Based on the background information, this dataset contains PBMC gene expression data\n",
    "is_gene_available = True\n",
    "\n",
    "# 2. Variable Availability and Data Type Conversion\n",
    "# 2.1 Data Availability\n",
    "# Height data is available in row 4\n",
    "trait_row = 4\n",
    "# Age data is available in row 3\n",
    "age_row = 3\n",
    "# Gender data is available in row 1\n",
    "gender_row = 1\n",
    "\n",
    "# 2.2 Data Type Conversion Functions\n",
    "def convert_trait(value):\n",
    "    \"\"\"Convert height data to continuous values.\"\"\"\n",
    "    try:\n",
    "        # Extract the value after colon and convert to float\n",
    "        if isinstance(value, str) and ':' in value:\n",
    "            height_str = value.split(':', 1)[1].strip()\n",
    "            return float(height_str)\n",
    "        return None\n",
    "    except:\n",
    "        return None\n",
    "\n",
    "def convert_age(value):\n",
    "    \"\"\"Convert age data to continuous values.\"\"\"\n",
    "    try:\n",
    "        # Extract the value after colon and convert to integer\n",
    "        if isinstance(value, str) and ':' in value:\n",
    "            age_str = value.split(':', 1)[1].strip()\n",
    "            return int(age_str)\n",
    "        return None\n",
    "    except:\n",
    "        return None\n",
    "\n",
    "def convert_gender(value):\n",
    "    \"\"\"Convert gender data to binary values: female=0, male=1.\"\"\"\n",
    "    try:\n",
    "        if isinstance(value, str) and ':' in value:\n",
    "            gender_str = value.split(':', 1)[1].strip().lower()\n",
    "            if 'female' in gender_str:\n",
    "                return 0\n",
    "            elif 'male' in gender_str:\n",
    "                return 1\n",
    "        return None\n",
    "    except:\n",
    "        return None\n",
    "\n",
    "# 3. Save Metadata\n",
    "# Perform initial filtering on dataset usability\n",
    "is_trait_available = trait_row is not None\n",
    "validate_and_save_cohort_info(\n",
    "    is_final=False,\n",
    "    cohort=cohort,\n",
    "    info_path=json_path,\n",
    "    is_gene_available=is_gene_available,\n",
    "    is_trait_available=is_trait_available\n",
    ")\n",
    "\n",
    "# 4. Clinical Feature Extraction\n",
    "# If trait_row is not None, extract clinical features\n",
    "if trait_row is not None:\n",
    "    # Get the clinical data from the previous step\n",
    "    # (assuming clinical_data is available from the previous step)\n",
    "    try:\n",
    "        # Use geo_select_clinical_features to extract clinical data\n",
    "        clinical_features = geo_select_clinical_features(\n",
    "            clinical_df=clinical_data, \n",
    "            trait=trait, \n",
    "            trait_row=trait_row,\n",
    "            convert_trait=convert_trait,\n",
    "            age_row=age_row,\n",
    "            convert_age=convert_age,\n",
    "            gender_row=gender_row,\n",
    "            convert_gender=convert_gender\n",
    "        )\n",
    "        \n",
    "        # Preview the extracted clinical features\n",
    "        preview = preview_df(clinical_features)\n",
    "        print(f\"Clinical features preview: {preview}\")\n",
    "        \n",
    "        # Save the clinical features to CSV\n",
    "        os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n",
    "        clinical_features.to_csv(out_clinical_data_file, index=True)\n",
    "        print(f\"Clinical features saved to {out_clinical_data_file}\")\n",
    "    except NameError:\n",
    "        print(\"clinical_data not available from previous step\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ab37f5d1",
   "metadata": {},
   "source": [
    "### Step 3: Gene Data Extraction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "66b78a02",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Get the file paths for the SOFT file and matrix file\n",
    "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n",
    "\n",
    "# 2. First, let's examine the structure of the matrix file to understand its format\n",
    "import gzip\n",
    "\n",
    "# Peek at the first few lines of the file to understand its structure\n",
    "with gzip.open(matrix_file, 'rt') as file:\n",
    "    # Read first 100 lines to find the header structure\n",
    "    for i, line in enumerate(file):\n",
    "        if '!series_matrix_table_begin' in line:\n",
    "            print(f\"Found data marker at line {i}\")\n",
    "            # Read the next line which should be the header\n",
    "            header_line = next(file)\n",
    "            print(f\"Header line: {header_line.strip()}\")\n",
    "            # And the first data line\n",
    "            first_data_line = next(file)\n",
    "            print(f\"First data line: {first_data_line.strip()}\")\n",
    "            break\n",
    "        if i > 100:  # Limit search to first 100 lines\n",
    "            print(\"Matrix table marker not found in first 100 lines\")\n",
    "            break\n",
    "\n",
    "# 3. Now try to get the genetic data with better error handling\n",
    "try:\n",
    "    gene_data = get_genetic_data(matrix_file)\n",
    "    print(gene_data.index[:20])\n",
    "except KeyError as e:\n",
    "    print(f\"KeyError: {e}\")\n",
    "    \n",
    "    # Alternative approach: manually extract the data\n",
    "    print(\"\\nTrying alternative approach to read the gene data:\")\n",
    "    with gzip.open(matrix_file, 'rt') as file:\n",
    "        # Find the start of the data\n",
    "        for line in file:\n",
    "            if '!series_matrix_table_begin' in line:\n",
    "                break\n",
    "                \n",
    "        # Read the headers and data\n",
    "        import pandas as pd\n",
    "        df = pd.read_csv(file, sep='\\t', index_col=0)\n",
    "        print(f\"Column names: {df.columns[:5]}\")\n",
    "        print(f\"First 20 row IDs: {df.index[:20]}\")\n",
    "        gene_data = df\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "09e892cc",
   "metadata": {},
   "source": [
    "### Step 4: Gene Identifier Review"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b045e18b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Examining the gene identifiers in the gene data\n",
    "# The IDs appear to be numeric identifiers (e.g., 7896746) which are not standard\n",
    "# human gene symbols. Human gene symbols are typically alphanumeric (like BRCA1, TP53, etc.)\n",
    "# These appear to be probe IDs from a microarray platform that need to be mapped to gene symbols.\n",
    "\n",
    "requires_gene_mapping = True\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d5cb647a",
   "metadata": {},
   "source": [
    "### Step 5: Gene Annotation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "98ce4bae",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Let's first examine the structure of the SOFT file before trying to parse it\n",
    "import gzip\n",
    "\n",
    "# Look at the first few lines of the SOFT file to understand its structure\n",
    "print(\"Examining SOFT file structure:\")\n",
    "try:\n",
    "    with gzip.open(soft_file, 'rt') as file:\n",
    "        # Read first 20 lines to understand the file structure\n",
    "        for i, line in enumerate(file):\n",
    "            if i < 20:\n",
    "                print(f\"Line {i}: {line.strip()}\")\n",
    "            else:\n",
    "                break\n",
    "except Exception as e:\n",
    "    print(f\"Error reading SOFT file: {e}\")\n",
    "\n",
    "# 2. Now let's try a more robust approach to extract the gene annotation\n",
    "# Instead of using the library function which failed, we'll implement a custom approach\n",
    "try:\n",
    "    # First, look for the platform section which contains gene annotation\n",
    "    platform_data = []\n",
    "    with gzip.open(soft_file, 'rt') as file:\n",
    "        in_platform_section = False\n",
    "        for line in file:\n",
    "            if line.startswith('^PLATFORM'):\n",
    "                in_platform_section = True\n",
    "                continue\n",
    "            if in_platform_section and line.startswith('!platform_table_begin'):\n",
    "                # Next line should be the header\n",
    "                header = next(file).strip()\n",
    "                platform_data.append(header)\n",
    "                # Read until the end of the platform table\n",
    "                for table_line in file:\n",
    "                    if table_line.startswith('!platform_table_end'):\n",
    "                        break\n",
    "                    platform_data.append(table_line.strip())\n",
    "                break\n",
    "    \n",
    "    # If we found platform data, convert it to a DataFrame\n",
    "    if platform_data:\n",
    "        import pandas as pd\n",
    "        import io\n",
    "        platform_text = '\\n'.join(platform_data)\n",
    "        gene_annotation = pd.read_csv(io.StringIO(platform_text), delimiter='\\t', \n",
    "                                      low_memory=False, on_bad_lines='skip')\n",
    "        print(\"\\nGene annotation preview:\")\n",
    "        print(preview_df(gene_annotation))\n",
    "    else:\n",
    "        print(\"Could not find platform table in SOFT file\")\n",
    "        \n",
    "        # Try an alternative approach - extract mapping from other sections\n",
    "        with gzip.open(soft_file, 'rt') as file:\n",
    "            for line in file:\n",
    "                if 'ANNOTATION information' in line or 'annotation information' in line:\n",
    "                    print(f\"Found annotation information: {line.strip()}\")\n",
    "                if line.startswith('!Platform_title') or line.startswith('!platform_title'):\n",
    "                    print(f\"Platform title: {line.strip()}\")\n",
    "            \n",
    "except Exception as e:\n",
    "    print(f\"Error processing gene annotation: {e}\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a2ef0cad",
   "metadata": {},
   "source": [
    "### Step 6: Gene Identifier Mapping"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cc6fc1e3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Analyze the gene identifiers in gene expression data and gene annotation\n",
    "\n",
    "# Based on examining the gene expression data and gene annotation data:\n",
    "# - The gene expression data uses numeric identifiers (e.g., 7896746) in the 'ID' column\n",
    "# - The gene annotation data has these same identifiers in the 'ID' column\n",
    "# - The gene symbols can be extracted from the 'gene_assignment' column\n",
    "\n",
    "# 2. Create a mapping between probe IDs and gene symbols\n",
    "# The 'gene_assignment' column has a complex format with gene symbols embedded\n",
    "# We need to extract gene symbols from this column\n",
    "\n",
    "# Define the mapping columns\n",
    "probe_col = 'ID'  # This column matches the index in gene_data\n",
    "gene_col = 'gene_assignment'  # This column contains the gene symbols\n",
    "\n",
    "# Create a mapping dataframe\n",
    "mapping_df = pd.DataFrame({\n",
    "    'ID': gene_annotation['ID'].astype(str),\n",
    "    'Gene': gene_annotation[gene_col]\n",
    "})\n",
    "\n",
    "# Preview the mapping before processing\n",
    "print(\"Original mapping preview (first 2 rows):\")\n",
    "print(mapping_df.head(2))\n",
    "\n",
    "# 3. Apply the gene mapping to transform probe-level data to gene-level data\n",
    "# The apply_gene_mapping function will:\n",
    "# - Extract human gene symbols from the gene_assignment text\n",
    "# - Handle many-to-many mappings with proper distribution of expression values\n",
    "gene_data = apply_gene_mapping(expression_df=gene_data, mapping_df=mapping_df)\n",
    "\n",
    "# Preview the gene expression data\n",
    "print(\"\\nTransformed gene expression data (first 5 genes, first 3 samples):\")\n",
    "print(gene_data.iloc[:5, :3])\n",
    "\n",
    "# Check the shape of the transformed data\n",
    "print(f\"\\nGene expression data shape: {gene_data.shape}\")\n",
    "\n",
    "# Save the gene data to the specified path\n",
    "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n",
    "gene_data.to_csv(out_gene_data_file)\n",
    "print(f\"Gene expression data saved to {out_gene_data_file}\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "84304b01",
   "metadata": {},
   "source": [
    "### Step 7: Data Normalization and Linking"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f201d6e3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Let's use the gene data from Step 6 that already has genes mapped from probes\n",
    "# We need to normalize these gene symbols\n",
    "try:\n",
    "    # Normalize gene symbols using the NCBI Gene database information\n",
    "    normalized_gene_data = normalize_gene_symbols_in_index(gene_data)\n",
    "    print(f\"Gene data shape after normalization: {normalized_gene_data.shape}\")\n",
    "    \n",
    "    # Check if we have any genes after normalization\n",
    "    if normalized_gene_data.shape[0] == 0:\n",
    "        print(\"WARNING: No genes remained after normalization. This may indicate an issue with gene symbol mapping.\")\n",
    "        is_gene_available = False\n",
    "    else:\n",
    "        is_gene_available = True\n",
    "        \n",
    "    # Save the normalized gene data to the output file (even if empty, for logging purposes)\n",
    "    os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n",
    "    normalized_gene_data.to_csv(out_gene_data_file)\n",
    "    print(f\"Normalized gene data saved to {out_gene_data_file}\")\n",
    "    \n",
    "except Exception as e:\n",
    "    print(f\"Error during gene normalization: {e}\")\n",
    "    normalized_gene_data = pd.DataFrame()\n",
    "    is_gene_available = False\n",
    "\n",
    "# 2. Load clinical data from the processed file\n",
    "try:\n",
    "    clinical_df = pd.read_csv(out_clinical_data_file, index_col=0)\n",
    "    print(f\"Loaded clinical data with shape: {clinical_df.shape}\")\n",
    "    print(f\"Clinical data columns: {clinical_df.columns.tolist()}\")\n",
    "    \n",
    "    # Check if trait column exists in the data\n",
    "    if trait not in clinical_df.columns:\n",
    "        clinical_df[trait] = np.nan  # Add empty trait column\n",
    "        print(f\"Added empty '{trait}' column to clinical data\")\n",
    "    \n",
    "    is_trait_available = not clinical_df[trait].isna().all()\n",
    "    print(f\"Trait availability: {is_trait_available}\")\n",
    "    \n",
    "except FileNotFoundError:\n",
    "    print(\"Clinical data file not found. Creating a new clinical dataframe.\")\n",
    "    clinical_df = pd.DataFrame(index=gene_data.columns)\n",
    "    clinical_df[trait] = np.nan  # Empty trait column\n",
    "    clinical_df['Age'] = np.nan  # Empty age column\n",
    "    clinical_df['Gender'] = np.nan  # Empty gender column\n",
    "    is_trait_available = False\n",
    "\n",
    "# 3. Create linked data\n",
    "linked_data = pd.DataFrame(index=clinical_df.index)\n",
    "linked_data[trait] = clinical_df[trait]\n",
    "\n",
    "# Add demographic columns if available\n",
    "if 'Age' in clinical_df.columns:\n",
    "    linked_data['Age'] = clinical_df['Age']\n",
    "if 'Gender' in clinical_df.columns:\n",
    "    linked_data['Gender'] = clinical_df['Gender']\n",
    "\n",
    "# Add gene expression data if available\n",
    "if is_gene_available:\n",
    "    for gene in normalized_gene_data.index:\n",
    "        linked_data[gene] = normalized_gene_data.loc[gene]\n",
    "\n",
    "print(f\"Linked data shape: {linked_data.shape}\")\n",
    "\n",
    "# Handle missing values only if trait data is available\n",
    "if is_trait_available:\n",
    "    linked_data = handle_missing_values(linked_data, trait)\n",
    "    print(f\"Linked data shape after handling missing values: {linked_data.shape}\")\n",
    "    \n",
    "    # Check if trait and demographic features are biased\n",
    "    is_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)\n",
    "else:\n",
    "    is_biased = False  # Not biased because there's no trait data to be biased\n",
    "\n",
    "# Prepare notes based on data availability\n",
    "if not is_gene_available and not is_trait_available:\n",
    "    note = \"Dataset lacks both gene expression data (after normalization) and Height measurements.\"\n",
    "elif not is_gene_available:\n",
    "    note = \"Dataset lacks gene expression data after normalization (all gene symbols may be non-standard).\"\n",
    "elif not is_trait_available:\n",
    "    note = \"Dataset contains gene expression data but no Height measurements.\"\n",
    "else:\n",
    "    note = \"Dataset contains both gene expression and Height measurements.\"\n",
    "\n",
    "# 5. Final validation\n",
    "is_usable = validate_and_save_cohort_info(\n",
    "    is_final=True, \n",
    "    cohort=cohort, \n",
    "    info_path=json_path, \n",
    "    is_gene_available=is_gene_available, \n",
    "    is_trait_available=is_trait_available, \n",
    "    is_biased=is_biased,\n",
    "    df=linked_data,\n",
    "    note=note\n",
    ")\n",
    "\n",
    "# 6. Save the linked data if it's usable\n",
    "print(f\"Dataset usability: {is_usable}\")\n",
    "if is_usable:\n",
    "    os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n",
    "    linked_data.to_csv(out_data_file)\n",
    "    print(f\"Linked data saved to {out_data_file}\")\n",
    "else:\n",
    "    print(\"Dataset cannot be used for association studies due to missing or biased data.\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a4a965ba",
   "metadata": {},
   "source": [
    "### Step 8: Data Normalization and Linking"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f5260b15",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import os\n",
    "\n",
    "# 1. Let's load the gene expression data\n",
    "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n",
    "\n",
    "# Extract gene expression data\n",
    "gene_data = get_genetic_data(matrix_file)\n",
    "\n",
    "# Normalize gene symbols using the NCBI Gene database information\n",
    "normalized_gene_data = normalize_gene_symbols_in_index(gene_data)\n",
    "print(f\"Gene data shape after normalization: {normalized_gene_data.shape}\")\n",
    "\n",
    "# Save the normalized gene data to the output file\n",
    "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n",
    "normalized_gene_data.to_csv(out_gene_data_file)\n",
    "print(f\"Normalized gene data saved to {out_gene_data_file}\")\n",
    "\n",
    "# 2. Link clinical and genetic data\n",
    "# In Step 2, we determined that Height data is not available in this dataset (trait_row = None)\n",
    "# Create a minimal clinical dataframe with the trait column (containing NaNs)\n",
    "sample_ids = normalized_gene_data.columns\n",
    "minimal_clinical_df = pd.DataFrame(index=sample_ids)\n",
    "minimal_clinical_df[trait] = np.nan  # Add the trait column with NaN values\n",
    "\n",
    "# If we have age and gender data from Step 2, add those columns\n",
    "if age_row is not None:\n",
    "    minimal_clinical_df['Age'] = get_feature_data(clinical_data, age_row, 'Age', convert_age).iloc[0]\n",
    "\n",
    "if gender_row is not None:\n",
    "    minimal_clinical_df['Gender'] = get_feature_data(clinical_data, gender_row, 'Gender', convert_gender).iloc[0]\n",
    "\n",
    "minimal_clinical_df.index.name = 'Sample'\n",
    "\n",
    "# Save this minimal clinical data for reference\n",
    "os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n",
    "minimal_clinical_df.to_csv(out_clinical_data_file)\n",
    "print(f\"Clinical data saved to {out_clinical_data_file}\")\n",
    "\n",
    "# Create a linked dataset \n",
    "linked_data = pd.concat([minimal_clinical_df, normalized_gene_data.T], axis=1)\n",
    "linked_data.index.name = 'Sample'\n",
    "print(f\"Linked data shape: {linked_data.shape}\")\n",
    "\n",
    "# We would normally handle missing values here, but since all trait values are missing,\n",
    "# the dataset will be empty after removing samples with missing trait values\n",
    "# Therefore, we'll skip that step\n",
    "\n",
    "# 4 & 5. Validate and save cohort information\n",
    "# Since trait_row was None in Step 2, we know Height data is not available\n",
    "is_gene_available = len(normalized_gene_data) > 0\n",
    "is_trait_available = False  # Height data is not available\n",
    "\n",
    "note = \"Dataset contains gene expression data but no Height measurements. This dataset is not usable for studying Height associations.\"\n",
    "\n",
    "# For datasets without trait data, we set is_biased to False\n",
    "# This indicates the dataset is not usable due to missing trait data, not due to bias\n",
    "is_biased = False\n",
    "\n",
    "# Final validation\n",
    "is_usable = validate_and_save_cohort_info(\n",
    "    is_final=True, \n",
    "    cohort=cohort, \n",
    "    info_path=json_path, \n",
    "    is_gene_available=is_gene_available, \n",
    "    is_trait_available=is_trait_available, \n",
    "    is_biased=is_biased,\n",
    "    df=linked_data,\n",
    "    note=note\n",
    ")\n",
    "\n",
    "# 6. Since there is no trait data, the dataset is not usable for our association study\n",
    "# So we should not save it to out_data_file\n",
    "print(f\"Dataset usability: {is_usable}\")\n",
    "if is_usable:\n",
    "    os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n",
    "    linked_data.to_csv(out_data_file)\n",
    "    print(f\"Linked data saved to {out_data_file}\")\n",
    "else:\n",
    "    print(\"Dataset does not contain Height data and cannot be used for association studies.\")"
   ]
  }
 ],
 "metadata": {},
 "nbformat": 4,
 "nbformat_minor": 5
}