{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e66ff380",
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "import os\n",
    "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n",
    "\n",
    "# Path Configuration\n",
    "from tools.preprocess import *\n",
    "\n",
    "# Processing context\n",
    "trait = \"Height\"\n",
    "cohort = \"GSE181339\"\n",
    "\n",
    "# Input paths\n",
    "in_trait_dir = \"../../input/GEO/Height\"\n",
    "in_cohort_dir = \"../../input/GEO/Height/GSE181339\"\n",
    "\n",
    "# Output paths\n",
    "out_data_file = \"../../output/preprocess/Height/GSE181339.csv\"\n",
    "out_gene_data_file = \"../../output/preprocess/Height/gene_data/GSE181339.csv\"\n",
    "out_clinical_data_file = \"../../output/preprocess/Height/clinical_data/GSE181339.csv\"\n",
    "json_path = \"../../output/preprocess/Height/cohort_info.json\"\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c26737df",
   "metadata": {},
   "source": [
    "### Step 1: Initial Data Loading"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6aa8ff0d",
   "metadata": {},
   "outputs": [],
   "source": [
    "from tools.preprocess import *\n",
    "# 1. Identify the paths to the SOFT file and the matrix file\n",
    "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n",
    "\n",
    "# 2. Read the matrix file to obtain background information and sample characteristics data\n",
    "background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n",
    "clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n",
    "background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n",
    "\n",
    "# 3. Obtain the sample characteristics dictionary from the clinical dataframe\n",
    "sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n",
    "\n",
    "# 4. Explicitly print out all the background information and the sample characteristics dictionary\n",
    "print(\"Background Information:\")\n",
    "print(background_info)\n",
    "print(\"Sample Characteristics Dictionary:\")\n",
    "print(sample_characteristics_dict)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f981b5e2",
   "metadata": {},
   "source": [
    "### Step 2: Dataset Analysis and Clinical Feature Extraction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f6cf6496",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Gene Expression Data Availability\n",
    "# Based on the series description, this appears to be a gene expression study\n",
    "# \"For the microarray experiment...\" suggests gene expression data is available\n",
    "is_gene_available = True\n",
    "\n",
    "# 2. Variable Availability and Data Type Conversion\n",
    "\n",
    "# 2.1 Data Availability\n",
    "trait_row = 1  # 'group' contains weight status information (NW, OW/OB, MONW)\n",
    "age_row = 2    # 'age' is available\n",
    "gender_row = 0  # 'gender' is available\n",
    "\n",
    "# 2.2 Data Type Conversion\n",
    "def convert_trait(value):\n",
    "    \"\"\"Convert the group value to binary form (0 for normal weight, 1 for overweight/obese or MONW)\"\"\"\n",
    "    if pd.isna(value) or value is None:\n",
    "        return None\n",
    "    \n",
    "    # Extract the value after the colon if present\n",
    "    if isinstance(value, str) and \":\" in value:\n",
    "        value = value.split(\":\", 1)[1].strip()\n",
    "    \n",
    "    if value.upper() == \"NW\":\n",
    "        return 0  # Normal weight\n",
    "    elif value.upper() in [\"OW/OB\", \"MONW\"]:\n",
    "        return 1  # Overweight/obese or Metabolically Obese Normal-Weight\n",
    "    else:\n",
    "        return None\n",
    "\n",
    "def convert_age(value):\n",
    "    \"\"\"Convert age string to numeric value\"\"\"\n",
    "    if pd.isna(value) or value is None:\n",
    "        return None\n",
    "    \n",
    "    # Extract the value after the colon if present\n",
    "    if isinstance(value, str) and \":\" in value:\n",
    "        value = value.split(\":\", 1)[1].strip()\n",
    "    \n",
    "    try:\n",
    "        return float(value)\n",
    "    except (ValueError, TypeError):\n",
    "        return None\n",
    "\n",
    "def convert_gender(value):\n",
    "    \"\"\"Convert gender string to binary (0 for female, 1 for male)\"\"\"\n",
    "    if pd.isna(value) or value is None:\n",
    "        return None\n",
    "    \n",
    "    # Extract the value after the colon if present\n",
    "    if isinstance(value, str) and \":\" in value:\n",
    "        value = value.split(\":\", 1)[1].strip().lower()\n",
    "    \n",
    "    if value.lower() == \"woman\" or value.lower() == \"female\":\n",
    "        return 0\n",
    "    elif value.lower() == \"man\" or value.lower() == \"male\":\n",
    "        return 1\n",
    "    else:\n",
    "        return None\n",
    "\n",
    "# 3. Save Metadata\n",
    "# Since trait_row is not None, trait data is available\n",
    "is_trait_available = trait_row is not None\n",
    "\n",
    "# Conduct initial filtering on the usability\n",
    "validate_and_save_cohort_info(\n",
    "    is_final=False,\n",
    "    cohort=cohort,\n",
    "    info_path=json_path,\n",
    "    is_gene_available=is_gene_available,\n",
    "    is_trait_available=is_trait_available\n",
    ")\n",
    "\n",
    "# 4. Clinical Feature Extraction\n",
    "# Since trait_row is not None, we proceed with clinical data extraction\n",
    "# Create a DataFrame from the sample characteristics dictionary provided in the task\n",
    "sample_characteristics = {\n",
    "    0: ['gender: Man', 'gender: Woman'],\n",
    "    1: ['group: NW', 'group: OW/OB', 'group: MONW'],\n",
    "    2: ['age: 21', 'age: 23', 'age: 10', 'age: 17', 'age: 11', 'age: 1', 'age: 18', 'age: 12', 'age: 8', 'age: 14', 'age: 26', 'age: 4', 'age: 2', 'age: 3', 'age: 7', 'age: 13', 'age: 15', 'age: 9', 'age: 30', 'age: 19'],\n",
    "    3: ['fasting time: 6hr', 'fasting time: 4hr'],\n",
    "    4: ['timepoint: 0months', 'timepoint: 6months']\n",
    "}\n",
    "\n",
    "# Convert to a format that geo_select_clinical_features can work with\n",
    "# Create a dataframe with the sample characteristics\n",
    "clinical_data = pd.DataFrame.from_dict(sample_characteristics, orient='index')\n",
    "clinical_data.index.name = 'row_id'\n",
    "clinical_data = clinical_data.reset_index()\n",
    "\n",
    "# Extract clinical features\n",
    "clinical_features = geo_select_clinical_features(\n",
    "    clinical_df=clinical_data,\n",
    "    trait=trait,\n",
    "    trait_row=trait_row,\n",
    "    convert_trait=convert_trait,\n",
    "    age_row=age_row,\n",
    "    convert_age=convert_age,\n",
    "    gender_row=gender_row,\n",
    "    convert_gender=convert_gender\n",
    ")\n",
    "\n",
    "# Preview the extracted clinical features\n",
    "preview = preview_df(clinical_features)\n",
    "print(\"Preview of clinical features:\", preview)\n",
    "\n",
    "# Save the clinical data\n",
    "os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n",
    "clinical_features.to_csv(out_clinical_data_file, index=False)\n",
    "print(f\"Clinical data saved to {out_clinical_data_file}\")\n"
   ]
  },
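  {
   "cell_type": "markdown",
   "id": "a0c1e2d3",
   "metadata": {},
   "source": [
    "A quick sanity check of the converters defined above, run on a few example characteristic strings. This is a minimal illustration only: the example values are assumed for demonstration and are not drawn from the actual GEO samples."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a0c1e2d4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal sanity check of the converters on assumed example strings\n",
    "examples_trait = ['group: NW', 'group: OW/OB', 'group: MONW', 'group: unknown']\n",
    "examples_age = ['age: 23', 'age: 30', 'age: n/a']\n",
    "examples_gender = ['gender: Man', 'gender: Woman']\n",
    "\n",
    "print([convert_trait(v) for v in examples_trait])    # expected: [0, 1, 1, None]\n",
    "print([convert_age(v) for v in examples_age])        # expected: [23.0, 30.0, None]\n",
    "print([convert_gender(v) for v in examples_gender])  # expected: [1, 0]\n"
   ]
  },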
  {
   "cell_type": "markdown",
   "id": "1a8f3056",
   "metadata": {},
   "source": [
    "### Step 3: Dataset Analysis and Clinical Feature Extraction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "39ac46bd",
   "metadata": {},
   "outputs": [],
   "source": [
    "I'll debug the code to properly handle GEO dataset files and extract clinical features.\n",
    "\n",
    "```python\n",
    "import os\n",
    "import json\n",
    "import pandas as pd\n",
    "import glob\n",
    "from typing import Optional, Callable, Dict, Any\n",
    "\n",
    "# First, let's explore what files are available in the directory\n",
    "print(f\"Exploring directory: {in_cohort_dir}\")\n",
    "available_files = glob.glob(os.path.join(in_cohort_dir, \"*\"))\n",
    "print(f\"Available files: {available_files}\")\n",
    "\n",
    "# GEO data is typically stored in _series_matrix.txt files\n",
    "series_matrix_files = glob.glob(os.path.join(in_cohort_dir, \"*_series_matrix.txt*\"))\n",
    "if series_matrix_files:\n",
    "    matrix_file = series_matrix_files[0]\n",
    "    print(f\"Found series matrix file: {matrix_file}\")\n",
    "    \n",
    "    # Read the file line by line to extract sample characteristics\n",
    "    sample_char_dict = {}\n",
    "    current_line_idx = 0\n",
    "    with open(matrix_file, 'r') as f:\n",
    "        for line in f:\n",
    "            if line.startswith('!Sample_characteristics_ch1'):\n",
    "                parts = line.strip().split('\\t')\n",
    "                if len(parts) > 1:  # Ensure there's at least one sample\n",
    "                    # Remove the prefix to get just the values\n",
    "                    values = [p.replace('!Sample_characteristics_ch1 = ', '') for p in parts]\n",
    "                    sample_char_dict[current_line_idx] = values\n",
    "                    current_line_idx += 1\n",
    "            elif line.startswith('!Sample_title'):\n",
    "                # Sample titles can sometimes contain useful information\n",
    "                parts = line.strip().split('\\t')\n",
    "                if len(parts) > 1:\n",
    "                    values = [p.replace('!Sample_title = ', '') for p in parts]\n",
    "                    sample_char_dict[current_line_idx] = values\n",
    "                    current_line_idx += 1\n",
    "                    \n",
    "    # If we've collected any sample characteristics, convert to DataFrame\n",
    "    if sample_char_dict:\n",
    "        clinical_data = pd.DataFrame(sample_char_dict).T\n",
    "        # Print a preview of what we found\n",
    "        print(\"Sample characteristics preview:\")\n",
    "        for idx, row in clinical_data.iterrows():\n",
    "            print(f\"Row {idx}: {row.unique()[:5]}...\")\n",
    "    else:\n",
    "        clinical_data = pd.DataFrame()\n",
    "        print(\"No sample characteristics found in series matrix file.\")\n",
    "else:\n",
    "    # If no series matrix file, try to find a soft file\n",
    "    soft_files = glob.glob(os.path.join(in_cohort_dir, \"*.soft*\"))\n",
    "    if soft_files:\n",
    "        soft_file = soft_files[0]\n",
    "        print(f\"Found SOFT file: {soft_file}\")\n",
    "        \n",
    "        # Read the SOFT file to extract sample characteristics\n",
    "        sample_char_dict = {}\n",
    "        current_line_idx = 0\n",
    "        with open(soft_file, 'r') as f:\n",
    "            in_sample_section = False\n",
    "            current_sample = None\n",
    "            for line in f:\n",
    "                if line.startswith('^SAMPLE'):\n",
    "                    in_sample_section = True\n",
    "                    current_sample = []\n",
    "                elif line.startswith('!Sample_characteristics_ch1'):\n",
    "                    if in_sample_section:\n",
    "                        current_sample.append(line.strip().split(' = ')[1])\n",
    "                elif line.startswith('!sample_table_end'):\n",
    "                    if in_sample_section and current_sample:\n",
    "                        sample_char_dict[current_line_idx] = current_sample\n",
    "                        current_line_idx += 1\n",
    "                        current_sample = None\n",
    "                        in_sample_section = False\n",
    "                        \n",
    "        if sample_char_dict:\n",
    "            clinical_data = pd.DataFrame(sample_char_dict).T\n",
    "            print(\"Sample characteristics preview from SOFT file:\")\n",
    "            for idx, row in clinical_data.iterrows():\n",
    "                print(f\"Row {idx}: {row.unique()[:5]}...\")\n",
    "        else:\n",
    "            clinical_data = pd.DataFrame()\n",
    "            print(\"No sample characteristics found in SOFT file.\")\n",
    "    else:\n",
    "        # As a last resort, try to find any text files\n",
    "        txt_files = glob.glob(os.path.join(in_cohort_dir, \"*.txt\"))\n",
    "        if txt_files:\n",
    "            print(f\"Found text files but no recognized GEO format: {txt_files}\")\n",
    "            clinical_data = pd.DataFrame()\n",
    "        else:\n",
    "            print(\"No recognizable data files found.\")\n",
    "            clinical_data = pd.DataFrame()\n",
    "\n",
    "# Analyze what we have and make decisions about data availability\n",
    "is_gene_available = True  # Assuming gene expression data exists unless determined otherwise\n",
    "\n",
    "# Determine if height-related data is available in the clinical data\n",
    "trait_row = None\n",
    "age_row = None\n",
    "gender_row = None\n",
    "\n",
    "if not clinical_data.empty:\n",
    "    # Check each row for trait, age, and gender data\n",
    "    for row_idx in range(len(clinical_data)):\n",
    "        row_values = clinical_data.iloc[row_idx].astype(str)\n",
    "        row_text = ' '.join(row_values).lower()\n",
    "        \n",
    "        # Check for trait (Height)\n",
    "        if 'height' in row_text and trait_row is None:\n",
    "            unique_values = row_values.unique()\n",
    "            if len(unique_values) > 1:  # More than one unique value\n",
    "                trait_row = row_idx\n",
    "                print(f\"Found trait data (Height) in row {row_idx}: {unique_values[:5]}\")\n",
    "        \n",
    "        # Check for age\n",
    "        if ('age' in row_text or 'years' in row_text) and age_row is None:\n",
    "            unique_values = row_values.unique()\n",
    "            if len(unique_values) > 1:  # More than one unique value\n",
    "                age_row = row_idx\n",
    "                print(f\"Found age data in row {row_idx}: {unique_values[:5]}\")\n",
    "        \n",
    "        # Check for gender\n",
    "        if ('gender' in row_text or 'sex' in row_text) and gender_row is None:\n",
    "            unique_values = row_values.unique()\n",
    "            if len(unique_values) > 1:  # More than one unique value\n",
    "                gender_row = row_idx\n",
    "                print(f\"Found gender data in row {row_idx}: {unique_values[:5]}\")\n",
    "\n",
    "# Define conversion functions based on the identified data structure\n",
    "def convert_trait(value):\n",
    "    \"\"\"Convert height value to a continuous numeric value.\"\"\"\n",
    "    try:\n",
    "        # Try to extract a numeric value from the string\n",
    "        # Height may be in format like \"height: 180cm\" or similar\n",
    "        if value is None:\n",
    "            return None\n",
    "        \n",
    "        value = str(value).lower()\n",
    "        # Look for height patterns\n",
    "        if 'height' in value:\n",
    "            # Extract numeric part - look for digits\n",
    "            import re\n",
    "            height_match = re.search(r'(\\d+\\.?\\d*)', value)\n",
    "            if height_match:\n",
    "                return float(height_match.group(1))\n",
    "        # If it's just a number, try to convert directly\n",
    "        elif value.replace('.', '', 1).isdigit():\n",
    "            return float(value)\n",
    "        return None\n",
    "    except Exception as e:\n",
    "        print(f\"Error converting trait: {e}\")\n",
    "        return None\n",
    "\n",
    "def convert_age(value):\n",
    "    \"\"\"Convert age value to a continuous numeric value.\"\"\"\n",
    "    try:\n",
    "        if value is None:\n",
    "            return None\n",
    "        \n",
    "        value = str(value).lower()\n",
    "        # Look for age patterns\n",
    "        if 'age' in value or 'years' in value:\n",
    "            # Extract numeric part\n",
    "            import re\n",
    "            age_match = re.search(r'(\\d+\\.?\\d*)', value)\n",
    "            if age_match:\n",
    "                return float(age_match.group(1))\n",
    "        # If it's just a number, try to convert directly\n",
    "        elif value.replace('.', '', 1).isdigit():\n",
    "            return float(value)\n",
    "        return None\n",
    "    except Exception as e:\n",
    "        print(f\"Error converting age: {e}\")\n",
    "        return None\n",
    "\n",
    "def convert_gender(value):\n",
    "    \"\"\"Convert gender value to binary (0 for female, 1 for male).\"\"\"\n",
    "    try:\n",
    "        if value is None:\n",
    "            return None\n",
    "        \n",
    "        value = str(value).lower()\n",
    "        # Check for gender/sex indicators\n",
    "        if 'female' in value or 'f' == value.strip() or 'f:\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fcbad901",
   "metadata": {},
   "source": [
    "### Step 4: Gene Data Extraction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "452bb99a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Get the file paths for the SOFT file and matrix file\n",
    "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n",
    "\n",
    "# 2. First, let's examine the structure of the matrix file to understand its format\n",
    "import gzip\n",
    "\n",
    "# Peek at the first few lines of the file to understand its structure\n",
    "with gzip.open(matrix_file, 'rt') as file:\n",
    "    # Read first 100 lines to find the header structure\n",
    "    for i, line in enumerate(file):\n",
    "        if '!series_matrix_table_begin' in line:\n",
    "            print(f\"Found data marker at line {i}\")\n",
    "            # Read the next line which should be the header\n",
    "            header_line = next(file)\n",
    "            print(f\"Header line: {header_line.strip()}\")\n",
    "            # And the first data line\n",
    "            first_data_line = next(file)\n",
    "            print(f\"First data line: {first_data_line.strip()}\")\n",
    "            break\n",
    "        if i > 100:  # Limit search to first 100 lines\n",
    "            print(\"Matrix table marker not found in first 100 lines\")\n",
    "            break\n",
    "\n",
    "# 3. Now try to get the genetic data with better error handling\n",
    "try:\n",
    "    gene_data = get_genetic_data(matrix_file)\n",
    "    print(gene_data.index[:20])\n",
    "except KeyError as e:\n",
    "    print(f\"KeyError: {e}\")\n",
    "    \n",
    "    # Alternative approach: manually extract the data\n",
    "    print(\"\\nTrying alternative approach to read the gene data:\")\n",
    "    with gzip.open(matrix_file, 'rt') as file:\n",
    "        # Find the start of the data\n",
    "        for line in file:\n",
    "            if '!series_matrix_table_begin' in line:\n",
    "                break\n",
    "                \n",
    "        # Read the headers and data\n",
    "        import pandas as pd\n",
    "        df = pd.read_csv(file, sep='\\t', index_col=0)\n",
    "        print(f\"Column names: {df.columns[:5]}\")\n",
    "        print(f\"First 20 row IDs: {df.index[:20]}\")\n",
    "        gene_data = df\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "28b890b3",
   "metadata": {},
   "source": [
    "### Step 5: Gene Identifier Review"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "89de7d56",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Based on reviewing the gene identifiers in the gene expression data, I can see they are numeric \n",
    "# identifiers (like 7, 8, 15, 18, etc.) rather than human gene symbols (which would be something \n",
    "# like BRCA1, TP53, etc.)\n",
    "# \n",
    "# These appear to be probe IDs from a microarray platform, which need to be mapped to human gene symbols\n",
    "# for proper biological interpretation. The numeric format is typical of Affymetrix or similar microarray \n",
    "# platforms where probes are identified by numbers rather than gene names.\n",
    "\n",
    "requires_gene_mapping = True\n"
   ]
  },
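  {
   "cell_type": "markdown",
   "id": "b1d2e3f4",
   "metadata": {},
   "source": [
    "To make the mapping requirement concrete, the toy sketch below shows what translating numeric probe IDs into gene symbols involves: an expression table keyed by probe IDs is joined to a probe-to-symbol table, and probe-level values are aggregated per gene. The probe IDs, gene symbols, and mean aggregation here are assumptions chosen for illustration; the actual mapping for this dataset is performed later with `get_gene_mapping` and `apply_gene_mapping`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b1d2e3f5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Toy illustration (assumed data): map numeric probe IDs to gene symbols,\n",
    "# then aggregate probe-level values per gene. The real pipeline uses the\n",
    "# platform annotation table and the library helpers in the following steps.\n",
    "import pandas as pd\n",
    "\n",
    "expr = pd.DataFrame(\n",
    "    {'GSM_A': [1.2, 3.4, 2.2], 'GSM_B': [1.0, 3.1, 2.5]},\n",
    "    index=[7, 8, 15]  # numeric probe IDs, like those seen in this dataset\n",
    ")\n",
    "mapping = pd.DataFrame({'ID': [7, 8, 15], 'Gene': ['TP53', 'TP53', 'BRCA1']})\n",
    "\n",
    "# Join expression rows to their gene symbols and average probes per gene\n",
    "mapped = expr.join(mapping.set_index('ID')).groupby('Gene').mean()\n",
    "print(mapped)\n"
   ]
  },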
  {
   "cell_type": "markdown",
   "id": "bc845dcc",
   "metadata": {},
   "source": [
    "### Step 6: Gene Annotation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "870ed6f2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Let's first examine the structure of the SOFT file before trying to parse it\n",
    "import gzip\n",
    "\n",
    "# Look at the first few lines of the SOFT file to understand its structure\n",
    "print(\"Examining SOFT file structure:\")\n",
    "try:\n",
    "    with gzip.open(soft_file, 'rt') as file:\n",
    "        # Read first 20 lines to understand the file structure\n",
    "        for i, line in enumerate(file):\n",
    "            if i < 20:\n",
    "                print(f\"Line {i}: {line.strip()}\")\n",
    "            else:\n",
    "                break\n",
    "except Exception as e:\n",
    "    print(f\"Error reading SOFT file: {e}\")\n",
    "\n",
    "# 2. Now let's try a more robust approach to extract the gene annotation\n",
    "# Instead of using the library function which failed, we'll implement a custom approach\n",
    "try:\n",
    "    # First, look for the platform section which contains gene annotation\n",
    "    platform_data = []\n",
    "    with gzip.open(soft_file, 'rt') as file:\n",
    "        in_platform_section = False\n",
    "        for line in file:\n",
    "            if line.startswith('^PLATFORM'):\n",
    "                in_platform_section = True\n",
    "                continue\n",
    "            if in_platform_section and line.startswith('!platform_table_begin'):\n",
    "                # Next line should be the header\n",
    "                header = next(file).strip()\n",
    "                platform_data.append(header)\n",
    "                # Read until the end of the platform table\n",
    "                for table_line in file:\n",
    "                    if table_line.startswith('!platform_table_end'):\n",
    "                        break\n",
    "                    platform_data.append(table_line.strip())\n",
    "                break\n",
    "    \n",
    "    # If we found platform data, convert it to a DataFrame\n",
    "    if platform_data:\n",
    "        import pandas as pd\n",
    "        import io\n",
    "        platform_text = '\\n'.join(platform_data)\n",
    "        gene_annotation = pd.read_csv(io.StringIO(platform_text), delimiter='\\t', \n",
    "                                      low_memory=False, on_bad_lines='skip')\n",
    "        print(\"\\nGene annotation preview:\")\n",
    "        print(preview_df(gene_annotation))\n",
    "    else:\n",
    "        print(\"Could not find platform table in SOFT file\")\n",
    "        \n",
    "        # Try an alternative approach - extract mapping from other sections\n",
    "        with gzip.open(soft_file, 'rt') as file:\n",
    "            for line in file:\n",
    "                if 'ANNOTATION information' in line or 'annotation information' in line:\n",
    "                    print(f\"Found annotation information: {line.strip()}\")\n",
    "                if line.startswith('!Platform_title') or line.startswith('!platform_title'):\n",
    "                    print(f\"Platform title: {line.strip()}\")\n",
    "            \n",
    "except Exception as e:\n",
    "    print(f\"Error processing gene annotation: {e}\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "06038669",
   "metadata": {},
   "source": [
    "### Step 7: Gene Identifier Mapping"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f15b5960",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect the gene identifier columns in the annotation dataframe\n",
    "print(\"Column names in gene annotation dataframe:\")\n",
    "print(gene_annotation.columns)\n",
    "\n",
    "# Based on previous output, I can see that:\n",
    "# - The gene expression data uses numeric IDs (like 7, 8, 15...)\n",
    "# - The gene annotation dataframe has an 'ID' column that appears to contain similar numeric identifiers\n",
    "# - The 'GENE_SYMBOL' column contains the human gene symbols we need\n",
    "\n",
    "# 1. Identify the mapping columns\n",
    "probe_id_col = 'ID'\n",
    "gene_symbol_col = 'GENE_SYMBOL'\n",
    "\n",
    "# 2. Get the gene mapping dataframe by extracting the two relevant columns\n",
    "mapping_df = get_gene_mapping(gene_annotation, probe_id_col, gene_symbol_col)\n",
    "print(f\"Number of probes with gene mapping: {len(mapping_df)}\")\n",
    "print(f\"Sample of the mapping dataframe:\")\n",
    "print(mapping_df.head())\n",
    "\n",
    "# 3. Apply gene mapping to convert probe-level measurements to gene expression data\n",
    "gene_data = apply_gene_mapping(gene_data, mapping_df)\n",
    "print(f\"Dimensions of gene expression data after mapping: {gene_data.shape}\")\n",
    "print(f\"Sample of gene symbols and expression values:\")\n",
    "print(gene_data.head())\n",
    "\n",
    "# Save the gene expression data\n",
    "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n",
    "gene_data.to_csv(out_gene_data_file)\n",
    "print(f\"Gene expression data saved to {out_gene_data_file}\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8f477487",
   "metadata": {},
   "source": [
    "### Step 8: Data Normalization and Linking"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1323b4e1",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import os\n",
    "\n",
    "# 1. Let's load the gene expression data\n",
    "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n",
    "\n",
    "# Extract gene expression data\n",
    "gene_data = get_genetic_data(matrix_file)\n",
    "\n",
    "# Normalize gene symbols using the NCBI Gene database information\n",
    "normalized_gene_data = normalize_gene_symbols_in_index(gene_data)\n",
    "print(f\"Gene data shape after normalization: {normalized_gene_data.shape}\")\n",
    "\n",
    "# Save the normalized gene data to the output file\n",
    "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n",
    "normalized_gene_data.to_csv(out_gene_data_file)\n",
    "print(f\"Normalized gene data saved to {out_gene_data_file}\")\n",
    "\n",
    "# 2. Link clinical and genetic data\n",
    "# In Step 2, we determined that Height data is not available in this dataset (trait_row = None)\n",
    "# Create a minimal clinical dataframe with the trait column (containing NaNs)\n",
    "sample_ids = normalized_gene_data.columns\n",
    "minimal_clinical_df = pd.DataFrame(index=sample_ids)\n",
    "minimal_clinical_df[trait] = np.nan  # Add the trait column with NaN values\n",
    "\n",
    "# If we have age and gender data from Step 2, add those columns\n",
    "if age_row is not None:\n",
    "    minimal_clinical_df['Age'] = get_feature_data(clinical_data, age_row, 'Age', convert_age).iloc[0]\n",
    "\n",
    "if gender_row is not None:\n",
    "    minimal_clinical_df['Gender'] = get_feature_data(clinical_data, gender_row, 'Gender', convert_gender).iloc[0]\n",
    "\n",
    "minimal_clinical_df.index.name = 'Sample'\n",
    "\n",
    "# Save this minimal clinical data for reference\n",
    "os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n",
    "minimal_clinical_df.to_csv(out_clinical_data_file)\n",
    "print(f\"Clinical data saved to {out_clinical_data_file}\")\n",
    "\n",
    "# Create a linked dataset \n",
    "linked_data = pd.concat([minimal_clinical_df, normalized_gene_data.T], axis=1)\n",
    "linked_data.index.name = 'Sample'\n",
    "print(f\"Linked data shape: {linked_data.shape}\")\n",
    "\n",
    "# We would normally handle missing values here, but since all trait values are missing,\n",
    "# the dataset will be empty after removing samples with missing trait values\n",
    "# Therefore, we'll skip that step\n",
    "\n",
    "# 4 & 5. Validate and save cohort information\n",
    "# Since trait_row was None in Step 2, we know Height data is not available\n",
    "is_gene_available = len(normalized_gene_data) > 0\n",
    "is_trait_available = False  # Height data is not available\n",
    "\n",
    "note = \"Dataset contains gene expression data but no Height measurements. This dataset is not usable for studying Height associations.\"\n",
    "\n",
    "# For datasets without trait data, we set is_biased to False\n",
    "# This indicates the dataset is not usable due to missing trait data, not due to bias\n",
    "is_biased = False\n",
    "\n",
    "# Final validation\n",
    "is_usable = validate_and_save_cohort_info(\n",
    "    is_final=True, \n",
    "    cohort=cohort, \n",
    "    info_path=json_path, \n",
    "    is_gene_available=is_gene_available, \n",
    "    is_trait_available=is_trait_available, \n",
    "    is_biased=is_biased,\n",
    "    df=linked_data,\n",
    "    note=note\n",
    ")\n",
    "\n",
    "# 6. Since there is no trait data, the dataset is not usable for our association study\n",
    "# So we should not save it to out_data_file\n",
    "print(f\"Dataset usability: {is_usable}\")\n",
    "if is_usable:\n",
    "    os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n",
    "    linked_data.to_csv(out_data_file)\n",
    "    print(f\"Linked data saved to {out_data_file}\")\n",
    "else:\n",
    "    print(\"Dataset does not contain Height data and cannot be used for association studies.\")"
   ]
  }
 ],
 "metadata": {},
 "nbformat": 4,
 "nbformat_minor": 5
}