{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6c20ab4e",
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "import os\n",
    "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n",
    "\n",
    "# Path Configuration\n",
    "from tools.preprocess import *\n",
    "\n",
    "# Processing context\n",
    "trait = \"X-Linked_Lymphoproliferative_Syndrome\"\n",
    "cohort = \"GSE180395\"\n",
    "\n",
    "# Input paths\n",
    "in_trait_dir = \"../../input/GEO/X-Linked_Lymphoproliferative_Syndrome\"\n",
    "in_cohort_dir = \"../../input/GEO/X-Linked_Lymphoproliferative_Syndrome/GSE180395\"\n",
    "\n",
    "# Output paths\n",
    "out_data_file = \"../../output/preprocess/X-Linked_Lymphoproliferative_Syndrome/GSE180395.csv\"\n",
    "out_gene_data_file = \"../../output/preprocess/X-Linked_Lymphoproliferative_Syndrome/gene_data/GSE180395.csv\"\n",
    "out_clinical_data_file = \"../../output/preprocess/X-Linked_Lymphoproliferative_Syndrome/clinical_data/GSE180395.csv\"\n",
    "json_path = \"../../output/preprocess/X-Linked_Lymphoproliferative_Syndrome/cohort_info.json\"\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "eb2ad70c",
   "metadata": {},
   "source": [
    "### Step 1: Initial Data Loading"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ae6ad9c2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Let's first list the directory contents to understand what files are available\n",
    "import os\n",
    "\n",
    "print(\"Files in the cohort directory:\")\n",
    "files = os.listdir(in_cohort_dir)\n",
    "print(files)\n",
    "\n",
    "# Adapt file identification to handle different naming patterns\n",
    "soft_files = [f for f in files if 'soft' in f.lower() or '.soft' in f.lower() or '_soft' in f.lower()]\n",
    "matrix_files = [f for f in files if 'matrix' in f.lower() or '.matrix' in f.lower() or '_matrix' in f.lower()]\n",
    "\n",
    "# If no files with these patterns are found, look for alternative file types\n",
    "if not soft_files:\n",
    "    soft_files = [f for f in files if f.endswith('.txt') or f.endswith('.gz')]\n",
    "if not matrix_files:\n",
    "    matrix_files = [f for f in files if f.endswith('.txt') or f.endswith('.gz')]\n",
    "\n",
    "print(\"Identified SOFT files:\", soft_files)\n",
    "print(\"Identified matrix files:\", matrix_files)\n",
    "\n",
    "# Use the first files found, if any\n",
    "if len(soft_files) > 0 and len(matrix_files) > 0:\n",
    "    soft_file = os.path.join(in_cohort_dir, soft_files[0])\n",
    "    matrix_file = os.path.join(in_cohort_dir, matrix_files[0])\n",
    "    \n",
    "    # 2. Read the matrix file to obtain background information and sample characteristics data\n",
    "    background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n",
    "    clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n",
    "    background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n",
    "    \n",
    "    # 3. Obtain the sample characteristics dictionary from the clinical dataframe\n",
    "    sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n",
    "    \n",
    "    # 4. Explicitly print out all the background information and the sample characteristics dictionary\n",
    "    print(\"\\nBackground Information:\")\n",
    "    print(background_info)\n",
    "    print(\"\\nSample Characteristics Dictionary:\")\n",
    "    print(sample_characteristics_dict)\n",
    "else:\n",
    "    print(\"No appropriate files found in the directory.\")\n"
   ]
  },
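  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4f9b21aa",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hedged illustration (not part of the pipeline): how the filename pattern matching above\n",
    "# behaves on a hypothetical directory listing. The example filenames follow the usual GEO\n",
    "# naming convention but are made up here purely for demonstration.\n",
    "example_files = [\"GSE180395_family.soft.gz\", \"GSE180395_series_matrix.txt.gz\", \"README.txt\"]\n",
    "demo_soft = [f for f in example_files if 'soft' in f.lower()]\n",
    "demo_matrix = [f for f in example_files if 'matrix' in f.lower()]\n",
    "print(\"Would be identified as SOFT files:\", demo_soft)\n",
    "print(\"Would be identified as matrix files:\", demo_matrix)\n"
   ]
  },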
  {
   "cell_type": "markdown",
   "id": "e229a7d5",
   "metadata": {},
   "source": [
    "### Step 2: Dataset Analysis and Clinical Feature Extraction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cd234997",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import os\n",
    "import re\n",
    "import gzip\n",
    "from typing import Optional, Dict, Any, Callable\n",
    "\n",
    "# Function to extract sample characteristics from GEO series matrix file\n",
    "def extract_sample_info(file_path):\n",
    "    characteristics_dict = {}\n",
    "    background_info = {}\n",
    "    \n",
    "    with gzip.open(file_path, 'rt') as file:\n",
    "        line_count = 0\n",
    "        for line in file:\n",
    "            line = line.strip()\n",
    "            line_count += 1\n",
    "            \n",
    "            # Extract background information\n",
    "            if line.startswith('!Series_'):\n",
    "                key = line.split('\\t')[0]\n",
    "                value = line.split('\\t')[1] if len(line.split('\\t')) > 1 else \"\"\n",
    "                background_info[key] = value\n",
    "            \n",
    "            # Extract sample characteristics\n",
    "            if line.startswith('!Sample_characteristics_ch'):\n",
    "                parts = line.split('\\t')\n",
    "                key_idx = len(characteristics_dict)\n",
    "                values = [v.strip('\"') for v in parts[1:]]\n",
    "                unique_values = list(set([v for v in values if v and v != \"NA\"]))\n",
    "                characteristics_dict[key_idx] = unique_values\n",
    "            \n",
    "            # Limit processing to avoid memory issues\n",
    "            if line_count > 5000:\n",
    "                break\n",
    "    \n",
    "    return background_info, characteristics_dict\n",
    "\n",
    "# Process the GEO matrix file\n",
    "file_path = os.path.join(in_cohort_dir, \"GSE180395_series_matrix.txt.gz\")\n",
    "\n",
    "# Check if file exists\n",
    "if not os.path.exists(file_path):\n",
    "    print(f\"File not found: {file_path}\")\n",
    "    is_gene_available = False\n",
    "    is_trait_available = False\n",
    "else:\n",
    "    # Extract information\n",
    "    background_info, characteristics_dict = extract_sample_info(file_path)\n",
    "    \n",
    "    # Print extracted info for debugging\n",
    "    print(\"Background Information:\")\n",
    "    for key, value in background_info.items():\n",
    "        print(f\"{key}\\t{value}\")\n",
    "    \n",
    "    print(\"\\nSample Characteristics Dictionary:\")\n",
    "    print(characteristics_dict)\n",
    "    \n",
    "    # 1. Gene Expression Data Availability\n",
    "    # Based on the series title and summary, this appears to be a transcriptome study\n",
    "    is_gene_available = True\n",
    "    \n",
    "    # 2. Variable Availability\n",
    "    # From the output of the previous step, trait information is in row 0\n",
    "    trait_row = 0  # 'sample group' contains disease vs control information\n",
    "    age_row = None  # No age information available in the provided characteristics\n",
    "    gender_row = None  # No gender information available in the provided characteristics\n",
    "    \n",
    "    # Check trait data availability\n",
    "    is_trait_available = trait_row is not None\n",
    "\n",
    "# 2.2 Data Type Conversion Functions\n",
    "def convert_trait(value: str) -> Optional[int]:\n",
    "    \"\"\"Convert disease status to binary: 1 for disease, 0 for control/living donor.\"\"\"\n",
    "    if value is None:\n",
    "        return None\n",
    "    \n",
    "    # Extract the value after the colon if present\n",
    "    match = re.search(r':\\s*(.*)', value)\n",
    "    if match:\n",
    "        value = match.group(1).strip()\n",
    "    else:\n",
    "        value = value.strip()\n",
    "    \n",
    "    # Living donor is considered as control\n",
    "    if \"Living donor\" in value:\n",
    "        return 0\n",
    "    # All other values indicate some form of disease/condition\n",
    "    else:\n",
    "        return 1\n",
    "\n",
    "def convert_age(value: str) -> Optional[float]:\n",
    "    \"\"\"Convert age to float.\"\"\"\n",
    "    # Function defined but not used as age data is not available\n",
    "    return None\n",
    "\n",
    "def convert_gender(value: str) -> Optional[int]:\n",
    "    \"\"\"Convert gender to binary: 0 for female, 1 for male.\"\"\"\n",
    "    # Function defined but not used as gender data is not available\n",
    "    return None\n",
    "\n",
    "# 3. Save Metadata\n",
    "validate_and_save_cohort_info(\n",
    "    is_final=False,\n",
    "    cohort=cohort,\n",
    "    info_path=json_path,\n",
    "    is_gene_available=is_gene_available,\n",
    "    is_trait_available=is_trait_available\n",
    ")\n",
    "\n",
    "# 4. Clinical Feature Extraction\n",
    "if is_trait_available:\n",
    "    # Read the clinical data\n",
    "    clinical_data = pd.read_csv(file_path, sep='\\t', comment='!', compression='gzip')\n",
    "    \n",
    "    # Use the library function to extract clinical features\n",
    "    clinical_df = geo_select_clinical_features(\n",
    "        clinical_data, \n",
    "        trait=trait,\n",
    "        trait_row=trait_row,\n",
    "        convert_trait=convert_trait,\n",
    "        age_row=age_row,\n",
    "        convert_age=convert_age,\n",
    "        gender_row=gender_row,\n",
    "        convert_gender=convert_gender\n",
    "    )\n",
    "    \n",
    "    # Preview the clinical data\n",
    "    preview = preview_df(clinical_df)\n",
    "    print(\"Clinical Data Preview:\")\n",
    "    print(preview)\n",
    "    \n",
    "    # Create directory if it doesn't exist\n",
    "    os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n",
    "    \n",
    "    # Save the clinical data\n",
    "    clinical_df.to_csv(out_clinical_data_file, index=False)\n",
    "    print(f\"Clinical data saved to {out_clinical_data_file}\")\n"
   ]
  },
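  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8d2e47c0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hedged usage example (illustration only): how convert_trait defined above maps a few\n",
    "# hypothetical 'sample group' characteristic strings to the binary trait. The example\n",
    "# values are made up for demonstration and are not taken from the actual cohort data.\n",
    "for example in [\"sample group: Living donor\", \"sample group: FSGS\", \"sample group: Lupus nephritis (LN)\"]:\n",
    "    print(f\"{example!r} -> {convert_trait(example)}\")\n"
   ]
  },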
  {
   "cell_type": "markdown",
   "id": "560533da",
   "metadata": {},
   "source": [
    "### Step 3: Gene Data Extraction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "57109f15",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Use the helper function to get the proper file paths\n",
    "soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)\n",
    "\n",
    "# Extract gene expression data\n",
    "try:\n",
    "    gene_data = get_genetic_data(matrix_file_path)\n",
    "    \n",
    "    # Print the first 20 row IDs (gene or probe identifiers)\n",
    "    print(\"First 20 gene/probe identifiers:\")\n",
    "    print(gene_data.index[:20])\n",
    "    \n",
    "    # Print shape to understand the dataset dimensions\n",
    "    print(f\"\\nGene expression data shape: {gene_data.shape}\")\n",
    "    \n",
    "except Exception as e:\n",
    "    print(f\"Error extracting gene data: {e}\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "358ce157",
   "metadata": {},
   "source": [
    "### Step 4: Gene Identifier Review"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b9fc105f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Review the gene identifiers from the output above\n",
    "# The identifiers appear to be probe IDs from a microarray, as they have \n",
    "# a specific format with numbers followed by \"_at\"\n",
    "# These are not standard human gene symbols and will need to be mapped\n",
    "\n",
    "# Based on biomedical knowledge, these are likely Affymetrix probe IDs\n",
    "# which need to be mapped to human gene symbols\n",
    "\n",
    "requires_gene_mapping = True\n"
   ]
  },
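  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c5a90e17",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hedged illustration (not part of the pipeline): a simple heuristic that makes the decision\n",
    "# above reproducible. Identifiers like '10000_at' follow the Affymetrix/Brainarray probe-ID\n",
    "# convention, while human gene symbols are short uppercase tokens such as 'TP53'. The\n",
    "# fallback IDs below are hypothetical and are only used if gene_data is not in memory.\n",
    "import re\n",
    "\n",
    "try:\n",
    "    sample_ids = [str(i) for i in gene_data.index[:20]]\n",
    "except NameError:\n",
    "    sample_ids = [\"10000_at\", \"10001_at\", \"TP53\"]  # hypothetical fallback for illustration\n",
    "\n",
    "probe_like = sum(bool(re.fullmatch(r\"\\d+_at\", i)) for i in sample_ids)\n",
    "symbol_like = sum(bool(re.fullmatch(r\"[A-Z0-9-]{2,10}\", i)) for i in sample_ids)\n",
    "print(f\"Probe-like IDs: {probe_like}/{len(sample_ids)}; symbol-like IDs: {symbol_like}/{len(sample_ids)}\")\n",
    "print(\"Mapping to gene symbols appears necessary:\", probe_like > symbol_like)\n"
   ]
  },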
  {
   "cell_type": "markdown",
   "id": "7136736a",
   "metadata": {},
   "source": [
    "### Step 5: Gene Annotation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b2f6a869",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. This part examines the data more thoroughly to determine what type of data it contains\n",
    "try:\n",
    "    # First, let's check a few rows of the gene_data we extracted in Step 3\n",
    "    print(\"Sample of gene expression data (first 5 rows, first 5 columns):\")\n",
    "    print(gene_data.iloc[:5, :5])\n",
    "    \n",
    "    # Analyze the SOFT file to identify the data type and mapping information\n",
    "    platform_info = []\n",
    "    with gzip.open(soft_file_path, 'rt', encoding='latin-1') as f:\n",
    "        for line in f:\n",
    "            if line.startswith(\"!Platform_title\") or line.startswith(\"!Series_title\") or \"description\" in line.lower():\n",
    "                platform_info.append(line.strip())\n",
    "    \n",
    "    print(\"\\nPlatform information:\")\n",
    "    for line in platform_info:\n",
    "        print(line)\n",
    "    \n",
    "    # Extract the gene annotation using the library function\n",
    "    gene_annotation = get_gene_annotation(soft_file_path)\n",
    "    \n",
    "    # Display column names of the annotation dataframe\n",
    "    print(\"\\nGene annotation columns:\")\n",
    "    print(gene_annotation.columns.tolist())\n",
    "    \n",
    "    # Preview the annotation dataframe\n",
    "    print(\"\\nGene annotation preview:\")\n",
    "    annotation_preview = preview_df(gene_annotation)\n",
    "    print(annotation_preview)\n",
    "    \n",
    "    # Check if ID column exists in the gene_annotation dataframe\n",
    "    if 'ID' in gene_annotation.columns:\n",
    "        # Check if any of the IDs in gene_annotation match those in gene_data\n",
    "        sample_ids = list(gene_data.index[:10])\n",
    "        matching_rows = gene_annotation[gene_annotation['ID'].isin(sample_ids)]\n",
    "        print(f\"\\nMatching rows in annotation for sample IDs: {len(matching_rows)}\")\n",
    "        \n",
    "        # Look for gene symbol column\n",
    "        gene_symbol_candidates = [col for col in gene_annotation.columns if 'gene' in col.lower() or 'symbol' in col.lower() or 'name' in col.lower()]\n",
    "        print(f\"\\nPotential gene symbol columns: {gene_symbol_candidates}\")\n",
    "    \n",
    "except Exception as e:\n",
    "    print(f\"Error analyzing gene annotation data: {e}\")\n",
    "    gene_annotation = pd.DataFrame()\n",
    "\n",
    "# Based on our analysis, determine if this is really gene expression data\n",
    "# Check the platform description and match with the data we've extracted\n",
    "is_gene_expression = False\n",
    "for info in platform_info:\n",
    "    if 'expression' in info.lower() or 'transcript' in info.lower() or 'mrna' in info.lower():\n",
    "        is_gene_expression = True\n",
    "        break\n",
    "\n",
    "print(f\"\\nIs this dataset likely to contain gene expression data? {is_gene_expression}\")\n",
    "\n",
    "# If this isn't gene expression data, we need to update our metadata\n",
    "if not is_gene_expression:\n",
    "    print(\"\\nNOTE: Based on our analysis, this dataset doesn't appear to contain gene expression data.\")\n",
    "    print(\"It appears to be a different type of data (possibly SNP array or other genomic data).\")\n",
    "    # Update is_gene_available for metadata\n",
    "    is_gene_available = False\n",
    "    \n",
    "    # Save the updated metadata\n",
    "    validate_and_save_cohort_info(\n",
    "        is_final=False,\n",
    "        cohort=cohort,\n",
    "        info_path=json_path,\n",
    "        is_gene_available=is_gene_available,\n",
    "        is_trait_available=is_trait_available\n",
    "    )\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3d7f03b4",
   "metadata": {},
   "source": [
    "### Step 6: Gene Identifier Mapping"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "86e42d88",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Get the file paths\n",
    "soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)\n",
    "\n",
    "# Extract gene expression data again\n",
    "gene_data = get_genetic_data(matrix_file_path)\n",
    "\n",
    "# Check whether gene expression data has any rows\n",
    "if gene_data.shape[0] == 0:\n",
    "    print(\"Warning: Gene expression data is empty.\")\n",
    "    is_gene_available = False\n",
    "else:\n",
    "    # 1. Compare the probe ID formats in both datasets\n",
    "    print(\"Sample probe IDs in gene expression data:\")\n",
    "    print(gene_data.index[:5])\n",
    "    \n",
    "    # Extract the gene annotation using the library function\n",
    "    gene_annotation = get_gene_annotation(soft_file_path)\n",
    "    \n",
    "    print(\"\\nSample probe IDs in gene annotation:\")\n",
    "    print(gene_annotation['ID'].head())\n",
    "    \n",
    "    # 2. Get the gene mapping dataframe\n",
    "    prob_col = 'ID'  # This contains the probe IDs like '10000_at'\n",
    "    gene_col = 'ENTREZ_GENE_ID'  # This contains the Entrez Gene IDs\n",
    "    \n",
    "    # Create mapping dataframe\n",
    "    gene_mapping = get_gene_mapping(gene_annotation, prob_col, gene_col)\n",
    "    \n",
    "    # Check the overlap between gene expression probe IDs and mapping probe IDs\n",
    "    expression_probes = set(gene_data.index)\n",
    "    mapping_probes = set(gene_mapping['ID'])\n",
    "    overlap = expression_probes.intersection(mapping_probes)\n",
    "    \n",
    "    print(f\"\\nOverlap between expression probes and mapping probes: {len(overlap)} out of {len(expression_probes)} expression probes\")\n",
    "    \n",
    "    # 3. Modify the probe IDs in the mapping to match the expression data if needed\n",
    "    if len(overlap) == 0:\n",
    "        # Try to match by removing the \"_at\" suffix if present\n",
    "        # Check if we need to add or remove suffix\n",
    "        sample_expr_id = list(expression_probes)[0]\n",
    "        sample_map_id = list(mapping_probes)[0]\n",
    "        \n",
    "        print(f\"Sample expression probe ID: {sample_expr_id}\")\n",
    "        print(f\"Sample mapping probe ID: {sample_map_id}\")\n",
    "        \n",
    "        # Convert Entrez IDs to appropriate format for mapping\n",
    "        # Since our expression data has format like \"10000_at\", ensure mapping IDs match this format\n",
    "        if \"_at\" in sample_expr_id and \"_at\" not in sample_map_id:\n",
    "            print(\"Adding '_at' suffix to mapping probe IDs...\")\n",
    "            gene_mapping['ID'] = gene_mapping['ID'] + \"_at\"\n",
    "        elif \"_at\" not in sample_expr_id and \"_at\" in sample_map_id:\n",
    "            print(\"Removing '_at' suffix from mapping probe IDs...\")\n",
    "            gene_mapping['ID'] = gene_mapping['ID'].str.replace(\"_at\", \"\")\n",
    "        \n",
    "        # Check overlap again after modification\n",
    "        mapping_probes = set(gene_mapping['ID'])\n",
    "        overlap = expression_probes.intersection(mapping_probes)\n",
    "        print(f\"After adjustment, overlap: {len(overlap)} out of {len(expression_probes)} expression probes\")\n",
    "    \n",
    "    # 3. Apply the gene mapping to convert probe-level data to gene-level data\n",
    "    if len(overlap) > 0:\n",
    "        # This converts expression values from probes to genes \n",
    "        gene_data_mapped = apply_gene_mapping(gene_data, gene_mapping)\n",
    "        \n",
    "        # Let's see how many genes we have after mapping\n",
    "        print(f\"\\nGene expression data after mapping:\")\n",
    "        print(f\"Shape: {gene_data_mapped.shape}\")\n",
    "        if gene_data_mapped.shape[0] > 0:\n",
    "            print(\"First 5 genes:\")\n",
    "            print(gene_data_mapped.index[:5])\n",
    "            \n",
    "            # Update gene_data with the mapped data\n",
    "            gene_data = gene_data_mapped\n",
    "            \n",
    "            # Save the processed gene data\n",
    "            os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n",
    "            gene_data.to_csv(out_gene_data_file)\n",
    "            print(f\"Gene expression data saved to {out_gene_data_file}\")\n",
    "        else:\n",
    "            print(\"ERROR: Gene mapping resulted in empty dataframe.\")\n",
    "            is_gene_available = False\n",
    "    else:\n",
    "        print(\"ERROR: No overlap between gene expression probes and mapping probes.\")\n",
    "        print(\"Cannot proceed with gene mapping.\")\n",
    "        is_gene_available = False\n",
    "\n",
    "# Update metadata if mapping failed\n",
    "if not is_gene_available:\n",
    "    validate_and_save_cohort_info(\n",
    "        is_final=False,\n",
    "        cohort=cohort,\n",
    "        info_path=json_path,\n",
    "        is_gene_available=is_gene_available,\n",
    "        is_trait_available=is_trait_available\n",
    "    )\n"
   ]
  },
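  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e61b0d38",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hedged illustration only: a minimal pandas sketch of what the probe-to-gene mapping step\n",
    "# does conceptually (collapse several probes annotating the same gene into one row, here by\n",
    "# averaging). This is NOT the library's apply_gene_mapping implementation; the toy probe IDs,\n",
    "# gene IDs, and sample names below are hypothetical.\n",
    "import pandas as pd\n",
    "\n",
    "toy_expr = pd.DataFrame(\n",
    "    {\"GSM_A\": [1.0, 2.0, 3.0], \"GSM_B\": [4.0, 5.0, 6.0]},\n",
    "    index=[\"10000_at\", \"10001_at\", \"10002_at\"],\n",
    ")\n",
    "toy_map = pd.DataFrame({\"ID\": [\"10000_at\", \"10001_at\", \"10002_at\"], \"Gene\": [\"7157\", \"7157\", \"348\"]})\n",
    "\n",
    "# Join each probe's expression values to its gene ID, then average the probes per gene.\n",
    "collapsed = toy_expr.join(toy_map.set_index(\"ID\")).groupby(\"Gene\").mean(numeric_only=True)\n",
    "print(collapsed)\n"
   ]
  },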
  {
   "cell_type": "markdown",
   "id": "6c1b4591",
   "metadata": {},
   "source": [
    "### Step 7: Data Normalization and Linking"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fef09cea",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. First let's check the situation with our gene data\n",
    "try:\n",
    "    # Get the file paths\n",
    "    soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)\n",
    "    \n",
    "    # We know from previous steps that the gene mapping resulted in an empty dataframe\n",
    "    # Let's extract the genetic data again\n",
    "    gene_data = get_genetic_data(matrix_file_path)\n",
    "    \n",
    "    # Check if the gene data extraction worked\n",
    "    print(f\"Original gene expression data shape: {gene_data.shape}\")\n",
    "    \n",
    "    # Due to issues with gene mapping in previous steps, let's use the original probe-level data\n",
    "    if gene_data.shape[0] > 0:\n",
    "        print(\"Using original probe-level data instead of mapped gene data\")\n",
    "        # Set index name to \"Gene\" to maintain expected format\n",
    "        gene_data.index.name = 'Gene'\n",
    "        \n",
    "        # Save the gene data directly\n",
    "        os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n",
    "        gene_data.to_csv(out_gene_data_file)\n",
    "        print(f\"Probe-level data saved to {out_gene_data_file}\")\n",
    "    else:\n",
    "        print(\"ERROR: Gene data extraction failed\")\n",
    "        is_gene_available = False\n",
    "except Exception as e:\n",
    "    print(f\"Error with gene data processing: {e}\")\n",
    "    is_gene_available = False\n",
    "\n",
    "# 2. Extract and process clinical data from raw file\n",
    "try:\n",
    "    # Re-load the sample characteristics\n",
    "    background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n",
    "    clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n",
    "    _, clinical_raw = get_background_and_clinical_data(matrix_file_path, background_prefixes, clinical_prefixes)\n",
    "    \n",
    "    # Function to convert trait values based on sample description\n",
    "    def convert_trait(value):\n",
    "        \"\"\"Convert sample groups to binary trait values\"\"\"\n",
    "        if value is None or not isinstance(value, str):\n",
    "            return None\n",
    "            \n",
    "        # Extract the value after the colon if present\n",
    "        match = re.search(r'sample group:\\s*(.*)', value)\n",
    "        if match:\n",
    "            value = match.group(1).strip()\n",
    "        else:\n",
    "            value = value.strip()\n",
    "        \n",
    "        # Living donor is considered as control\n",
    "        if \"Living donor\" in value:\n",
    "            return 0\n",
    "        # All other values indicate some form of disease/condition\n",
    "        elif any(x in value for x in [\"GN\", \"LN\", \"nephritis\", \"FSGS\", \"DN\", \"amyloidosis\", \"MN\", \"AKI\"]):\n",
    "            return 1\n",
    "        else:\n",
    "            return None\n",
    "    \n",
    "    # Create a binary trait based on sample groups\n",
    "    trait_row = 0  # From inspection of the clinical_raw data\n",
    "    \n",
    "    # Process clinical features and extract trait information\n",
    "    if trait_row is not None:\n",
    "        clinical_df = clinical_raw.copy()\n",
    "        clinical_features = geo_select_clinical_features(\n",
    "            clinical_df, \n",
    "            trait=trait,\n",
    "            trait_row=trait_row,\n",
    "            convert_trait=convert_trait,\n",
    "            age_row=None,  # No age information available\n",
    "            convert_age=None,\n",
    "            gender_row=None,  # No gender information available\n",
    "            convert_gender=None\n",
    "        )\n",
    "        \n",
    "        # Transpose to get samples as rows\n",
    "        clinical_features = clinical_features.T\n",
    "        \n",
    "        # Save clinical features\n",
    "        os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n",
    "        clinical_features.to_csv(out_clinical_data_file)\n",
    "        print(f\"Clinical features saved to {out_clinical_data_file}\")\n",
    "        print(f\"Clinical features shape: {clinical_features.shape}\")\n",
    "    else:\n",
    "        print(\"No trait information available in clinical data\")\n",
    "        is_trait_available = False\n",
    "        clinical_features = pd.DataFrame()\n",
    "except Exception as e:\n",
    "    print(f\"Error processing clinical data: {e}\")\n",
    "    is_trait_available = False\n",
    "    clinical_features = pd.DataFrame()\n",
    "\n",
    "# 3. Link clinical and gene data if both are available\n",
    "if is_gene_available and is_trait_available and gene_data.shape[0] > 0 and clinical_features.shape[0] > 0:\n",
    "    try:\n",
    "        # Ensure gene data is formatted with genes as rows and samples as columns\n",
    "        gene_data.index.name = 'Gene'\n",
    "        \n",
    "        # Make sample IDs match between datasets\n",
    "        # In gene_data, the columns contain GSM IDs\n",
    "        # In clinical_features, the rows contain GSM IDs\n",
    "        common_samples = list(set(clinical_features.index) & set(gene_data.columns))\n",
    "        print(f\"Number of common samples between datasets: {len(common_samples)}\")\n",
    "        \n",
    "        if len(common_samples) == 0:\n",
    "            print(\"WARNING: No matching sample IDs between clinical and genetic data.\")\n",
    "            is_gene_available = False\n",
    "        else:\n",
    "            # Filter both datasets to include only common samples\n",
    "            clinical_subset = clinical_features.loc[common_samples]\n",
    "            gene_subset = gene_data[common_samples]\n",
    "            \n",
    "            # Transpose gene data to have samples as rows\n",
    "            gene_subset_t = gene_subset.T\n",
    "            \n",
    "            # Link the datasets\n",
    "            linked_data = pd.concat([clinical_subset, gene_subset_t], axis=1)\n",
    "            print(f\"Linked data shape: {linked_data.shape}\")\n",
    "            \n",
    "            # 4. Handle missing values in the linked data\n",
    "            linked_data = handle_missing_values(linked_data, trait)\n",
    "            print(f\"Data shape after handling missing values: {linked_data.shape}\")\n",
    "            \n",
    "            # 5. Determine if trait is biased\n",
    "            is_trait_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)\n",
    "    except Exception as e:\n",
    "        print(f\"Error linking data: {e}\")\n",
    "        is_trait_biased = True\n",
    "        linked_data = pd.DataFrame()\n",
    "else:\n",
    "    print(\"Cannot link data: gene or trait data unavailable\")\n",
    "    is_trait_biased = True\n",
    "    linked_data = pd.DataFrame()\n",
    "\n",
    "# 6. Make final determination about data usability\n",
    "note = \"Dataset contains kidney disease gene expression data. Processing encountered issues with gene ID mapping.\"\n",
    "is_usable = validate_and_save_cohort_info(\n",
    "    is_final=True, \n",
    "    cohort=cohort, \n",
    "    info_path=json_path, \n",
    "    is_gene_available=is_gene_available and gene_data.shape[0] > 0,\n",
    "    is_trait_available=is_trait_available and clinical_features.shape[0] > 0,\n",
    "    is_biased=is_trait_biased, \n",
    "    df=linked_data,\n",
    "    note=note\n",
    ")\n",
    "\n",
    "# 7. Save linked data if usable\n",
    "if is_usable and linked_data.shape[0] > 0:\n",
    "    os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n",
    "    linked_data.to_csv(out_data_file)\n",
    "    print(f\"Linked data saved to {out_data_file}\")\n",
    "else:\n",
    "    print(\"Data not saved due to quality issues\")"
   ]
  }
 ],
 "metadata": {},
 "nbformat": 4,
 "nbformat_minor": 5
}