# Path Configuration
import os
import pandas as pd  # used below for DataFrame construction and CSV output

from tools.preprocess import *

# Processing context
trait = "Glioblastoma"
cohort = "GSE148949"

# Input paths
in_trait_dir = "../DATA/GEO/Glioblastoma"
in_cohort_dir = "../DATA/GEO/Glioblastoma/GSE148949"

# Output paths
out_data_file = "./output/preprocess/3/Glioblastoma/GSE148949.csv"
out_gene_data_file = "./output/preprocess/3/Glioblastoma/gene_data/GSE148949.csv"
out_clinical_data_file = "./output/preprocess/3/Glioblastoma/clinical_data/GSE148949.csv"
json_path = "./output/preprocess/3/Glioblastoma/cohort_info.json"

# Get file paths
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)

# Extract background info and clinical data 
background_info, clinical_data = get_background_and_clinical_data(matrix_file)

# Get unique values per clinical feature
sample_characteristics = get_unique_values_by_row(clinical_data)

# Print background info
print("Dataset Background Information:")
print(f"{background_info}\n")

# Print sample characteristics
print("Sample Characteristics:")
for feature, values in sample_characteristics.items():
    print(f"Feature: {feature}")
    print(f"Values: {values}\n")
# 1. Gene Expression Data Availability
is_gene_available = True  # This is a gene expression array dataset per Series_overall_design

# 2. Variable Availability and Data Type Conversion 
# 2.1 Data Availability
trait_row = None   # Trait status is not recorded in the sample characteristics
age_row = None     # No age information available
gender_row = None  # No gender information available

# 2.2 Data Type Conversion
# Since none of the clinical variables are available, no conversion functions are needed here
# (an illustrative sketch of such a function follows below)
convert_trait = None
convert_age = None  
convert_gender = None
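
# Illustrative only: if a trait row did exist, its converter would map the raw
# characteristic strings to binary labels. The helper below is a hypothetical
# sketch (the value strings are assumed, since this cohort records no trait
# information) and is not used anywhere in this pipeline.
def _example_convert_trait(value):
    """Hypothetical converter returning 1 for glioblastoma samples, 0 for controls."""
    if value is None:
        return None
    # GEO characteristics are typically formatted as "key: value"
    value = value.split(":")[-1].strip().lower()
    if "glioblastoma" in value or value == "gbm":
        return 1
    if value in ("normal", "control", "non-tumor"):
        return 0
    return None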

# 3. Save Metadata
# Initial filtering based on data availability
validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=(trait_row is not None)
)

# 4. Clinical Feature Extraction
# Skip since trait_row is None
# Extract gene expression data from matrix file
gene_data = get_genetic_data(matrix_file)

# Print first 20 row IDs and shape of data to help debug
print("Shape of gene expression data:", gene_data.shape)
print("\nFirst few rows of data:")
print(gene_data.head())
print("\nFirst 20 gene/probe identifiers:")
print(gene_data.index[:20])

# Inspect a snippet of raw file to verify identifier format
import gzip
with gzip.open(matrix_file, 'rt', encoding='utf-8') as f:
    lines = []
    for i, line in enumerate(f):
        if "!series_matrix_table_begin" in line:
            # Get the next 5 lines after the marker
            for _ in range(5):
                lines.append(next(f).strip())
            break
print("\nFirst few lines after matrix marker in raw file:")
for line in lines:
    print(line)
# The identifiers are a mix: a few look like gene symbols (e.g. "1/2-SBSRNA4"),
# but most are numeric IDs ("41334", "41335", ...), so they cannot be used directly
# as human gene symbols. For GEO microarray data, such probe-level identifiers
# typically need to be mapped to gene symbols via the platform annotation.
requires_gene_mapping = True
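
# Optional sanity check on the note above: count how many of the current
# identifiers are purely numeric and therefore cannot be gene symbols.
numeric_ids = sum(str(idx).isdigit() for idx in gene_data.index)
print(f"\nPurely numeric identifiers: {numeric_ids} of {len(gene_data.index)}")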
# Inspect the raw SOFT file to understand its structure
import gzip
print("Preview of SOFT file content:")
with gzip.open(soft_file, 'rt', encoding='utf-8') as f:
    for i, line in enumerate(f):
        if i < 20:  # Print first 20 lines
            print(line.strip())
        else:
            break

# Extract sections with platform annotation
platform_info = filter_content_by_prefix(
    soft_file,
    prefixes_a=['!Platform_'],
    source_type='file',
    return_df_a=False
)[0]

print("\nPlatform annotation preview:")
platform_lines = platform_info.split('\n')[:20]  # First 20 lines
for line in platform_lines:
    print(line)
# Extract gene annotation from SOFT file
gene_metadata = get_gene_annotation(soft_file)

# Print column names to identify relevant columns
print("Annotation columns:", gene_metadata.columns.tolist())

# Preview first few lines to verify data format 
print("\nAnnotation preview:")
print(gene_metadata.head())

# Extract probe ID and gene symbol columns for mapping
mapping_data = get_gene_mapping(gene_metadata, prob_col='ID', gene_col='ORF')
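
# Peek at the resulting probe-to-gene mapping table before applying it
print("\nMapping table preview:")
print(mapping_data.head())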

# Map probe IDs to gene symbols and aggregate expression values
gene_data = apply_gene_mapping(gene_data, mapping_data)

# Normalize gene symbols to standard nomenclature
gene_data = normalize_gene_symbols_in_index(gene_data)

# Print shape and preview to verify mapping results
print("\nShape of mapped gene expression data:", gene_data.shape)
print("\nFirst few rows of mapped data:")
print(gene_data.head())
# Save normalized gene data
os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
gene_data.to_csv(out_gene_data_file)
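# Log where the normalized gene expression table was written
print(f"Saved normalized gene expression data to {out_gene_data_file}")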

# Create an empty clinical features frame, since no trait data is available
clinical_features = pd.DataFrame(index=[trait])

# Link clinical and genetic data (effectively just the genetic data, given the empty clinical features)
linked_data = geo_link_clinical_genetic_data(clinical_features, gene_data)

# Handle missing values
linked_data = handle_missing_values(linked_data, trait)

# Check for biases
is_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)

# Validate and save cohort info
is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=True,
    is_trait_available=False,
    is_biased=is_biased,
    df=linked_data,
    note="Dataset contains gene expression data but no glioblastoma trait information"
)

# Save linked data if usable (will not save since trait data is missing)
if is_usable:
    os.makedirs(os.path.dirname(out_data_file), exist_ok=True)
    linked_data.to_csv(out_data_file)
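else:
    # Without trait information the linked table is not usable for association
    # analysis, so only the gene expression file and cohort metadata are kept.
    print(f"Linked data for {cohort} not saved: trait information is unavailable.")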