# Imports and path configuration
import gzip
import io
import os

import pandas as pd

from tools.preprocess import *

# Processing context
trait = "Von_Hippel_Lindau"
cohort = "GSE33093"

# Input paths
in_trait_dir = "../DATA/GEO/Von_Hippel_Lindau"
in_cohort_dir = "../DATA/GEO/Von_Hippel_Lindau/GSE33093"

# Output paths
out_data_file = "./output/preprocess/3/Von_Hippel_Lindau/GSE33093.csv"
out_gene_data_file = "./output/preprocess/3/Von_Hippel_Lindau/gene_data/GSE33093.csv"
out_clinical_data_file = "./output/preprocess/3/Von_Hippel_Lindau/clinical_data/GSE33093.csv"
json_path = "./output/preprocess/3/Von_Hippel_Lindau/cohort_info.json"

# Get file paths
soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)
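# (Assumption: this helper returns the SOFT annotation file and the series matrix
#  file found inside the cohort directory.)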

# Get background info and clinical data 
background_info, clinical_data = get_background_and_clinical_data(matrix_file_path)

# Print shape and first few rows to verify data
print("Background Information:")
print(background_info)
print("\nClinical Data Shape:", clinical_data.shape)
print("\nFirst few rows of Clinical Data:")
print(clinical_data.head())

print("\nSample Characteristics:")
# Get dictionary of unique values per row
unique_values_dict = get_unique_values_by_row(clinical_data)
for row, values in unique_values_dict.items():
    print(f"\n{row}:")
    print(values)
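
# Each key of unique_values_dict corresponds to a sample-characteristics row in the
# series matrix; its values are the distinct annotation strings observed across the
# samples (GEO characteristics are typically "key: value" strings). These rows are
# inspected next to decide whether trait, age, and gender information is available.
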
# 1. Gene Expression Data Availability
# Based on series description, this is a gene expression study
is_gene_available = True

# 2.1 Data Availability
# Looking at sample characteristics, no explicit trait/age/gender data found in the rows
# The data does not contain VHL status information needed for the trait
trait_row = None
age_row = None
gender_row = None

# 2.2 Data Type Conversion Functions
def convert_trait(x):
    # No VHL status data available, function not used
    return None

def convert_age(x):
    # Age conversion function not used since data not available
    return None

def convert_gender(x):
    # Gender conversion function not used since data not available
    return None
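
# For reference only (hypothetical): if a VHL-status row were present, a conversion
# function would typically parse the text after the colon in the characteristic
# string and return a binary label, for example:
def _example_convert_trait(x):
    # x is expected to look like "genotype: VHL mutant" (illustrative format only)
    value = x.split(':', 1)[-1].strip().lower() if isinstance(x, str) else ''
    if 'mutant' in value or 'mutation' in value:
        return 1
    if 'wild' in value:
        return 0
    return None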

# 3. Save Initial Metadata
is_trait_available = trait_row is not None
validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)
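# (Assumption: this call records the cohort's gene/trait availability flags in
#  cohort_info.json so later pipeline stages can skip cohorts without usable data.)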

# 4. Clinical Feature Extraction
# Skip since trait_row is None, indicating no clinical data available
# Extract gene expression data from matrix file
genetic_data = get_genetic_data(matrix_file_path)

# Print first 20 row IDs and shape of data
print("Shape of genetic data:", genetic_data.shape)
print("\nFirst 5 rows with sample columns:")
print(genetic_data.head())
print("\nFirst 20 gene/probe IDs:")
print(list(genetic_data.index[:20]))
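
# Inspecting the first row IDs shows whether the index already holds gene symbols or
# platform-specific probe IDs; that determines whether gene mapping is needed later.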

# Print first few lines of raw matrix file to inspect format
print("\nFirst few lines of raw matrix file:")
with gzip.open(matrix_file_path, 'rt') as f:
    for i, line in enumerate(f):
        if i < 10:  # Print first 10 lines
            print(line.strip())
        elif "!series_matrix_table_begin" in line:
            print("\nFound table marker at line", i)
            # Print next 3 lines after marker
            for _ in range(3):
                print(next(f).strip())
            break
# The gene identifiers appear to be simple numeric indices (1,2,3...) rather than official gene symbols
# This indicates they are likely probe IDs that need to be mapped to gene symbols
requires_gene_mapping = True
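
# Illustrative sketch only (not the tools.preprocess implementation): probe-to-gene
# mapping typically joins the expression matrix with a probe->symbol table and then
# aggregates probes that target the same gene. All names below are hypothetical toys.
_toy_expr = pd.DataFrame(
    {'GSM_A': [1.0, 2.0, 3.0], 'GSM_B': [2.0, 1.0, 4.0]},
    index=pd.Index(['1', '2', '3'], name='ID'),  # probe IDs as the row index
)
_toy_map = pd.DataFrame({'ID': ['1', '2', '3'], 'Gene': ['VHL', 'VHL', 'EPAS1']})
_toy_mapped = (
    _toy_expr.join(_toy_map.set_index('ID'))  # attach a gene symbol to each probe
    .groupby('Gene')
    .mean()  # average probes that map to the same gene
)
print("\nToy probe-to-gene mapping example:")
print(_toy_mapped)
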
# Extract gene annotation from SOFT file with adjusted prefix filtering
gene_annotation = get_gene_annotation(soft_file_path, prefixes=['!Platform_table_begin', '!Platform_table_end'])

# Preview both headers and first few rows
print("Gene annotation column names:")
print(gene_annotation.columns.tolist())

print("\nGene annotation preview:")
preview = preview_df(gene_annotation)
print(preview)

# Also check raw file content around platform table section
print("\nChecking raw SOFT file content around platform table:")
with gzip.open(soft_file_path, 'rt') as f:
    for i, line in enumerate(f):
        if '!Platform_table_begin' in line:
            print(f"\nFound table begin at line {i}:")
            # Print the header and first few data lines after the marker
            for _ in range(5):
                print(next(f).strip())
            break
# Since the SOFT file seems to have a non-standard format, parse the platform table
# directly from the raw file to build the probe-to-gene mapping
platform_data_lines = []
with gzip.open(soft_file_path, 'rt') as f:
    in_platform_table = False
    for line in f:
        if '!Platform_table_begin' in line:
            in_platform_table = True
            continue
        elif '!Platform_table_end' in line:
            in_platform_table = False
            break
        elif in_platform_table:
            platform_data_lines.append(line.strip())

# Check if we got any platform data
if len(platform_data_lines) > 0:
    # Convert platform data to dataframe
    platform_data = pd.read_csv(io.StringIO('\n'.join(platform_data_lines)), sep='\t', low_memory=False)
    
    # Print columns to verify we have the platform data
    print("Platform data columns:", platform_data.columns.tolist())
    print("\nFirst few rows of platform data:")
    print(platform_data.head())
    
    # Create mapping and apply it if we have the required columns
    id_candidates = [col for col in platform_data.columns if 'ID' in col.upper()]
    gene_candidates = [
        col for col in platform_data.columns
        if 'GENE' in col.upper() and 'SYMBOL' in col.upper()
    ]
    id_col = id_candidates[0] if id_candidates else None
    gene_col = gene_candidates[0] if gene_candidates else None
    
    if id_col and gene_col:
        mapping_df = get_gene_mapping(platform_data, prob_col=id_col, gene_col=gene_col)
        gene_data = apply_gene_mapping(genetic_data, mapping_df)
        
        # Save gene expression data
        if gene_data is not None:
            os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
            gene_data.to_csv(out_gene_data_file)
            print("\nGene data shape:", gene_data.shape)
            print("First few genes and their expression values:")
            print(gene_data.head())
    else:
        print("Could not identify ID and Gene Symbol columns in platform data")
        gene_data = None
else:
    print("Failed to extract platform table with probe-gene mappings")
    gene_data = None
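    # Note (assumption): without a probe-to-gene mapping this cohort cannot yield
    # gene-level expression data; a later validation step would typically mark it
    # as unusable in cohort_info.json.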