# Path Configuration
from tools.preprocess import *
# Processing context
trait = "Pancreatic_Cancer"
cohort = "GSE157494"
# Input paths
in_trait_dir = "../DATA/GEO/Pancreatic_Cancer"
in_cohort_dir = "../DATA/GEO/Pancreatic_Cancer/GSE157494"
# Output paths
out_data_file = "./output/preprocess/3/Pancreatic_Cancer/GSE157494.csv"
out_gene_data_file = "./output/preprocess/3/Pancreatic_Cancer/gene_data/GSE157494.csv"
out_clinical_data_file = "./output/preprocess/3/Pancreatic_Cancer/clinical_data/GSE157494.csv"
json_path = "./output/preprocess/3/Pancreatic_Cancer/cohort_info.json"
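# The output directories are assumed not to exist yet; creating them up front
# avoids errors when the CSV/JSON files are written later (a small convenience
# sketch, not part of the original pipeline).
import os
for _path in [out_data_file, out_gene_data_file, out_clinical_data_file, json_path]:
    os.makedirs(os.path.dirname(_path), exist_ok=True)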
# Get file paths
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# Extract background info and clinical data
background_info, clinical_data = filter_content_by_prefix(
    matrix_file,
    prefixes_a=['!Series_title', '!Series_summary', '!Series_overall_design'],
    prefixes_b=['!Sample_geo_accession', '!Sample_characteristics_ch1'],
    unselect=False,
    source_type='file',
    return_df_a=False,
    return_df_b=True,
    transpose=True
)
# Get unique values per clinical feature
sample_characteristics = get_unique_values_by_row(clinical_data)
# Print background info
print("Dataset Background Information:")
print(f"{background_info}\n")
# Print sample characteristics
print("Sample Characteristics:")
for feature, values in sample_characteristics.items():
    print(f"Feature: {feature}")
    print(f"Values: {values}\n")
# Get file paths
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# Extract background info and clinical data
background_info, clinical_data = filter_content_by_prefix(
    matrix_file,
    prefixes_a=['!Series_title', '!Series_summary', '!Series_overall_design'],
    prefixes_b=['!Sample_characteristics_ch'],
    unselect=False,
    source_type='file',
    return_df_a=False,
    return_df_b=True
)
# Get unique values per clinical feature
sample_characteristics = get_unique_values_by_row(clinical_data)
# Print background info
print("Dataset Background Information:")
print(f"{background_info}\n")
# Print sample characteristics
print("Sample Characteristics:")
for feature, values in sample_characteristics.items():
    print(f"Feature: {feature}")
    print(f"Values: {values}\n")
# 1. Gene Expression Data Availability
# Yes - the series summary mentions gene expression profiling with an Affymetrix GeneChip array
is_gene_available = True
# 2. Variable Availability and Data Type Conversion
# The sample characteristics output is empty, so no trait/age/gender annotations are available
trait_row = None
age_row = None
gender_row = None
def convert_trait(x):
    return None

def convert_age(x):
    return None

def convert_gender(x):
    return None
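# For contrast: had a characteristics row such as "disease state: tumor" /
# "disease state: normal" been present, the trait converter would typically parse the
# value after the colon into a binary label. Hypothetical sketch only -- these labels
# are assumptions and were not observed in this cohort.
def convert_trait_example(x):
    if x is None:
        return None
    value = str(x).split(':', 1)[-1].strip().lower()
    if 'tumor' in value or 'cancer' in value:
        return 1
    if 'normal' in value or 'control' in value:
        return 0
    return None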
# 3. Save metadata
# Initial filtering - save info that this dataset has gene data but no clinical data
validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=False
)
# 4. Clinical Feature Extraction
# Skip since trait_row is None (no clinical data available)
# Extract gene expression data from matrix file
gene_data = get_genetic_data(matrix_file)
# Print first 20 row IDs and shape of data to help debug
print("Shape of gene expression data:", gene_data.shape)
print("\nFirst few rows of data:")
print(gene_data.head())
print("\nFirst 20 gene/probe identifiers:")
print(gene_data.index[:20])
# Inspect a snippet of raw file to verify identifier format
import gzip
with gzip.open(matrix_file, 'rt', encoding='utf-8') as f:
    lines = []
    for i, line in enumerate(f):
        if "!series_matrix_table_begin" in line:
            # Get the next 5 lines after the marker
            for _ in range(5):
                lines.append(next(f).strip())
            break
print("\nFirst few lines after matrix marker in raw file:")
for line in lines:
    print(line)
# Looking at the IDs (e.g. 1007_s_at, 1053_at), these are Affymetrix probe IDs
# from HG-U133_Plus_2 array platform, not gene symbols.
# They need to be mapped to human gene symbols for standardized analysis
requires_gene_mapping = True
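# What gene mapping amounts to, as a minimal pandas sketch: Affymetrix annotations may
# list several symbols per probe separated by ' /// ', so probes are expanded to one row
# per symbol and probe values are then aggregated per gene. Illustrative only -- the
# column names 'ID' and 'Gene Symbol' are assumptions confirmed in the next step, and the
# library's apply_gene_mapping may use a different aggregation rule.
import pandas as pd
def map_probes_to_genes_sketch(expr_df, annot_df):
    mapping = annot_df[['ID', 'Gene Symbol']].dropna().copy()
    mapping['Gene Symbol'] = mapping['Gene Symbol'].str.split(' /// ')
    mapping = mapping.explode('Gene Symbol')
    merged = mapping.set_index('ID').join(expr_df, how='inner')
    return merged.groupby('Gene Symbol').mean(numeric_only=True)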
# Get file paths using library function
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# Extract gene annotation from SOFT file
gene_annotation = get_gene_annotation(soft_file)
# Preview gene annotation data
print("Gene annotation columns and example values:")
print(preview_df(gene_annotation))
# In the gene_data index (['1007_s_at', '1053_at', '117_at', ...]) and the gene_annotation
# preview above, the 'ID' column holds probe IDs matching the gene_data index and the
# 'Gene Symbol' column holds the gene symbols we need
# Create mapping between probe IDs and gene symbols
gene_mapping = get_gene_mapping(gene_annotation, prob_col='ID', gene_col='Gene Symbol')
# Convert probe-level measurements to gene expression data
gene_data = apply_gene_mapping(gene_data, gene_mapping)
# Normalize gene symbols to standard format using synonym dictionary
gene_data = normalize_gene_symbols_in_index(gene_data)
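# Conceptually, symbol normalization maps aliases to official gene symbols and collapses
# any duplicate rows that result. A minimal sketch with a made-up synonym dictionary;
# the call above relies on the library's own synonym table.
def normalize_symbols_sketch(expr_df, synonym_map):
    renamed = expr_df.rename(index=lambda s: synonym_map.get(s, s))
    return renamed.groupby(level=0).max()
# Example usage: normalize_symbols_sketch(gene_data, {'MLL': 'KMT2A', 'ERBB1': 'EGFR'})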
# First get the gene expression data again
gene_data = get_genetic_data(matrix_file)
gene_mapping = get_gene_mapping(gene_annotation, prob_col='ID', gene_col='Gene Symbol')
gene_data = apply_gene_mapping(gene_data, gene_mapping)
gene_data = normalize_gene_symbols_in_index(gene_data)
# Save normalized gene data
gene_data.to_csv(out_gene_data_file)
# Update cohort info - dataset unusable due to lack of clinical data
is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=True,
    is_trait_available=False,
    is_biased=None,
    df=None,
    note="Gene expression data available but no clinical annotations/controls present."
)
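# Optional sanity check: read back the saved cohort record. The field names are whatever
# validate_and_save_cohort_info writes; this just prints the raw JSON for inspection.
import json
with open(json_path, 'rt', encoding='utf-8') as fh:
    print("Saved cohort info:", json.load(fh))
print(f"Is the cohort usable for final association analysis? {is_usable}")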