# Path Configuration
from tools.preprocess import *
# Explicit imports for helpers used below (the wildcard import above may already provide them)
import os
import pandas as pd
# Processing context
trait = "Melanoma"
cohort = "GSE157738"
# Input paths
in_trait_dir = "../DATA/GEO/Melanoma"
in_cohort_dir = "../DATA/GEO/Melanoma/GSE157738"
# Output paths
out_data_file = "./output/preprocess/3/Melanoma/GSE157738.csv"
out_gene_data_file = "./output/preprocess/3/Melanoma/gene_data/GSE157738.csv"
out_clinical_data_file = "./output/preprocess/3/Melanoma/clinical_data/GSE157738.csv"
json_path = "./output/preprocess/3/Melanoma/cohort_info.json"
# Get file paths for SOFT and matrix files
soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)
# Get background info and clinical data from the matrix file
background_info, clinical_data = get_background_and_clinical_data(matrix_file_path)
# Create dictionary of unique values for each feature
unique_values_dict = get_unique_values_by_row(clinical_data)
# Print the information
print("Dataset Background Information:")
print(background_info)
print("\nSample Characteristics:")
for feature, values in unique_values_dict.items():
    print(f"\n{feature}:")
    print(values)
# 1. Gene Expression Data Availability
# Yes, this is gene expression data from Affymetrix Human Gene 2.0 ST Array
is_gene_available = True
# 2.1 Data Availability
# Trait (clinical outcome) is available in row 4 with multiple values
trait_row = 4
# Age and gender data are not available in sample characteristics
age_row = None
gender_row = None
# 2.2 Data Type Conversion Functions
def convert_trait(x):
    # Extract value after colon, strip whitespace
    if not isinstance(x, str):
        return None
    value = x.split(':')[-1].strip()
    # Convert clinical outcomes to binary
    # NED (No Evidence of Disease) and PR (Partial Response) are positive outcomes
    if value in ['NED1', 'NED2', 'PR']:
        return 1
    # PD (Progressive Disease) and SD (Stable Disease) are negative outcomes
    elif value in ['PD', 'SD']:
        return 0
    return None
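# Illustrative behaviour of convert_trait (the text before the colon varies by dataset
# and is shown here only as a placeholder):
#   "clinical outcome: PR"  -> 1   (partial response, counted as benefit)
#   "clinical outcome: SD"  -> 0   (stable disease)
#   "clinical outcome: N/A" -> None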
def convert_age(x):
    return None  # Age data not available

def convert_gender(x):
    return None  # Gender data not available
# 3. Save Metadata
is_trait_available = trait_row is not None
validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)
# 4. Clinical Feature Extraction
# Since trait_row is not None, we need to extract clinical features
selected_clinical = geo_select_clinical_features(
    clinical_df=clinical_data,
    trait=trait,
    trait_row=trait_row,
    convert_trait=convert_trait,
    age_row=age_row,
    convert_age=convert_age,
    gender_row=gender_row,
    convert_gender=convert_gender
)
# Preview the processed clinical data
preview_result = preview_df(selected_clinical)
# Save clinical data
selected_clinical.to_csv(out_clinical_data_file)
# Extract genetic data matrix
genetic_data = get_genetic_data(matrix_file_path)
# Print first 20 row IDs to examine data type
print("First 20 row IDs:")
print(list(genetic_data.index)[:20])
# After examining the IDs and confirming this is gene expression data:
is_gene_available = True
# Save updated metadata
validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=(trait_row is not None)
)
genetic_data.to_csv(out_gene_data_file)
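# Note: this writes the raw probe-level matrix to out_gene_data_file; the same path is
# overwritten later in this script with gene-level data once probes are mapped to symbols.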
# These numerical IDs appear to be probe IDs, not standard human gene symbols
# They need to be mapped to their corresponding gene symbols for biological interpretation
requires_gene_mapping = True
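# Rough sketch of what the mapping step amounts to (for orientation only; the actual
# apply_gene_mapping helper from tools.preprocess may aggregate probe signals differently):
#   annotated = genetic_data.join(mapping_data.set_index('ID'))  # attach a 'Gene' column to each probe row
#   gene_level = annotated.groupby('Gene').mean()                # collapse probes mapping to the same symbol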
# First let's examine a few lines from the SOFT file to identify the correct section
import gzip
with gzip.open(soft_file_path, 'rt') as f:
    # Print table markers found within the first 100 lines to see the file structure
    for i, line in enumerate(f):
        if i < 100:  # Limit output to first 100 lines
            if 'table_begin' in line.lower():
                print(f"Found table marker at line {i}:")
                print(line.strip())
        else:
            break
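# Background: GEO SOFT files delimit the platform annotation table with
# "!platform_table_begin" / "!platform_table_end" lines (capitalization varies), and the
# tab-separated header (e.g. "ID", "gene_assignment", ...) sits on the line right after
# the begin marker. The exact column names depend on the platform.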
# Extract gene annotation with adjusted prefix filtering
gene_metadata = get_gene_annotation(soft_file_path, prefixes=['!Platform_table_begin', '!platform_table_end'])
# Preview to verify we got the annotation data
print("\nGene annotation columns and sample values:")
preview = preview_df(gene_metadata)
print(preview)
# Try loading annotation data with platform-related prefixes
import gzip
def parse_soft_file(file_path):
    """Build a probe ID -> gene symbol mapping from the platform table in the SOFT file."""
    probe_to_gene = {}
    within_platform = False
    with gzip.open(file_path, 'rt') as f:
        for line in f:
            line = line.strip()
            if line.startswith('!Platform_table_begin'):
                within_platform = True
                # The line right after the marker is the tab-separated header
                header = next(f).strip().split('\t')
                id_idx = header.index('ID')
                gene_idx = header.index('Gene Assignment')
                continue
            if within_platform:
                if line.startswith('!Platform_table_end'):
                    break
                fields = line.split('\t')
                if len(fields) > max(id_idx, gene_idx):
                    probe_id = fields[id_idx]
                    gene_info = fields[gene_idx]
                    if gene_info != '---':
                        # Extract gene symbol from gene assignment string
                        # Format is typically: gene_id // gene_symbol // gene_name
                        # e.g. "NM_000000 // GENE1 // gene description ..." -> "GENE1"
                        gene_parts = gene_info.split('//')
                        if len(gene_parts) > 1:
                            gene_symbol = gene_parts[1].strip()
                            probe_to_gene[probe_id] = gene_symbol
    # Convert to a two-column DataFrame
    mapping_df = pd.DataFrame(list(probe_to_gene.items()), columns=['ID', 'Gene'])
    return mapping_df
# Get mapping between probe IDs and gene symbols
mapping_data = parse_soft_file(soft_file_path)
# Apply gene mapping to convert probe-level data to gene-level expression
gene_data = apply_gene_mapping(genetic_data, mapping_data)
# Preview mapped gene data
print("\nFirst few gene symbols:")
print(list(gene_data.index)[:10])
# Save gene expression data
gene_data.to_csv(out_gene_data_file)
# Parse SOFT file to get probe-to-gene mapping
def parse_soft_file(file_path):
    probe_to_gene = []
    within_platform = False
    with gzip.open(file_path, 'rt') as f:
        # Debug printing
        print("First 10 lines of SOFT file:")
        for i, line in enumerate(f):
            if i < 10:
                print(line.strip())
            if i == 10:
                break
        f.seek(0)  # Reset file pointer before the real pass
        for line in f:
            line = line.strip()
            if line.startswith('!Platform_table_begin'):
                within_platform = True
                # Print the header line following the table-begin marker to confirm structure
                print("\nPlatform table header:")
                header = next(f).strip()
                print(header)
                header = header.split('\t')
                # Locate the probe ID column; fall back to the first column if 'ID' is absent
                id_idx = header.index('ID') if 'ID' in header else 0
                try:
                    gene_idx = header.index('Gene Symbol')  # Try alternative column name
                except ValueError:
                    # If first attempt fails, print all column names for debugging
                    print("\nAll column names found:")
                    print(header)
                    # Try other common variations
                    gene_idx = next((i for i, col in enumerate(header)
                                     if 'gene' in col.lower() and 'symbol' in col.lower()), -1)
                    if gene_idx == -1:
                        raise ValueError("Could not find gene symbol column")
                continue
            if within_platform:
                if line.startswith('!Platform_table_end'):
                    break
                fields = line.split('\t')
                if len(fields) > max(id_idx, gene_idx):
                    probe_id = fields[id_idx]
                    gene_symbol = fields[gene_idx]
                    if gene_symbol and gene_symbol != '---':
                        probe_to_gene.append([probe_id, gene_symbol])
    mapping_df = pd.DataFrame(probe_to_gene, columns=['ID', 'Gene'])
    print(f"\nFound {len(mapping_df)} probe-to-gene mappings")
    return mapping_df
# Get mapping between probe IDs and gene symbols
mapping_data = parse_soft_file(soft_file_path)
# Apply gene mapping to convert probe-level data to gene-level expression
gene_data = apply_gene_mapping(genetic_data, mapping_data)
# Preview mapped gene data
print("\nFirst few gene symbols:")
print(list(gene_data.index)[:10])
# Save gene expression data
gene_data.to_csv(out_gene_data_file)
# Parse SOFT file with more debugging and flexible header parsing
def parse_soft_file(file_path):
    probe_to_gene = []
    within_platform = False
    header_found = False
    with gzip.open(file_path, 'rt') as f:
        for line in f:
            line = line.strip()
            # Print sections looking for platform metadata
            if line.startswith('^PLATFORM'):
                within_platform = True
                print(f"\nFound platform section: {line}")
                continue
            # After the platform marker, look for header lines describing the probe metadata
            if within_platform and not header_found and line.startswith('!Platform_var'):
                header_line = line
                print(f"\nPotential header line found: {header_line}")
                if 'gene symbol' in header_line.lower():
                    print("Found gene symbol column info")
                # Extract column name mapping if this line contains it
                if ' = ' in line:
                    field_name = line.split(' = ')[1]
                    print(f"Field name: {field_name}")
                    if 'gene symbol' in field_name.lower():
                        gene_col = field_name
                        print(f"Gene column found: {gene_col}")
                header_found = True
                continue
            # The standard GEO marker for the start of the annotation table also means the
            # metadata section is over, so treat it as "header found" as well
            if within_platform and not header_found and line.lower().startswith('!platform_table_begin'):
                header_found = True
                continue
            # After finding header info, process data rows
            if within_platform and header_found:
                if line.lower().startswith('!platform_table_end'):
                    break
                if line.startswith('#') or line.startswith('!'):
                    continue
                fields = line.split('\t')
                if len(fields) < 2:
                    continue
                probe_id = fields[0]
                # Look for gene symbol in likely positions
                for field in fields[1:]:
                    if '//' in field:  # Common format in GEO: geneID//geneSymbol//geneName
                        parts = field.split('//')
                        if len(parts) > 1:
                            gene_symbol = parts[1].strip()
                            if gene_symbol and gene_symbol not in ['---', '']:
                                probe_to_gene.append([probe_id, gene_symbol])
                        break
    mapping_df = pd.DataFrame(probe_to_gene, columns=['ID', 'Gene'])
    print(f"\nFound {len(mapping_df)} probe-to-gene mappings")
    if len(mapping_df) > 0:
        print("\nFirst few mappings:")
        print(mapping_df.head())
    return mapping_df
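# The mapping dataframe is expected to carry two columns, 'ID' (probe identifier) and
# 'Gene' (symbol); that is the shape assumed when it is passed to apply_gene_mapping
# below (an assumption based on how it is used in this script, not on documentation).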
# Get mapping between probe IDs and gene symbols
print("Extracting probe-to-gene mappings from SOFT file...")
mapping_data = parse_soft_file(soft_file_path)
if len(mapping_data) > 0:
    # Apply gene mapping to convert probe-level data to gene-level expression
    print("\nConverting probe-level to gene-level expression data...")
    gene_data = apply_gene_mapping(genetic_data, mapping_data)
    # Save gene expression data
    print("\nSaving gene expression data...")
    os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
    gene_data.to_csv(out_gene_data_file)
    # Normalize gene symbols and link clinical with genetic data
    gene_data = normalize_gene_symbols_in_index(gene_data)
    clinical_features = pd.read_csv(out_clinical_data_file, index_col=0)
    linked_data = geo_link_clinical_genetic_data(clinical_features, gene_data)
    # Handle missing values
    linked_data = handle_missing_values(linked_data, trait)
    # Judge bias in features and remove biased ones
    trait_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)
    # Final validation and save metadata
    is_usable = validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=is_gene_available,
        is_trait_available=True,
        is_biased=trait_biased,
        df=linked_data,
        note="Gene expression data from melanoma patients receiving PD-1 immunotherapy, with long-term benefit as the outcome."
    )
    # Save linked data if usable
    if is_usable:
        os.makedirs(os.path.dirname(out_data_file), exist_ok=True)
        linked_data.to_csv(out_data_file)
else:
    print("Failed to extract gene mappings. Cannot proceed with data processing.")