# Path Configuration
import os
import pandas as pd
from tools.preprocess import *
# Processing context
trait = "Melanoma"
cohort = "GSE157738"
# Input paths
in_trait_dir = "../DATA/GEO/Melanoma"
in_cohort_dir = "../DATA/GEO/Melanoma/GSE157738"
# Output paths
out_data_file = "./output/preprocess/3/Melanoma/GSE157738.csv"
out_gene_data_file = "./output/preprocess/3/Melanoma/gene_data/GSE157738.csv"
out_clinical_data_file = "./output/preprocess/3/Melanoma/clinical_data/GSE157738.csv"
json_path = "./output/preprocess/3/Melanoma/cohort_info.json"
# Get file paths for SOFT and matrix files
soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)
# Get background info and clinical data from the matrix file
background_info, clinical_data = get_background_and_clinical_data(matrix_file_path)
# Create dictionary of unique values for each feature
unique_values_dict = get_unique_values_by_row(clinical_data)
# Print the information
print("Dataset Background Information:")
print(background_info)
print("\nSample Characteristics:")
for feature, values in unique_values_dict.items():
    print(f"\n{feature}:")
    print(values)
# 1. Gene Expression Data Availability
# Yes, this is gene expression data from Affymetrix Human Gene 2.0 ST Array
is_gene_available = True
# 2.1 Data Availability
# Trait (clinical outcome) is available in row 4 with multiple values
trait_row = 4
# Age and gender data are not available in sample characteristics
age_row = None
gender_row = None
# 2.2 Data Type Conversion Functions
def convert_trait(x):
    # Extract value after colon, strip whitespace
    if not isinstance(x, str):
        return None
    value = x.split(':')[-1].strip()
    # Convert clinical outcomes to binary:
    # NED (No Evidence of Disease) and PR (Partial Response) are positive outcomes
    if value in ['NED1', 'NED2', 'PR']:
        return 1
    # PD (Progressive Disease) and SD (Stable Disease) are negative outcomes
    elif value in ['PD', 'SD']:
        return 0
    return None
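# A minimal sanity check of the conversion logic above. The raw characteristic strings below are
# hypothetical illustrations of GEO's "key: value" format, not values read from this cohort.
assert convert_trait("clinical outcome: PR") == 1
assert convert_trait("clinical outcome: SD") == 0
assert convert_trait("clinical outcome: unknown") is None
assert convert_trait(None) is None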
def convert_age(x):
    return None  # Age data not available
def convert_gender(x):
    return None  # Gender data not available
# 3. Save Metadata
is_trait_available = trait_row is not None
validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)
# 4. Clinical Feature Extraction
# Since trait_row is not None, we need to extract clinical features
selected_clinical = geo_select_clinical_features(
    clinical_df=clinical_data,
    trait=trait,
    trait_row=trait_row,
    convert_trait=convert_trait,
    age_row=age_row,
    convert_age=convert_age,
    gender_row=gender_row,
    convert_gender=convert_gender
)
# Preview the processed clinical data
preview_result = preview_df(selected_clinical)
print(preview_result)
# Save clinical data
os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)
selected_clinical.to_csv(out_clinical_data_file)
# Extract genetic data matrix
genetic_data = get_genetic_data(matrix_file_path)
# Print first 20 row IDs to examine data type
print("First 20 row IDs:")
print(list(genetic_data.index)[:20])
# After examining the IDs and confirming this is gene expression data:
is_gene_available = True
# Save updated metadata
validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=(trait_row is not None)
)
os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
genetic_data.to_csv(out_gene_data_file)
# These numerical IDs appear to be probe IDs, not standard human gene symbols
# They need to be mapped to their corresponding gene symbols for biological interpretation
requires_gene_mapping = True
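# A minimal sketch (hypothetical probe IDs and placeholder symbols) of the two-column mapping
# table that the probe-to-gene conversion below is expected to produce.
_example_mapping = pd.DataFrame({'ID': ['16650001', '16650003'], 'Gene': ['GENE_A', 'GENE_B']})
print(_example_mapping)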
# First let's examine a few lines from the SOFT file to identify the correct section
import gzip
with gzip.open(soft_file_path, 'rt') as f:
    # Scan the first 100 lines for the platform table marker
    for i, line in enumerate(f):
        if i < 100:  # Limit the scan to the first 100 lines
            if 'table_begin' in line.lower():
                print(f"Found table marker at line {i}:")
                print(line.strip())
        else:
            break
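# For orientation, a GEO family SOFT file typically interleaves sections like the sketch below
# (illustrative only; exact labels, letter case, and columns vary by platform):
#   ^PLATFORM = GPLxxxxx
#   !Platform_title = ...
#   #ID = probe set identifier            <- column descriptions start with '#'
#   !platform_table_begin
#   ID<TAB>gene_assignment<TAB>...        <- header row, then one tab-separated row per probe
#   !platform_table_end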
# Extract gene annotation with adjusted prefix filtering
gene_metadata = get_gene_annotation(soft_file_path, prefixes=['!Platform_table_begin', '!platform_table_end'])
# Preview to verify we got the annotation data
print("\nGene annotation columns and sample values:")
preview = preview_df(gene_metadata)
print(preview)
# Try loading annotation data with platform-related prefixes
import gzip
def parse_soft_file(file_path):
    probe_to_gene = {}
    within_platform = False
    with gzip.open(file_path, 'rt') as f:
        for line in f:
            line = line.strip()
            if line.startswith('!Platform_table_begin'):
                within_platform = True
                # Get header line
                header = next(f).strip().split('\t')
                id_idx = header.index('ID')
                gene_idx = header.index('Gene Assignment')
                continue
            if within_platform:
                if line.startswith('!Platform_table_end'):
                    break
                fields = line.split('\t')
                if len(fields) > max(id_idx, gene_idx):
                    probe_id = fields[id_idx]
                    gene_info = fields[gene_idx]
                    if gene_info != '---':
                        # Extract gene symbol from gene assignment string
                        # Format is typically: gene_id // gene_symbol // gene_name
                        gene_parts = gene_info.split('//')
                        if len(gene_parts) > 1:
                            gene_symbol = gene_parts[1].strip()
                            probe_to_gene[probe_id] = gene_symbol
    # Convert the probe-to-gene dict to a two-column DataFrame
    mapping_df = pd.DataFrame(list(probe_to_gene.items()), columns=['ID', 'Gene'])
    return mapping_df
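# For illustration only: a hypothetical '//'-delimited gene assignment string, showing why the
# second field is taken as the gene symbol in parse_soft_file above.
_example_assignment = "NM_152486 // SAMD11 // sterile alpha motif domain containing 11"
print(_example_assignment.split('//')[1].strip())  # prints "SAMD11"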
# Get mapping between probe IDs and gene symbols
mapping_data = parse_soft_file(soft_file_path)
# Apply gene mapping to convert probe-level data to gene-level expression
gene_data = apply_gene_mapping(genetic_data, mapping_data)
# Preview mapped gene data
print("\nFirst few gene symbols:")
print(list(gene_data.index)[:10])
# Save gene expression data
gene_data.to_csv(out_gene_data_file)
# Parse SOFT file to get probe-to-gene mapping
def parse_soft_file(file_path):
    probe_to_gene = []
    within_platform = False
    with gzip.open(file_path, 'rt') as f:
        # Debug printing
        print("First 10 lines of SOFT file:")
        for i, line in enumerate(f):
            if i < 10:
                print(line.strip())
            if i == 10:
                break
        f.seek(0)  # Reset file pointer
        for line in f:
            line = line.strip()
            if line.startswith('!Platform_table_begin'):
                within_platform = True
                # Print a few lines after table begin to confirm structure
                print("\nPlatform table header:")
                header = next(f).strip()
                print(header)
                header = header.split('\t')
                try:
                    id_idx = header.index('ID')
                    gene_idx = header.index('Gene Symbol')  # Try alternative column name
                except ValueError:
                    # If first attempt fails, print all column names for debugging
                    print("\nAll column names found:")
                    print(header)
                    # Try other common variations
                    gene_idx = next((i for i, col in enumerate(header)
                                     if 'gene' in col.lower() and 'symbol' in col.lower()), -1)
                    if gene_idx == -1:
                        raise ValueError("Could not find gene symbol column")
                continue
            if within_platform:
                if line.startswith('!Platform_table_end'):
                    break
                fields = line.split('\t')
                if len(fields) > max(id_idx, gene_idx):
                    probe_id = fields[id_idx]
                    gene_symbol = fields[gene_idx]
                    if gene_symbol and gene_symbol != '---':
                        probe_to_gene.append([probe_id, gene_symbol])
    mapping_df = pd.DataFrame(probe_to_gene, columns=['ID', 'Gene'])
    print(f"\nFound {len(mapping_df)} probe-to-gene mappings")
    return mapping_df
# Get mapping between probe IDs and gene symbols
mapping_data = parse_soft_file(soft_file_path)
# Apply gene mapping to convert probe-level data to gene-level expression
gene_data = apply_gene_mapping(genetic_data, mapping_data)
# Preview mapped gene data
print("\nFirst few gene symbols:")
print(list(gene_data.index)[:10])
# Save gene expression data
gene_data.to_csv(out_gene_data_file)
# Parse SOFT file with more debugging and flexible header parsing
def parse_soft_file(file_path):
    probe_to_gene = []
    within_platform = False
    header_found = False
    with gzip.open(file_path, 'rt') as f:
        for line in f:
            line = line.strip()
            # Print sections looking for platform metadata
            if line.startswith('^PLATFORM'):
                within_platform = True
                print(f"\nFound platform section: {line}")
                continue
            # After platform marker, look for the header line with probe metadata
            if within_platform and not header_found and line.startswith('!Platform_var'):
                header_line = line
                print(f"\nPotential header line found: {header_line}")
                if 'gene symbol' in header_line.lower():
                    print("Found gene symbol column info")
                # Extract column name mapping if this line contains it
                if ' = ' in line:
                    field_name = line.split(' = ')[1]
                    print(f"Field name: {field_name}")
                    if 'gene symbol' in field_name.lower():
                        gene_col = field_name
                        print(f"Gene column found: {gene_col}")
                        header_found = True
                continue
            # After finding header info, process data rows
            if within_platform and header_found:
                if line.startswith('#') or line.startswith('!'):
                    continue
                fields = line.split('\t')
                if len(fields) < 2:
                    continue
                probe_id = fields[0]
                # Look for gene symbol in likely positions
                for field in fields[1:]:
                    if '//' in field:  # Common format in GEO: geneID // geneSymbol // geneName
                        parts = field.split('//')
                        if len(parts) > 1:
                            gene_symbol = parts[1].strip()
                            if gene_symbol and gene_symbol not in ['---', '']:
                                probe_to_gene.append([probe_id, gene_symbol])
                        break
    mapping_df = pd.DataFrame(probe_to_gene, columns=['ID', 'Gene'])
    print(f"\nFound {len(mapping_df)} probe-to-gene mappings")
    if len(mapping_df) > 0:
        print("\nFirst few mappings:")
        print(mapping_df.head())
    return mapping_df
# Get mapping between probe IDs and gene symbols
print("Extracting probe-to-gene mappings from SOFT file...")
mapping_data = parse_soft_file(soft_file_path)
if len(mapping_data) > 0:
    # Apply gene mapping to convert probe-level data to gene-level expression
    print("\nConverting probe-level to gene-level expression data...")
    gene_data = apply_gene_mapping(genetic_data, mapping_data)
    # Save gene expression data
    print("\nSaving gene expression data...")
    os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
    gene_data.to_csv(out_gene_data_file)
    # Normalize and link data
    gene_data = normalize_gene_symbols_in_index(gene_data)
    clinical_features = pd.read_csv(out_clinical_data_file, index_col=0)
    linked_data = geo_link_clinical_genetic_data(clinical_features, gene_data)
    # Handle missing values
    linked_data = handle_missing_values(linked_data, trait)
    # Judge bias in features and remove biased ones
    trait_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)
    # Final validation and save metadata
    is_usable = validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=is_gene_available,
        is_trait_available=True,
        is_biased=trait_biased,
        df=linked_data,
        note="Gene expression data from melanoma patients receiving PD-1 immunotherapy, with long-term benefit as outcome."
    )
    # Save linked data if usable
    if is_usable:
        os.makedirs(os.path.dirname(out_data_file), exist_ok=True)
        linked_data.to_csv(out_data_file)
else:
    print("Failed to extract gene mappings. Cannot proceed with data processing.")