# Path Configuration
from typing import Optional

import pandas as pd

from tools.preprocess import *
# Processing context
trait = "Pancreatic_Cancer"
cohort = "GSE130563"
# Input paths
in_trait_dir = "../DATA/GEO/Pancreatic_Cancer"
in_cohort_dir = "../DATA/GEO/Pancreatic_Cancer/GSE130563"
# Output paths
out_data_file = "./output/preprocess/3/Pancreatic_Cancer/GSE130563.csv"
out_gene_data_file = "./output/preprocess/3/Pancreatic_Cancer/gene_data/GSE130563.csv"
out_clinical_data_file = "./output/preprocess/3/Pancreatic_Cancer/clinical_data/GSE130563.csv"
json_path = "./output/preprocess/3/Pancreatic_Cancer/cohort_info.json"
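# Ensure the output directories exist before the to_csv / JSON writes below
# (a small safeguard; assumes an earlier pipeline step has not already created them).
import os
for _path in [out_data_file, out_gene_data_file, out_clinical_data_file, json_path]:
    os.makedirs(os.path.dirname(_path), exist_ok=True)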
# Get file paths
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# Extract background info and clinical data
background_info, clinical_data = get_background_and_clinical_data(matrix_file)
# Get unique values per clinical feature
sample_characteristics = get_unique_values_by_row(clinical_data)
# Print background info
print("Dataset Background Information:")
print(f"{background_info}\n")
# Print sample characteristics
print("Sample Characteristics:")
for feature, values in sample_characteristics.items():
    print(f"Feature: {feature}")
    print(f"Values: {values}\n")
# 1. Gene Expression Data Availability
# Yes - this is a microarray study analyzing transcriptional profiling data
is_gene_available = True
# 2. Variable Availability and Data Types
# 2.1 Data Availability
trait_row = 0 # Diagnosis info in row 0
age_row = 4 # Age info in row 4
gender_row = 1 # Sex info in row 1
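# Illustrative (not actual) examples of the raw characteristic strings these rows are
# expected to hold, based on the prefixes parsed by the converters below:
#   row 0: "diagnosis: pancreatic ductal adenocarcinoma"
#   row 4: "age: 67"
#   row 1: "Sex: M"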
# 2.2 Data Type Conversion Functions
def convert_trait(value: str) -> Optional[int]:
    """Convert diagnosis info to binary: 1 for PDAC, 0 for non-cancer controls."""
    if value is None or 'diagnosis:' not in value:
        return None
    diagnosis = value.split('diagnosis:')[1].strip().lower()
    if 'pancreatic ductal adenocarcinoma' in diagnosis:
        return 1
    elif 'chronic pancreatitis' in diagnosis:  # Excluded from analysis per background info
        return None
    else:  # All other diagnoses are treated as non-cancer controls
        return 0
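# Expected behaviour (illustrative examples, not actual cohort values):
#   convert_trait("diagnosis: Pancreatic Ductal Adenocarcinoma") -> 1
#   convert_trait("diagnosis: chronic pancreatitis")             -> None (excluded)
#   convert_trait("diagnosis: normal pancreas")                  -> 0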
def convert_age(value: str) -> Optional[float]:
    """Convert age to a continuous value."""
    if value is None or 'age:' not in value:
        return None
    try:
        return float(value.split('age:')[1].strip())
    except ValueError:
        return None
def convert_gender(value: str) -> Optional[int]:
    """Convert sex to binary: 0 for female, 1 for male."""
    if value is None or 'Sex:' not in value:
        return None
    sex = value.split('Sex:')[1].strip().upper()
    if sex == 'F':
        return 0
    elif sex == 'M':
        return 1
    return None
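# Expected behaviour (illustrative examples):
#   convert_age("age: 63")   -> 63.0
#   convert_gender("Sex: F") -> 0
#   convert_gender("Sex: M") -> 1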
# 3. Save Metadata
is_trait_available = trait_row is not None
validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)
# 4. Clinical Feature Extraction
if trait_row is not None:
    selected_clinical_df = geo_select_clinical_features(
        clinical_df=clinical_data,
        trait=trait,
        trait_row=trait_row,
        convert_trait=convert_trait,
        age_row=age_row,
        convert_age=convert_age,
        gender_row=gender_row,
        convert_gender=convert_gender
    )

    # Preview the extracted features
    print("Preview of extracted clinical features:")
    print(preview_df(selected_clinical_df))

    # Save to CSV
    selected_clinical_df.to_csv(out_clinical_data_file)
# Extract gene expression data from matrix file
gene_data = get_genetic_data(matrix_file)
# Print first 20 row IDs and shape of data to help debug
print("Shape of gene expression data:", gene_data.shape)
print("\nFirst few rows of data:")
print(gene_data.head())
print("\nFirst 20 gene/probe identifiers:")
print(gene_data.index[:20])
# Inspect a snippet of raw file to verify identifier format
import gzip
with gzip.open(matrix_file, 'rt', encoding='utf-8') as f:
    lines = []
    for line in f:
        if "!series_matrix_table_begin" in line:
            # Collect the next 5 lines after the marker
            for _ in range(5):
                lines.append(next(f).strip())
            break

print("\nFirst few lines after matrix marker in raw file:")
for line in lines:
    print(line)
# The gene identifiers end with '_at', which is a characteristic format of Affymetrix
# microarray probe IDs rather than standard human gene symbols
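# (For reference: Affymetrix probe IDs typically look like '1007_s_at' or '121_at',
# whereas human gene symbols look like 'TP53' or 'KRAS'.)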
requires_gene_mapping = True
# Get file paths using library function
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# Let's inspect more of the raw SOFT file to find the annotation data
import gzip
start_line = "!platform_table_begin"
end_line = "!platform_table_end"
found_data = False
print("Sample of annotation data from SOFT file:")
with gzip.open(soft_file, 'rt', encoding='utf-8') as f:
    for line in f:
        if start_line in line:
            found_data = True
            # Skip the header line
            next(f)
            # Print the first few lines of actual data
            for _ in range(5):
                print(next(f).strip())
            break
# Extract gene annotation data - exclude metadata prefixes and keep data between platform table markers
gene_annotation = get_gene_annotation(soft_file)
# Preview annotation data
print("\nGene annotation columns and example values:")
print(preview_df(gene_annotation))
# Display column names to help identify relevant fields
print("\nAvailable columns:")
print(gene_annotation.columns.tolist())
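# In Affymetrix-style annotation tables, the probe identifier is usually in an 'ID'
# column and the symbol in a column such as 'Gene Symbol' (an assumption here; confirm
# against the column list printed above before relying on it).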
# Since we can't access the proper gene symbol mapping file,
# let's look for gene annotation information in the SOFT file
import gzip
# Search for gene symbols in the SOFT file
found_symbols = False
gene_symbols = []
with gzip.open(soft_file, 'rt') as f:
    for line in f:
        # Look for the platform table begin marker (lowercase, matching the marker used above)
        if "!platform_table_begin" in line:
            headers = next(f).strip().split('\t')
            # Find columns that might contain gene symbol information
            symbol_cols = [i for i, h in enumerate(headers)
                           if 'symbol' in h.lower() or 'gene' in h.lower()]
            if symbol_cols:
                found_symbols = True
                # Extract gene symbols from the identified columns
                for line in f:
                    if "!platform_table_end" in line:
                        break
                    values = line.strip().split('\t')
                    for col in symbol_cols:
                        if col < len(values):
                            gene_symbols.append(values[col])
            break
if found_symbols and len(gene_symbols) > 0:
    # Create mapping using the found gene symbols
    unique_probes = gene_annotation['ID'].unique()
    mapping_df = pd.DataFrame({
        'ID': unique_probes,
        'Gene': gene_symbols[:len(unique_probes)]
    })
else:
    # If no gene symbols were found, create a temporary mapping using probe IDs.
    # This allows the pipeline to continue but indicates the mapping needs to be updated.
    mapping_df = pd.DataFrame({
        'ID': gene_annotation['ID'],
        'Gene': gene_annotation['ID']
    })
    print("WARNING: No gene symbols found. Using probe IDs as temporary mapping.")
# Convert probe-level measurements to gene-level measurements
gene_data = apply_gene_mapping(gene_data, mapping_df)
print("Shape of gene expression data after mapping:", gene_data.shape)
print("\nPreview of mapped gene expression data:")
print(gene_data.head())
# 1. Skip normalization and use probe-level data since gene mapping failed
gene_data = get_genetic_data(matrix_file)
print("WARNING: Using probe IDs instead of gene symbols due to failed mapping")
gene_data.to_csv(out_gene_data_file)
# 2. Link clinical and genetic data and trait
selected_clinical = geo_select_clinical_features(
    clinical_df=clinical_data,
    trait=trait,
    trait_row=trait_row,
    convert_trait=convert_trait,
    age_row=age_row,
    convert_age=convert_age,
    gender_row=gender_row,
    convert_gender=convert_gender
)
# Debug pre-linking
print("\nPre-linking data shapes:")
print("Clinical data shape:", selected_clinical.shape)
print("Gene data shape:", gene_data.shape)
print("\nClinical data preview:")
print(selected_clinical.head())
# Link the data
gene_data_t = gene_data.T
linked_data = pd.concat([selected_clinical.T, gene_data_t], axis=1)
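# Note: this concat aligns on the index, so it assumes both frames use the GEO sample
# accessions (GSM IDs) as row labels after transposition; samples missing from either
# side will produce NaNs, which are handled in the next step.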
# 3. Handle missing values
linked_data = handle_missing_values(linked_data, trait)
# 4. Check for biased features
is_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)
# 5. Validate data quality and save metadata
is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=True,
    is_trait_available=True,
    is_biased=is_biased,
    df=linked_data,
    note="Gene expression data from pancreatic cancer study. Using probe IDs instead of gene symbols."
)
# 6. Save if usable
if is_usable:
    linked_data.to_csv(out_data_file)