# Path Configuration
from tools.preprocess import *
# Processing context
trait = "Psoriasis"
cohort = "GSE183134"
# Input paths
in_trait_dir = "../DATA/GEO/Psoriasis"
in_cohort_dir = "../DATA/GEO/Psoriasis/GSE183134"
# Output paths
out_data_file = "./output/preprocess/3/Psoriasis/GSE183134.csv"
out_gene_data_file = "./output/preprocess/3/Psoriasis/gene_data/GSE183134.csv"
out_clinical_data_file = "./output/preprocess/3/Psoriasis/clinical_data/GSE183134.csv"
json_path = "./output/preprocess/3/Psoriasis/cohort_info.json"
# Get file paths
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# Extract background info and clinical data using specified prefixes
background_info, clinical_data = get_background_and_clinical_data(
    matrix_file,
    prefixes_a=['!Series_title', '!Series_summary', '!Series_overall_design'],
    prefixes_b=['!Sample_geo_accession', '!Sample_characteristics_ch1']
)
# Get unique values per clinical feature
sample_characteristics = get_unique_values_by_row(clinical_data)
# Print background info
print("Dataset Background Information:")
print(f"{background_info}\n")
# Print sample characteristics
print("Sample Characteristics:")
for feature, values in sample_characteristics.items():
    print(f"Feature: {feature}")
    print(f"Values: {values}\n")
# 1. Gene Expression Data Availability
# Yes, based on background info mentioning "gene expressions" and "microarray profiling"
is_gene_available = True
# 2. Data Availability and Type Conversion
# 2.1 Row identifiers:
trait_row = 1 # Disease state is in row 1
age_row = None # Age not available
gender_row = None # Gender not available
# 2.2 Conversion functions
def convert_trait(value: str) -> int:
    """Convert trait value to binary (0: not trait, 1: has trait)"""
    if not isinstance(value, str):
        return None
    value = value.split(': ')[-1].strip()
    if value == 'Psoriasis':
        return 1
    elif value == 'Pityriasis_Rubra_Pilaris':
        return 0
    return None

def convert_age(value: str) -> float:
    """Convert age value to float (not used since age is not available)"""
    return None

def convert_gender(value: str) -> int:
    """Convert gender value to binary (not used since gender is not available)"""
    return None
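# Illustrative sanity check of convert_trait on example characteristic strings
# (the "disease_state: ..." prefix here is assumed for illustration, not taken
# from this cohort's actual annotation).
for example in ["disease_state: Psoriasis", "disease_state: Pityriasis_Rubra_Pilaris"]:
    print(f"{example!r} -> {convert_trait(example)}")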
# 3. Save metadata
validate_and_save_cohort_info(is_final=False,
                              cohort=cohort,
                              info_path=json_path,
                              is_gene_available=is_gene_available,
                              is_trait_available=trait_row is not None)
# 4. Clinical Feature Extraction
# Since trait_row is not None, we extract clinical features
selected_clinical = geo_select_clinical_features(clinical_df=clinical_data,
                                                 trait=trait,
                                                 trait_row=trait_row,
                                                 convert_trait=convert_trait)
# Preview extracted features
print("Preview of extracted clinical features:")
print(preview_df(selected_clinical))
# Save clinical data
os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)
selected_clinical.to_csv(out_clinical_data_file)
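# Illustrative confirmation of the saved clinical output (path defined above).
print(f"Clinical data saved to {out_clinical_data_file}")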
# Get file paths
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# Extract gene expression data from matrix file
gene_data = get_genetic_data(matrix_file)
# Print first 20 row IDs and shape of data to help debug
print("Shape of gene expression data:", gene_data.shape)
print("\nFirst few rows of data:")
print(gene_data.head())
print("\nFirst 20 gene/probe identifiers:")
print(gene_data.index[:20])
# Inspect a snippet of raw file to verify identifier format
import gzip
with gzip.open(matrix_file, 'rt', encoding='utf-8') as f:
    lines = []
    for i, line in enumerate(f):
        if "!series_matrix_table_begin" in line:
            # Collect the next 5 lines after the marker
            for _ in range(5):
                lines.append(next(f).strip())
            break
print("\nFirst few lines after matrix marker in raw file:")
for line in lines:
    print(line)
# The gene identifiers appear in a date-like format ("1-Dec", "1-Sep", "10-Mar"),
# a pattern typical of gene symbols mangled by spreadsheet software (e.g. DEC1, SEPT1, MARCH10).
# These are not standard human gene symbols, so they will need to be mapped/cleaned.
requires_gene_mapping = True
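# Illustrative check (not part of the original pipeline): count how many
# identifiers match the date-like pattern, which usually indicates gene symbols
# corrupted by spreadsheet software and reinforces the need for mapping/cleanup.
import re
date_pattern = re.compile(r'^\d{1,2}-(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)$')
date_like_ids = [i for i in gene_data.index if date_pattern.match(str(i))]
print(f"Date-like identifiers found: {len(date_like_ids)} of {len(gene_data.index)}")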
# First inspect the SOFT file content to identify platform data section
import gzip
print("Preview of platform data section:")
with gzip.open(soft_file, 'rt', encoding='utf-8') as f:
    in_platform_section = False
    for line in f:
        if '!platform_table_begin' in line:
            in_platform_section = True
            # Skip the header line
            next(f)
            # Print the next 10 lines
            for _ in range(10):
                print(next(f).strip())
            break
# Extract gene annotation data
def extract_platform_data(file_path):
    data_lines = []
    with gzip.open(file_path, 'rt') as f:
        in_platform_section = False
        for line in f:
            if '!platform_table_begin' in line:
                in_platform_section = True
                # The next line is the column header; keep it so pandas can parse it below
                continue
            if '!platform_table_end' in line:
                break
            if in_platform_section:
                data_lines.append(line.strip())
    # Convert to DataFrame (the first collected line is the column header)
    import io
    df = pd.read_csv(io.StringIO('\n'.join(data_lines)), delimiter='\t')
    return df
gene_metadata = extract_platform_data(soft_file)
# Preview the annotation data
print("\nColumn names:", gene_metadata.columns.tolist())
print("\nFirst few rows preview:")
print(preview_df(gene_metadata))
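# Illustrative check (assumes the annotation table has an 'ID' column): report
# how many annotation IDs overlap the expression index; a low overlap suggests
# this table is not the probe-level annotation needed for mapping.
if 'ID' in gene_metadata.columns:
    n_overlap = gene_data.index.isin(gene_metadata['ID'].astype(str)).sum()
    print(f"\nAnnotation IDs matching expression probes: {n_overlap} of {len(gene_data.index)}")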
# Let's properly examine the SOFT file to find the probe ID to gene symbol mapping
print("Examining SOFT file content for probe mapping:")
with gzip.open(soft_file, 'rt', encoding='utf-8') as f:
    in_platform_section = False
    header_found = False
    platform_data = []
    for line in f:
        if line.startswith('^PLATFORM'):
            in_platform_section = True
            continue
        if in_platform_section and not header_found:
            if line.startswith('#') and 'ID' in line:
                # Print the data column descriptions
                print(line.strip())
                header_found = True
        if in_platform_section and line.startswith('!platform_table_begin'):
            header = next(f).strip().split('\t')
            # Read data lines until the table end marker
            for data_line in f:
                if data_line.startswith('!platform_table_end'):
                    break
                platform_data.append(data_line.strip().split('\t'))
            break
# Convert platform data to DataFrame
gene_metadata = pd.DataFrame(platform_data, columns=header)
print("\nColumn names of platform annotation:", gene_metadata.columns.tolist())
print("\nFirst few rows of platform annotation:")
print(preview_df(gene_metadata))
# Look at the first few rows of actual expression data IDs to match format
print("\nExpression data IDs (first 5):", gene_data.index[:5].tolist())
# If we still can't find appropriate mapping columns, we'll need to use the expression
# data IDs directly as gene symbols (not ideal but prevents failure)
if 'Gene Symbol' in gene_metadata.columns:
    mapping_df = get_gene_mapping(gene_metadata, prob_col='ID', gene_col='Gene Symbol')
else:
    print("\nWARNING: Could not find gene symbol mapping in platform annotation.")
    print("Using expression data IDs directly as gene symbols.")
    # Create mapping dataframe using expression data IDs
    mapping_df = pd.DataFrame({
        'ID': gene_data.index,
        'Gene': gene_data.index
    })
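# Illustrative coverage check before applying the mapping: how many expression
# probes appear in the mapping table (this should be all of them when the
# identity fallback above is used).
n_covered = gene_data.index.isin(mapping_df['ID']).sum()
print(f"\nProbes covered by the mapping table: {n_covered} of {len(gene_data.index)}")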
# Apply mapping to convert probe-level data to gene-level data
gene_data = apply_gene_mapping(gene_data, mapping_df)
# Preview the mapped gene expression data
print("\nShape of gene expression data after mapping:", gene_data.shape)
print("\nFirst few genes and their expression values:")
print(gene_data.head())
# Normalize gene symbols using the library function
gene_data = normalize_gene_symbols_in_index(gene_data)
print("\nShape of gene expression data after normalization:", gene_data.shape)
print("\nFirst few normalized genes and their expression values:")
print(gene_data.head())
# Save the gene expression data
os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
gene_data.to_csv(out_gene_data_file)
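# Illustrative confirmation of the saved gene expression output (path defined above).
print(f"Gene expression data saved to {out_gene_data_file}")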
# 1. Load clinical data
clinical_data = pd.read_csv(out_clinical_data_file, index_col=0)
# 2. Reload the original gene data directly from the matrix file, since the
#    earlier normalization attempt failed to produce standard gene symbols
_, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
gene_data = get_genetic_data(matrix_file)
# Create a simple identity mapping since the platform annotation was incomplete
mapping_df = pd.DataFrame({
    'ID': gene_data.index,
    'Gene': gene_data.index  # Use probe IDs as temporary gene names
})
# Convert to gene-level data
gene_data = apply_gene_mapping(gene_data, mapping_df)
# 3. Link clinical and genetic data
linked_data = geo_link_clinical_genetic_data(clinical_data, gene_data)
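# Illustrative check (assumes geo_link_clinical_genetic_data returns a DataFrame
# with samples as rows): inspect the shape of the linked table before cleaning.
print("Linked data shape:", linked_data.shape)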
# 4. Handle missing values
linked_data = handle_missing_values(linked_data, trait)
# 5. Check for biases in features
trait_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)
# 6. Validate and save cohort info
is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=True,
    is_trait_available=True,
    is_biased=trait_biased,
    df=linked_data,
    note="Contains both gene expression data and clinical information. Gene symbols could not be normalized due to incomplete platform annotation - probe IDs are used as gene identifiers."
)
# 7. Save linked data if usable
if is_usable:
    os.makedirs(os.path.dirname(out_data_file), exist_ok=True)
    linked_data.to_csv(out_data_file)
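# Illustrative final status message; assumes validate_and_save_cohort_info
# returns the boolean usability flag used above.
if is_usable:
    print(f"Linked data saved to {out_data_file}")
else:
    print("Cohort judged not usable; linked data was not saved.")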