# Path Configuration
import os
import re
import pandas as pd

from tools.preprocess import *
# Processing context
trait = "Adrenocortical_Cancer"
cohort = "GSE67766"
# Input paths
in_trait_dir = "../DATA/GEO/Adrenocortical_Cancer"
in_cohort_dir = "../DATA/GEO/Adrenocortical_Cancer/GSE67766"
# Output paths
out_data_file = "./output/preprocess/3/Adrenocortical_Cancer/GSE67766.csv"
out_gene_data_file = "./output/preprocess/3/Adrenocortical_Cancer/gene_data/GSE67766.csv"
out_clinical_data_file = "./output/preprocess/3/Adrenocortical_Cancer/clinical_data/GSE67766.csv"
json_path = "./output/preprocess/3/Adrenocortical_Cancer/cohort_info.json"
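# Convenience (a minimal, optional sketch, not part of the original pipeline): create the
# output directories up front so later to_csv / JSON writes do not fail on a missing path.
# The gene-data directory is also created again right before saving, further below.
for _path in (out_data_file, out_gene_data_file, out_clinical_data_file, json_path):
    os.makedirs(os.path.dirname(_path), exist_ok=True)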
# Get file paths
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# First extract background info and subseries info
background_info, _ = get_background_and_clinical_data(
    matrix_file,
    prefixes_a=['!Series_title', '!Series_summary',
                '!Series_overall_design', '!Series_type',
                '!Series_relation'],
    prefixes_b=None)
print("Initial Dataset Information:")
print(background_info)
print("\nChecking for subseries...\n")
# If SuperSeries, get the constituent series accession
subseries = None
if 'SuperSeries' in background_info:
    for line in background_info.split('\n'):
        if '!Series_relation\t' in line:
            matches = re.finditer(r'GSE\d+', line)
            for match in matches:
                potential_subseries = match.group(0)
                if potential_subseries != cohort:  # Skip if it's the SuperSeries ID
                    subseries_dir = os.path.join(in_trait_dir, potential_subseries)
                    if os.path.exists(subseries_dir):
                        print(f"Found valid subseries: {potential_subseries}")
                        subseries = potential_subseries
                        break
# If subseries found, update directory path and get new files
if subseries:
    in_cohort_dir = os.path.join(in_trait_dir, subseries)
    soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
    print(f"\nUsing subseries data from: {in_cohort_dir}\n")
else:
    print("\nNo valid subseries found, using original data\n")
# Extract background info and clinical data from final files
background_info, clinical_data = get_background_and_clinical_data(matrix_file)
# Get unique values per clinical feature
sample_characteristics = get_unique_values_by_row(clinical_data)
# Print final dataset information
print("Final Dataset Information:")
print(f"{background_info}\n")
print("Sample Characteristics:")
for feature, values in sample_characteristics.items():
    print(f"Feature: {feature}")
    print(f"Values: {values}\n")
# 1. Gene Expression Data Availability
# Based on !Series_type, which includes "Expression profiling by array" and
# "Expression profiling by high throughput sequencing", this dataset likely contains gene expression data
is_gene_available = True
# 2.1 Data Availability
# Looking at sample characteristics:
# Row 0 shows "cell line: SW-13" - this indicates cell line data, not clinical samples
# No rows for trait, age or gender found
trait_row = None
age_row = None
gender_row = None
# 2.2 Data Type Conversion Functions
# Although not used since data is unavailable, define placeholder functions
def convert_trait(x):
    if x is None or pd.isna(x):
        return None
    value = str(x).split(':')[-1].strip()
    # Binary conversion would go here
    return None

def convert_age(x):
    if x is None or pd.isna(x):
        return None
    value = str(x).split(':')[-1].strip()
    # Numeric conversion would go here
    return None

def convert_gender(x):
    if x is None or pd.isna(x):
        return None
    value = str(x).split(':')[-1].strip().lower()
    # Gender binary conversion would go here
    return None
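
# For illustration only: if this cohort did include per-sample trait annotations
# (it does not), a binary converter might follow the pattern sketched below.
# The label strings ("adrenocortical carcinoma", "normal", "control") are
# hypothetical examples, not values observed in this dataset.
def example_convert_trait(x):
    if x is None or pd.isna(x):
        return None
    value = str(x).split(':')[-1].strip().lower()
    if 'adrenocortical carcinoma' in value or value == 'acc':
        return 1
    if 'normal' in value or 'control' in value:
        return 0
    return None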
# 3. Save Metadata
# trait_row is None so is_trait_available is False
validate_and_save_cohort_info(is_final=False,
                              cohort=cohort,
                              info_path=json_path,
                              is_gene_available=is_gene_available,
                              is_trait_available=False)
# 4. Clinical Feature Extraction
# Skip since trait_row is None
# Extract gene expression data from matrix file
gene_data = get_genetic_data(matrix_file)
# Print first 20 row IDs and shape of data to help debug
print("Shape of gene expression data:", gene_data.shape)
print("\nFirst few rows of data:")
print(gene_data.head())
print("\nFirst 20 gene/probe identifiers:")
print(gene_data.index[:20])
# Inspect a snippet of raw file to verify identifier format
import gzip
with gzip.open(matrix_file, 'rt', encoding='utf-8') as f:
    lines = []
    for i, line in enumerate(f):
        if "!series_matrix_table_begin" in line:
            # Get the next 5 lines after the marker
            for _ in range(5):
                lines.append(next(f).strip())
            break
print("\nFirst few lines after matrix marker in raw file:")
for line in lines:
    print(line)
# Looking at the identifiers starting with "ILMN_", these are Illumina probe IDs
# They need to be mapped to official gene symbols to be interpretable in analysis
requires_gene_mapping = True
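# Optional sanity check (a minimal sketch; the regex is an assumption about the usual
# Illumina probe-ID format): count how many index entries match "ILMN_<digits>".
illumina_like = sum(bool(re.match(r'^ILMN_\d+$', str(idx))) for idx in gene_data.index)
print(f"Identifiers matching ILMN_* pattern: {illumina_like} of {len(gene_data.index)}")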
# Get file paths using library function
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# Extract gene annotation from SOFT file and get meaningful data
gene_annotation = get_gene_annotation(soft_file)
# Preview gene annotation data
print("Gene annotation shape:", gene_annotation.shape)
print("\nGene annotation preview:")
print(preview_df(gene_annotation))
print("\nNumber of non-null values in each column:")
print(gene_annotation.count())
# Print example rows showing the mapping information columns
print("\nSample mapping columns ('ID' and 'Symbol'):")
print("\nFirst 5 rows:")
print(gene_annotation[['ID', 'Symbol']].head().to_string())
print("\nNote: Gene mapping will use:")
print("'ID' column: Probe identifiers")
print("'Symbol' column: Contains gene symbol information")
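# Optional check (a minimal sketch, not part of the original pipeline): confirm that the
# 'Symbol' column looks like gene symbols rather than probe IDs or long descriptions
# before relying on it for mapping.
symbol_sample = gene_annotation['Symbol'].dropna().astype(str).head(10).tolist()
print("\nExample 'Symbol' values:", symbol_sample)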
# 1. Based on previous output:
# Gene expression data uses 'ILMN_*' identifiers as index
# Gene annotation data has matching IDs in 'ID' column and gene symbols in 'Symbol' column
# 2. Extract mapping between probe IDs and gene symbols
mapping = get_gene_mapping(gene_annotation, prob_col='ID', gene_col='Symbol')
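# Optional sanity check (a minimal sketch, assuming 'mapping' is a DataFrame with an 'ID'
# column of probe identifiers, as previewed above): measure how many expression probes
# have an annotation entry before applying the mapping.
mapped_probes = gene_data.index.isin(mapping['ID']).sum()
print(f"Probes with annotation entries: {mapped_probes} of {len(gene_data.index)}")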
# 3. Apply gene mapping to convert probe-level measurements to gene expression
# (record the probe-level shape before mapping, since gene_data is overwritten)
print("Shape of probe-level data:", gene_data.shape)
gene_data = apply_gene_mapping(gene_data, mapping)
# Print info about the mapping results
print("\nShape after mapping to genes:", gene_data.shape)
print("\nFirst few rows of gene expression data:")
print(gene_data.head())
print("\nFirst few gene symbols:")
print(gene_data.index[:10])
# 1. Normalize and save gene expression data
gene_data = normalize_gene_symbols_in_index(gene_data)
os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
gene_data.to_csv(out_gene_data_file)
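# Optional verification (a minimal sketch, not part of the original pipeline): read the
# saved file back to confirm it round-trips with the expected shape.
saved_gene_data = pd.read_csv(out_gene_data_file, index_col=0)
print(f"Saved gene expression data: {saved_gene_data.shape[0]} genes x {saved_gene_data.shape[1]} samples")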
# 2-4. Skip clinical data linking and bias checking since no clinical data exists
# 5. Update cohort info to reflect dataset is not usable due to lack of trait data
validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=True,
    is_trait_available=False,
    is_biased=True,  # Cell line data is considered biased for human trait analysis
    df=gene_data,  # Provide gene expression data
    note="Dataset contains only cell line data (SW-13) without clinical information"
)
# 6. Skip saving linked data since dataset is not usable for trait analysis