# Path Configuration
import gzip
import os
import pandas as pd
from tools.preprocess import *
# Processing context
trait = "Substance_Use_Disorder"
cohort = "GSE116833"
# Input paths
in_trait_dir = "../DATA/GEO/Substance_Use_Disorder"
in_cohort_dir = "../DATA/GEO/Substance_Use_Disorder/GSE116833"
# Output paths
out_data_file = "./output/preprocess/3/Substance_Use_Disorder/GSE116833.csv"
out_gene_data_file = "./output/preprocess/3/Substance_Use_Disorder/gene_data/GSE116833.csv"
out_clinical_data_file = "./output/preprocess/3/Substance_Use_Disorder/clinical_data/GSE116833.csv"
json_path = "./output/preprocess/3/Substance_Use_Disorder/cohort_info.json"
# Get file paths
soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)
# Get background info and clinical data
background_info, clinical_data = get_background_and_clinical_data(matrix_file_path)
print("Background Information:")
print(background_info)
print("\nSample Characteristics:")
# Get dictionary of unique values per row
unique_values_dict = get_unique_values_by_row(clinical_data)
for row, values in unique_values_dict.items():
    print(f"\n{row}:")
    print(values)
# 1. Gene Expression Data Availability
is_gene_available = True  # Background info indicates the Illumina HumanHT-12 v4.0 Gene Expression BeadChip was used, so gene expression data is available
# 2.1 Data Availability
# Trait information (anhedonia level) is recorded under key 0
trait_row = 0
# Age is recorded under key 2
age_row = 2
# Gender is recorded under key 1
gender_row = 1
# 2.2 Data Type Conversion
def convert_trait(x):
    """Convert anhedonia level to binary: high=1, low=0"""
    if not isinstance(x, str):
        return None
    val = x.split(': ')[1].lower() if ': ' in x else x.lower()
    if 'high' in val:
        return 1
    elif 'low' in val:
        return 0
    return None
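# Illustrative expected behavior (a sketch; the exact characteristic label, e.g.
# "anhedonia", is an assumption based on typical "label: value" GEO formatting):
#   convert_trait("anhedonia: high") -> 1
#   convert_trait("anhedonia: low")  -> 0
#   convert_trait("anhedonia: N/A")  -> None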
def convert_age(x):
    """Convert age to a continuous numeric value"""
    if not isinstance(x, str):
        return None
    try:
        return float(x.split(': ')[1])
    except (IndexError, ValueError):
        return None
def convert_gender(x):
    """Convert gender to binary: female=0, male=1"""
    if not isinstance(x, str):
        return None
    val = x.split(': ')[1].lower() if ': ' in x else x.lower()
    if 'female' in val:
        return 0
    elif 'male' in val:
        return 1
    return None
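# Illustrative expected behavior for the remaining converters (field labels such as
# "age" and "Sex" are assumptions based on typical GEO characteristic formatting):
#   convert_age("age: 23")        -> 23.0
#   convert_gender("Sex: Female") -> 0
#   convert_gender("Sex: Male")   -> 1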
# 3. Save Metadata
# Initial filtering based on trait and gene data availability
is_trait_available = trait_row is not None
validate_and_save_cohort_info(is_final=False, cohort=cohort, info_path=json_path,
                              is_gene_available=is_gene_available,
                              is_trait_available=is_trait_available)
# 4. Clinical Feature Extraction
# Since trait_row is not None, we extract clinical features
clinical_df = geo_select_clinical_features(clinical_data, trait, trait_row, convert_trait,
                                           age_row, convert_age,
                                           gender_row, convert_gender)
# Preview the extracted features
preview_result = preview_df(clinical_df)
print("\nPreview of extracted clinical features:")
print(preview_result)
# Save clinical features to CSV
os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)
clinical_df.to_csv(out_clinical_data_file)
# Get gene expression data from matrix file
genetic_data = get_genetic_data(matrix_file_path)
# Examine data structure
print("Data structure and head:")
print(genetic_data.head())
print("\nShape:", genetic_data.shape)
print("\nFirst 20 row IDs (gene/probe identifiers):")
print(list(genetic_data.index)[:20])
# Get a few column names to verify sample IDs
print("\nFirst 5 column names:")
print(list(genetic_data.columns)[:5])
# ILMN_ prefix indicates these are Illumina array probe IDs, not gene symbols
# These need to be mapped to human gene symbols for analysis
requires_gene_mapping = True
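# Optional sanity check (illustrative sketch): confirm that most row identifiers
# actually carry the Illumina probe prefix before committing to probe-to-gene mapping.
ilmn_fraction = genetic_data.index.astype(str).str.startswith("ILMN_").mean()
print(f"\nFraction of row IDs with ILMN_ prefix: {ilmn_fraction:.2%}")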
# First inspect where platform data begins
print("Looking for platform data section:")
with gzip.open(soft_file_path, 'rt') as f:
    for i, line in enumerate(f):
        if "!Platform_table_begin" in line or "^PLATFORM" in line:
            print(f"Found platform marker at line {i}:")
            print(line.strip())
            # Print the next few lines to see the format
            for _ in range(5):
                print(next(f).strip())
            break
print("\nExtracting gene annotations...")
# Use library function to extract annotations
gene_annotation = get_gene_annotation(soft_file_path)
# Extract the ID and Symbol columns needed for probe-to-gene mapping
mapping_df = gene_annotation[['ID', 'Symbol']].copy()
print("\nMapping data preview:")
preview = preview_df(mapping_df)
print(preview)
print("\nShape of mapping data:", mapping_df.shape)
print("Number of non-null Symbols:", mapping_df['Symbol'].count())
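# Optional coverage check (illustrative sketch): fraction of expression probes that
# have an entry in the annotation table; a low value would suggest a platform
# mismatch between the SOFT and matrix files.
probe_coverage = genetic_data.index.isin(mapping_df['ID']).mean()
print(f"Fraction of expression probes covered by annotation: {probe_coverage:.2%}")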
# Prepare mapping dataframe with correct column names
mapping_df = mapping_df.rename(columns={'Symbol': 'Gene'})
# Map probe IDs to gene symbols using the library's mapping function
gene_data = apply_gene_mapping(genetic_data, mapping_df)
# Normalize gene symbols to their official HGNC names and combine rows with same gene symbol
gene_data = normalize_gene_symbols_in_index(gene_data)
# Save gene expression data
gene_data.to_csv(out_gene_data_file)
# Print shape and preview after mapping
print("Shape after mapping:", gene_data.shape)
print("\nPreview of first few rows after mapping:")
print(preview_df(gene_data))
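# Optional check (illustrative sketch): after mapping and symbol normalization the
# index should contain unique gene symbols; duplicated entries would indicate that
# rows for the same gene were not combined.
print("Duplicate gene symbols in index:", gene_data.index.duplicated().sum())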
# 1. Normalize gene symbols in the gene expression data (already normalized above; this re-applies the same normalization)
gene_data = normalize_gene_symbols_in_index(gene_data)
os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
gene_data.to_csv(out_gene_data_file)
print("\nGene data shape (normalized gene-level):", gene_data.shape)
# Load clinical data previously processed
selected_clinical_df = pd.read_csv(out_clinical_data_file, index_col=0)
print("\nClinical data shape:", selected_clinical_df.shape)
# 2. Link clinical and genetic data using normalized gene-level data
linked_data = geo_link_clinical_genetic_data(selected_clinical_df, gene_data)
print("\nLinked data shape:", linked_data.shape)
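# Quick look at the trait distribution before missing-value handling (illustrative
# sketch; assumes the trait column survived the linking step).
if trait in linked_data.columns:
    print(f"\n{trait} value counts:")
    print(linked_data[trait].value_counts(dropna=False))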
# 3. Handle missing values systematically
if trait in linked_data.columns:
    linked_data = handle_missing_values(linked_data, trait)
# 4. Check for bias in trait and demographic features
trait_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)
# 5. Final validation and information saving
note = "Data was successfully preprocessed from probe-level to gene-level expression, with gene symbols normalized against the NCBI Gene database."
is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=True,
    is_trait_available=True,
    is_biased=trait_biased,
    df=linked_data,
    note=note
)
# 6. Save linked data only if usable and not biased
if is_usable and not trait_biased:
    os.makedirs(os.path.dirname(out_data_file), exist_ok=True)
    linked_data.to_csv(out_data_file)