# Path Configuration
from tools.preprocess import *
import pandas as pd  # used below for pd.DataFrame and pd.read_csv
# Processing context
trait = "Lung_Cancer"
cohort = "GSE249262"
# Input paths
in_trait_dir = "../DATA/GEO/Lung_Cancer"
in_cohort_dir = "../DATA/GEO/Lung_Cancer/GSE249262"
# Output paths
out_data_file = "./output/preprocess/3/Lung_Cancer/GSE249262.csv"
out_gene_data_file = "./output/preprocess/3/Lung_Cancer/gene_data/GSE249262.csv"
out_clinical_data_file = "./output/preprocess/3/Lung_Cancer/clinical_data/GSE249262.csv"
json_path = "./output/preprocess/3/Lung_Cancer/cohort_info.json"
# Get file paths
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# Extract background info and clinical data using specified prefixes
background_info, clinical_data = get_background_and_clinical_data(
    matrix_file,
    prefixes_a=['!Series_title', '!Series_summary', '!Series_overall_design'],
    prefixes_b=['!Sample_geo_accession', '!Sample_characteristics_ch1']
)
# Get unique values per clinical feature
sample_characteristics = get_unique_values_by_row(clinical_data)
# Print background info
print("Dataset Background Information:")
print(f"{background_info}\n")
# Print sample characteristics
print("Sample Characteristics:")
for feature, values in sample_characteristics.items():
    print(f"Feature: {feature}")
    print(f"Values: {values}\n")
# 1. Gene Expression Data
is_gene_available = True # RNA microarray data is mentioned in background
# 2. Clinical Data Analysis
# For trait: Use status field (Feature 3) to determine disease progression
trait_row = 3
def convert_trait(x):
    if not isinstance(x, str):
        return None
    val = x.split(': ')[1] if ': ' in x else x
    if 'progression' in val.lower():
        return 1
    elif 'stable' in val.lower():
        return 0
    return None
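# Illustrative spot-check of convert_trait on hypothetical characteristic strings
# (the example values below are assumptions, not taken from this cohort's metadata).
for _example in ['status: progression', 'status: stable disease', 'status: n/a']:
    print(f"{_example!r} -> {convert_trait(_example)}")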
# Age and gender not available in characteristics
age_row = None
gender_row = None
convert_age = None
convert_gender = None
# 3. Save Initial Filtering Results
validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=(trait_row is not None)
)
# 4. Extract Clinical Features
if trait_row is not None:
    selected_clinical = geo_select_clinical_features(
        clinical_df=clinical_data,
        trait=trait,
        trait_row=trait_row,
        convert_trait=convert_trait,
        age_row=age_row,
        convert_age=convert_age,
        gender_row=gender_row,
        convert_gender=convert_gender
    )
    # Preview the processed data
    print("Preview of processed clinical data:")
    print(preview_df(selected_clinical))
    # Save to CSV
    selected_clinical.to_csv(out_clinical_data_file)
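    # Illustrative sanity check (assumption: geo_select_clinical_features returns a
    # DataFrame with clinical features as rows and samples as columns); guarded so it
    # is a no-op if that assumption does not hold.
    if trait in selected_clinical.index:
        print("Trait value counts:")
        print(selected_clinical.loc[trait].value_counts(dropna=False))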
# Get file paths
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# Extract gene expression data from matrix file
gene_data = get_genetic_data(matrix_file)
# Print first 20 row IDs and shape of data to help debug
print("Shape of gene expression data:", gene_data.shape)
print("\nFirst few rows of data:")
print(gene_data.head())
print("\nFirst 20 gene/probe identifiers:")
print(gene_data.index[:20])
# Inspect a snippet of raw file to verify identifier format
import gzip
with gzip.open(matrix_file, 'rt', encoding='utf-8') as f:
    lines = []
    for i, line in enumerate(f):
        if "!series_matrix_table_begin" in line:
            # Get the next 5 lines after the marker
            for _ in range(5):
                lines.append(next(f).strip())
            break
print("\nFirst few lines after matrix marker in raw file:")
for line in lines:
    print(line)
# The identifiers appear to be numeric probe IDs (e.g. 23064070) rather than standard
# gene symbols, so they likely need to be mapped to gene symbols before analysis
requires_gene_mapping = True
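# Quick illustrative check (assumption: probe IDs are purely numeric strings) of how
# many of the leading identifiers look like numeric probe IDs rather than gene symbols.
_numeric_like = sum(str(idx).isdigit() for idx in gene_data.index[:100])
print(f"\n{_numeric_like} of the first {min(100, len(gene_data.index))} identifiers are purely numeric")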
# Extract gene annotation data
gene_metadata = get_gene_annotation(soft_file)
# Try searching for ID patterns in all columns
print("All column names:", gene_metadata.columns.tolist())
print("\nPreview first few rows of each column to locate numeric IDs:")
for col in gene_metadata.columns:
    sample_values = gene_metadata[col].dropna().head().tolist()
    print(f"\n{col}:")
    print(sample_values)
# Inspect raw file to see unfiltered annotation format
import gzip
print("\nRaw SOFT file preview:")
with gzip.open(soft_file, 'rt', encoding='utf-8') as f:
    header = []
    for i, line in enumerate(f):
        header.append(line.strip())
        if i >= 10:  # Preview first 10 lines
            break
print('\n'.join(header))
# First inspect the platform table structure
with gzip.open(soft_file, 'rt', encoding='utf-8') as f:
    for line in f:
        if "!Platform_table_begin" in line:
            print("Header:", next(f).strip())
            print("First data row:", next(f).strip())
            break
# Extract platform data with proper column headers
platform_rows = []
with gzip.open(soft_file, 'rt', encoding='utf-8') as f:
    platform_found = False
    for line in f:
        if "!Platform_table_begin" in line:
            platform_found = True
            header = next(f).strip().split('\t')
            continue
        if platform_found:
            if "!Platform_table_end" in line:
                break
            row = line.strip().split('\t')
            if len(row) == len(header):
                platform_rows.append(row)
platform_data = pd.DataFrame(platform_rows, columns=header)
print("\nAvailable columns:", platform_data.columns.tolist())
# Create mapping between probe IDs and gene symbols
mapping_df = pd.DataFrame()
id_col = [col for col in platform_data.columns if 'id' in col.lower()][0]
gene_col = [col for col in platform_data.columns if 'gene' in col.lower() or 'symbol' in col.lower()][0]
mapping_df['ID'] = platform_data[id_col]
mapping_df['Gene'] = platform_data[gene_col]
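# Illustrative preview of the probe-to-gene mapping before applying it; reports how
# many probes lack a gene symbol (missing or empty annotation).
print("\nMapping preview:")
print(mapping_df.head())
print("Probes without a gene symbol:", mapping_df['Gene'].isna().sum() + (mapping_df['Gene'] == '').sum())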
# Apply gene mapping to convert probe-level data to gene-level data
gene_data = apply_gene_mapping(expression_df=gene_data, mapping_df=mapping_df)
# Print info about the mapped data
print("\nShape of mapped gene expression data:", gene_data.shape)
print("\nFirst few gene symbols:", gene_data.index[:5].tolist())
# Since gene mapping failed in a previous step, we'll fall back to using probe IDs
# Load clinical data
selected_clinical_df = pd.read_csv(out_clinical_data_file, index_col=0)
# Save raw gene expression data with probe IDs
gene_data.to_csv(out_gene_data_file)
# Link clinical and genetic data
linked_data = geo_link_clinical_genetic_data(selected_clinical_df, gene_data)
# Handle missing values
linked_data = handle_missing_values(linked_data, trait)
# Evaluate bias in features
is_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)
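# Illustrative status report (not part of the original pipeline output): summarize the
# linked data after missing-value handling and the bias judgement.
print("Linked data shape after missing-value handling:", linked_data.shape)
print("Trait judged biased:", is_biased)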
# Record cohort information
is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=True,
    is_trait_available=True,
    is_biased=is_biased,
    df=linked_data,
    note="Contains numerical probe-level expression data and clinical data. Gene symbol mapping was not completed."
)
# Save linked data if usable
if is_usable:
    linked_data.to_csv(out_data_file)