# Imports
import gzip
import io
import os
import re
from typing import Optional

import pandas as pd

from tools.preprocess import *

# Path Configuration
# Processing context
trait = "Underweight"
cohort = "GSE84954"
# Input paths
in_trait_dir = "../DATA/GEO/Underweight"
in_cohort_dir = "../DATA/GEO/Underweight/GSE84954"
# Output paths
out_data_file = "./output/preprocess/3/Underweight/GSE84954.csv"
out_gene_data_file = "./output/preprocess/3/Underweight/gene_data/GSE84954.csv"
out_clinical_data_file = "./output/preprocess/3/Underweight/clinical_data/GSE84954.csv"
json_path = "./output/preprocess/3/Underweight/cohort_info.json"
# Get file paths
soft_file_path, matrix_file_path = geo_get_relevant_filepaths(in_cohort_dir)
# Get background info and clinical data
background_info, clinical_data = get_background_and_clinical_data(matrix_file_path)
# Print shape and first few rows to verify data
print("Background Information:")
print(background_info)
print("\nClinical Data Shape:", clinical_data.shape)
print("\nFirst few rows of Clinical Data:")
print(clinical_data.head())
print("\nSample Characteristics:")
# Get dictionary of unique values per row
unique_values_dict = get_unique_values_by_row(clinical_data)
for row, values in unique_values_dict.items():
    print(f"\n{row}:")
    print(values)
# 1. Gene Expression Data Availability
# This is microarray data studying molecular pathways in tissues, so it should contain gene expression data
is_gene_available = True
# 2. Variable Availability and Data Type Conversion
# Trait - use chronic liver disease as trait indicator (disease status)
trait_row = 1 # Disease information in row 1
def convert_trait(value: str) -> Optional[int]:
    """Convert disease status to binary (0 = control, 1 = liver disease)."""
    if not value or ':' not in value:
        return None
    value = value.split(':', 1)[1].strip()
    if 'Crigler-Najjar' in value:  # Control group
        return 0
    elif 'chronic liver disease' in value or 'Alagille' in value:  # Disease group
        return 1
    return None
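# Quick sanity check of convert_trait on representative characteristic strings.
# These strings are illustrative of the "key: value" format only, not verbatim
# values taken from this cohort's sample characteristics.
assert convert_trait("diagnosis: Crigler-Najjar syndrome") == 0
assert convert_trait("diagnosis: chronic liver disease") == 1
assert convert_trait("tissue: liver") is None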
# Age - not available in sample characteristics
age_row = None
convert_age = None
# Gender - not available in sample characteristics
gender_row = None
convert_gender = None
# 3. Save metadata
# Trait data is available since trait_row is not None
is_trait_available = trait_row is not None
validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)
# 4. Extract clinical features since trait data is available
selected_clinical_df = geo_select_clinical_features(
    clinical_df=clinical_data,
    trait=trait,
    trait_row=trait_row,
    convert_trait=convert_trait,
    age_row=age_row,
    convert_age=convert_age,
    gender_row=gender_row,
    convert_gender=convert_gender
)
# Preview the clinical data
print("Clinical Data Preview:")
print(preview_df(selected_clinical_df))
# Save clinical data
os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)
selected_clinical_df.to_csv(out_clinical_data_file)
# Extract gene expression data from matrix file
genetic_data = get_genetic_data(matrix_file_path)
# Print first 20 row IDs
print("First 20 gene/probe IDs:")
print(list(genetic_data.index[:20]))
# Analyzing gene identifiers
# The identifiers are numeric probe IDs (e.g. 16650001, 16650003), not standard
# human gene symbols, which are alphanumeric (e.g. 'BRCA1'). They are microarray
# platform probe IDs that need to be mapped to gene symbols.
requires_gene_mapping = True
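# Illustrative supporting check (not part of the pipeline): purely numeric IDs
# strongly suggest platform probe identifiers rather than gene symbols.
print("First 20 IDs all numeric:",
      all(str(idx).isdigit() for idx in genetic_data.index[:20]))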
# Extract gene annotation from SOFT file
gene_annotation = get_gene_annotation(soft_file_path)
# Preview annotation structure
print("Gene annotation preview:")
print(preview_df(gene_annotation))
print("\nAll columns in annotation data:")
print(list(gene_annotation.columns))
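# Scan the annotation columns for a likely gene-symbol field, since naming
# varies across GEO platforms (e.g. 'Gene Symbol', 'GENE_SYMBOL', 'Symbol',
# 'gene_assignment'). The candidate keywords are an assumption, not taken from
# this platform's documentation.
symbol_candidates = [col for col in gene_annotation.columns
                     if 'symbol' in col.lower() or 'gene_assignment' in col.lower()]
print("\nCandidate gene-symbol columns:", symbol_candidates)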
# 1. Get the platform annotation table from the SOFT file with pattern matching.
# The table delimiters' capitalization varies across SOFT files, so match
# case-insensitively.
metadata_pattern = r'!platform_table_begin\n(.*?)\n!platform_table_end'
with gzip.open(soft_file_path, 'rt') as f:
    content = f.read()
matches = re.findall(metadata_pattern, content, re.DOTALL | re.IGNORECASE)
if matches:
    platform_data = pd.read_csv(io.StringIO(matches[0]), sep='\t')
    if 'Gene Symbol' in platform_data.columns:
        # 2. Create the probe-to-gene mapping from the platform table
        mapping_data = platform_data[['ID', 'Gene Symbol']].copy()
        mapping_data = mapping_data.dropna()
        mapping_data = mapping_data.rename(columns={'Gene Symbol': 'Gene'})
        # 3. Apply the mapping to get gene expression data
        gene_data = apply_gene_mapping(genetic_data, mapping_data)
        # Preview the mapped gene data
        print("\nFirst 10 rows of mapped gene expression data:")
        print(preview_df(gene_data.head(10)))
        # Save gene data
        os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
        gene_data.to_csv(out_gene_data_file)
    else:
        print("Platform table has no 'Gene Symbol' column; trying alternatives below")
else:
    print("Could not find platform table in SOFT file")
# 1. Alternative approach: derive the gene-symbol column name from the platform
# annotation header lines ('#<column> = <description>'), then read the platform
# table itself for the ID-to-symbol mapping.
gene_col_pattern = r'#(gene[ _]?symbol)\s*='  # matched case-insensitively below
table_pattern = r'!platform_table_begin\n(.*?)\n!platform_table_end'
with gzip.open(soft_file_path, 'rt') as f:
    content = f.read()
# Find the platform table and the header line that names the gene-symbol column
table_match = re.search(table_pattern, content, re.DOTALL | re.IGNORECASE)
gene_match = re.search(gene_col_pattern, content, re.IGNORECASE)
if table_match:
    if gene_match:
        # Create the mapping dataframe with the detected gene-symbol column
        platform_data = pd.read_csv(io.StringIO(table_match.group(1)), sep='\t')
        gene_col = gene_match.group(1).strip()
        mapping_data = platform_data[['ID', gene_col]].copy()
        mapping_data = mapping_data.dropna()
        mapping_data = mapping_data.rename(columns={gene_col: 'Gene'})
        # Apply mapping to get gene expression data
        gene_data = apply_gene_mapping(genetic_data, mapping_data)
        # Preview the mapped gene expression data
        print("\nFirst 10 rows of mapped gene expression data:")
        print(preview_df(gene_data.head(10)))
        # Save gene data
        os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
        gene_data.to_csv(out_gene_data_file)
    else:
        print("Could not find gene symbol column information in platform metadata")
else:
    print("Could not find platform table section in SOFT file")
# Examine the SOFT file structure directly to collect the platform table rows
with gzip.open(soft_file_path, 'rt') as f:
    platform_section = False
    gene_mapping_lines = []
    for line in f:
        # Compare case-insensitively; delimiter capitalization varies across files
        if line.lower().startswith('!platform_table_begin'):
            platform_section = True
            continue
        elif line.lower().startswith('!platform_table_end'):
            platform_section = False
            continue
        if platform_section:
            gene_mapping_lines.append(line)
# Create mapping dataframe
mapping_data = pd.read_csv(io.StringIO(''.join(gene_mapping_lines)), sep='\t')
# Filter rows where gene symbol exists and is not empty
mapping_data = mapping_data[['ID', 'Symbol']].copy()
mapping_data = mapping_data.dropna(subset=['Symbol'])
mapping_data = mapping_data[mapping_data['Symbol'].str.strip() != '']
mapping_data = mapping_data.rename(columns={'Symbol': 'Gene'})
# Apply mapping to get gene-level data
gene_data = apply_gene_mapping(genetic_data, mapping_data)
# Normalize gene symbols using NCBI data
gene_data = normalize_gene_symbols_in_index(gene_data)
print("\nGene data shape (after normalization):", gene_data.shape)
os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
gene_data.to_csv(out_gene_data_file)
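# Quick coverage check: many probes collapse onto the same symbol or lack one,
# so fewer rows after mapping and normalization is expected (informational only).
print(f"Probes in expression matrix: {genetic_data.shape[0]}; "
      f"gene symbols after mapping and normalization: {gene_data.shape[0]}")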
# Link clinical and genetic data
selected_clinical_df = pd.read_csv(out_clinical_data_file, index_col=0)
linked_data = geo_link_clinical_genetic_data(selected_clinical_df, gene_data)
# Handle missing values
linked_data = handle_missing_values(linked_data, trait)
# Check for bias in features
is_trait_biased, linked_data = judge_and_remove_biased_features(linked_data, trait)
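# Informational check of the trait distribution after missing-value handling and
# bias filtering. This assumes the linked dataframe names the trait column after
# the `trait` variable defined above; the check is skipped if that is not the case.
if trait in linked_data.columns:
    print(f"\n{trait} value counts:")
    print(linked_data[trait].value_counts(dropna=False))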
# Validate and save dataset metadata
note = "Dataset contains gene expression data from liver disease patients and controls, with proper mapping to standardized gene symbols."
is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=True,
    is_trait_available=True,
    is_biased=is_trait_biased,
    df=linked_data,
    note=note
)
# Save linked data if usable
if is_usable:
    os.makedirs(os.path.dirname(out_data_file), exist_ok=True)
    linked_data.to_csv(out_data_file)
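# Brief end-of-run summary (informational; not part of the library workflow).
print(f"\nDataset usable: {is_usable}; final linked data shape: {linked_data.shape}")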
# Get gene annotation data using the library function
gene_annotation = get_gene_annotation(soft_file_path)
# Create mapping using ID and Symbol columns
mapping_data = gene_annotation[['ID', 'Symbol']].copy()
mapping_data = mapping_data.dropna()
mapping_data = mapping_data.rename(columns={'Symbol': 'Gene'})
# Apply mapping to get gene expression data
gene_data = apply_gene_mapping(genetic_data, mapping_data)
# Preview the mapped gene expression data
print("\nFirst few rows of mapped gene expression data:")
print(preview_df(gene_data.head()))