# Path configuration and STEP 1: locate cohort files and inspect metadata.
# (Fixed: the original imported tools.preprocess twice; one wildcard import suffices.)
from tools.preprocess import *

# Processing context
trait = "Sarcoma"
cohort = "GSE118336"

# Input paths
in_trait_dir = "../DATA/GEO/Sarcoma"
in_cohort_dir = "../DATA/GEO/Sarcoma/GSE118336"

# Output paths
out_data_file = "./output/preprocess/1/Sarcoma/GSE118336.csv"
out_gene_data_file = "./output/preprocess/1/Sarcoma/gene_data/GSE118336.csv"
out_clinical_data_file = "./output/preprocess/1/Sarcoma/clinical_data/GSE118336.csv"
json_path = "./output/preprocess/1/Sarcoma/cohort_info.json"

# 1. Identify the paths to the SOFT file and the matrix file.
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)

# 2. Read the matrix file to obtain background information and
#    sample characteristics data.
background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
background_info, clinical_data = get_background_and_clinical_data(
    matrix_file, background_prefixes, clinical_prefixes
)

# 3. Obtain the sample characteristics dictionary from the clinical dataframe.
sample_characteristics_dict = get_unique_values_by_row(clinical_data)

# 4. Explicitly print the background information and the characteristics dict.
print("Background Information:")
print(background_info)
print("Sample Characteristics Dictionary:")
print(sample_characteristics_dict)

# Dataset analysis and clinical feature extraction.
# 1. Gene expression data availability: the series title mentions
#    "HTA2.0 (human transcriptome array) analysis", i.e. real gene expression
#    data (not merely miRNA or methylation).
is_gene_available = True

# 2.1 Variable availability: no trait ('Sarcoma'), and no age key was found
#     in the sample characteristics dictionary.
trait_row = None
age_row = None
gender_row = None  # No gender information found in the characteristics dict.

# 2.2 Data-type conversion placeholders. All three rows above are None, so
#     none of these converters is ever invoked; each always returns None,
#     and is annotated accordingly (the originals claimed -> int / -> float
#     while unconditionally returning None).
def convert_trait(x: str) -> None:
    """Placeholder: unused because trait_row is None."""
    return None


def convert_age(x: str) -> None:
    """Placeholder: unused because age_row is None."""
    return None


def convert_gender(x: str) -> None:
    """Placeholder: unused because gender_row is None."""
    return None


# 3. Save metadata (initial filtering). trait_row is None, so the trait is
#    unavailable for this cohort.
is_trait_available = False
is_usable = validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)

# 4. Since trait_row is None, clinical feature extraction is skipped and
#    geo_select_clinical_features is not called.

# STEP 3: extract the gene expression matrix.
import gzip
import pandas as pd

try:
    # 1. Attempt extraction via the library helper first.
    gene_data = get_genetic_data(matrix_file)
except KeyError:
    # Fallback: the expected "ID_REF" column may be absent, so manually parse
    # the file and rename the first column to "ID".
    marker = "!series_matrix_table_begin"
    skip_rows = None
    # Determine how many rows to skip before the matrix data begins.
    with gzip.open(matrix_file, 'rt') as f:
        for i, line in enumerate(f):
            if marker in line:
                skip_rows = i + 1
                break
        else:
            # for/else: the marker was never seen anywhere in the file.
            raise ValueError(f"Marker '{marker}' not found in the file.")

    # Read from the determined position; comment='!' drops the trailing
    # "!series_matrix_table_end" line.
    gene_data = pd.read_csv(
        matrix_file,
        compression='gzip',
        skiprows=skip_rows,
        comment='!',
        delimiter='\t',
        on_bad_lines='skip'
    )

    # Normalize the probe-identifier column name to 'ID'.
    if 'ID_REF' in gene_data.columns:
        gene_data.rename(columns={'ID_REF': 'ID'}, inplace=True)
    else:
        first_col = gene_data.columns[0]
        gene_data.rename(columns={first_col: 'ID'}, inplace=True)
    gene_data['ID'] = gene_data['ID'].astype(str)
    gene_data.set_index('ID', inplace=True)

# 2. Print the first 20 row IDs (gene or probe identifiers) for observation.
print(gene_data.index[:20])

# HTA2.0 probeset IDs are not gene symbols, so mapping is required.
requires_gene_mapping = True

# STEP 5: extract and preview gene annotation data if the SOFT file exists.
if soft_file is None:
    print("No SOFT file found. Skipping gene annotation extraction.")
    gene_annotation = pd.DataFrame()
else:
    try:
        # Attempt extraction with the default (UTF-8) method.
        gene_annotation = get_gene_annotation(soft_file)
    except UnicodeDecodeError:
        # Fallback: re-read with a lenient encoding and filter the content by
        # prefix, keeping only non-metadata lines. (gzip is already imported
        # above; the original re-imported it redundantly here.)
        with gzip.open(soft_file, 'rt', encoding='latin-1', errors='replace') as f:
            content = f.read()
        gene_annotation = filter_content_by_prefix(
            content,
            prefixes_a=['^', '!', '#'],
            unselect=True,
            source_type='string',
            return_df_a=True
        )[0]

print("Gene annotation preview:")
print(preview_df(gene_annotation))

# STEP 6: Gene identifier mapping.
# Probe-level data is mapped to gene symbols only when there is a genuine
# overlap between the expression data indices and the annotation IDs.
probe_column_candidates = ["ID", "probeset_id"]
gene_symbol_column_candidates = ["gene_assignment", "mrna_assignment"]

chosen_probe_col = None
chosen_symbol_col = None

# 1. Find an annotation column whose values actually overlap the
#    expression-matrix index.
for col in probe_column_candidates:
    if col in gene_annotation.columns:
        overlap = set(gene_annotation[col]) & set(gene_data.index)
        if len(overlap) > 0:
            chosen_probe_col = col
            break

# 2. Pick the first available gene-symbol column.
for col in gene_symbol_column_candidates:
    if col in gene_annotation.columns:
        chosen_symbol_col = col
        break

# If either column is missing, skip mapping and keep probe-level data.
if not chosen_probe_col or not chosen_symbol_col:
    print("No suitable probe or gene symbol columns found in the annotation. Skipping mapping.")
else:
    # Build a preliminary probe -> gene mapping frame.
    mapping_df = get_gene_mapping(
        gene_annotation,
        prob_col=chosen_probe_col,
        gene_col=chosen_symbol_col
    )

    # 3. Re-check overlap after get_gene_mapping drops invalid entries.
    mapped_ids = set(mapping_df["ID"].unique()) & set(gene_data.index)
    if len(mapped_ids) == 0:
        print("No overlapping probe IDs after cleaning. Skipping mapping.")
    else:
        # Genuine overlap exists: collapse probes to gene level.
        gene_data = apply_gene_mapping(gene_data, mapping_df)
        print("Gene-level mapping performed successfully.")
        print("Mapped gene_data shape:", gene_data.shape)
        print("First few gene symbols after mapping:", gene_data.index[:10].tolist())

import os

# STEP 7: data normalization and linking.
# (pandas is already imported as pd earlier in this script; the original
# re-imported it here redundantly.)
if not os.path.exists(out_clinical_data_file):
    # No clinical CSV was produced (trait_row was None), so the dataset
    # cannot support trait-based linking; record that and stop.
    df_null = pd.DataFrame()
    is_biased = True  # placeholder required by the validator's signature
    validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=True,
        is_trait_available=False,
        is_biased=is_biased,
        df=df_null,
        note="No trait data file found; dataset not usable for trait analysis."
    )
else:
    # 1. Normalize gene symbols via known synonyms, then save.
    #    Ensure the output directory exists before writing.
    os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
    normalized_gene_data = normalize_gene_symbols_in_index(gene_data)
    normalized_gene_data.to_csv(out_gene_data_file)

    # 2. Load the previously extracted clinical CSV; rename row 0 to the
    #    trait name.
    #    NOTE(review): read_csv is called without index_col, so row labels are
    #    positional — this assumes the trait occupies the first row of the
    #    saved CSV; confirm against the code that writes it.
    selected_clinical_df = pd.read_csv(out_clinical_data_file)
    selected_clinical_df = selected_clinical_df.rename(index={0: trait})

    # Only trait info is present here, so the clinical frame is used as-is.
    combined_clinical_df = selected_clinical_df

    # Link clinical and genetic data by matching sample IDs in the columns.
    linked_data = geo_link_clinical_genetic_data(combined_clinical_df, normalized_gene_data)

    # 3. Handle missing values (drop incomplete rows/columns, then impute).
    processed_data = handle_missing_values(linked_data, trait)

    # 4. Check trait bias and remove biased demographic features, if any.
    trait_biased, processed_data = judge_and_remove_biased_features(processed_data, trait)

    # 5. Final validation and metadata saving.
    is_usable = validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=True,
        is_trait_available=True,
        is_biased=trait_biased,
        df=processed_data,
        note="Completed trait-based preprocessing."
    )

    # 6. Persist the final dataset only when it passed validation.
    if is_usable:
        os.makedirs(os.path.dirname(out_data_file), exist_ok=True)
        processed_data.to_csv(out_data_file)