# --- START OF FILE app.py ---

import json
import gradio as gr
import pandas as pd
import plotly.express as px
import os
import numpy as np
import duckdb
from tqdm.auto import tqdm  # Standard tqdm for console; gr.Progress will track it
import time
import ast  # For safely evaluating string representations of lists/dicts

# --- Constants ---
MODEL_SIZE_RANGES = {
    "Small (<1GB)": (0, 1),
    "Medium (1-5GB)": (1, 5),
    "Large (5-20GB)": (5, 20),
    "X-Large (20-50GB)": (20, 50),
    "XX-Large (>50GB)": (50, float('inf')),
}

PROCESSED_PARQUET_FILE_PATH = "models_processed.parquet"
# Added for completeness within app.py context
HF_PARQUET_URL = 'https://huggingface.co/datasets/cfahlgren1/hub-stats/resolve/main/models.parquet'

TAG_FILTER_CHOICES = [
    "Audio & Speech", "Time series", "Robotics", "Music", "Video",
    "Images", "Text", "Biomedical", "Sciences",
]

PIPELINE_TAGS = [
    'text-generation', 'text-to-image', 'text-classification', 'text2text-generation',
    'audio-to-audio', 'feature-extraction', 'image-classification', 'translation',
    'reinforcement-learning', 'fill-mask', 'text-to-speech', 'automatic-speech-recognition',
    'image-text-to-text', 'token-classification', 'sentence-similarity', 'question-answering',
    'image-feature-extraction', 'summarization', 'zero-shot-image-classification',
    'object-detection', 'image-segmentation', 'image-to-image', 'image-to-text',
    'audio-classification', 'visual-question-answering', 'text-to-video',
    'zero-shot-classification', 'depth-estimation', 'text-ranking', 'image-to-video',
    'multiple-choice', 'unconditional-image-generation', 'video-classification',
    'text-to-audio', 'time-series-forecasting', 'any-to-any', 'video-text-to-text',
    'table-question-answering',
]


def extract_model_size(safetensors_data):
    try:
        if pd.isna(safetensors_data):
            return 0.0
        data_to_parse = safetensors_data
        if isinstance(safetensors_data, str):
            try:
                if (safetensors_data.startswith('{') and safetensors_data.endswith('}')) or \
                   (safetensors_data.startswith('[') and safetensors_data.endswith(']')):
                    data_to_parse = ast.literal_eval(safetensors_data)
                else:
                    data_to_parse = json.loads(safetensors_data)
            except Exception:
                return 0.0
        if isinstance(data_to_parse, dict) and 'total' in data_to_parse:
            try:
                total_bytes_val = data_to_parse['total']
                size_bytes = float(total_bytes_val)
                return size_bytes / (1024 * 1024 * 1024)
            except (ValueError, TypeError):
                pass
        return 0.0
    except Exception:
        return 0.0


def extract_org_from_id(model_id):
    if pd.isna(model_id):
        return "unaffiliated"
    model_id_str = str(model_id)
    return model_id_str.split("/")[0] if "/" in model_id_str else "unaffiliated"
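
# Illustrative (hypothetical) inputs for the two helpers above:
#   extract_model_size('{"total": 2147483648}')   -> 2.0  (bytes / 1024**3)
#   extract_model_size({'total': 'not-a-number'}) -> 0.0  (coercion failure falls through)
#   extract_org_from_id("google/flan-t5-base")    -> "google"
#   extract_org_from_id("bert-base-uncased")      -> "unaffiliated"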

def process_tags_for_series(series_of_tags_values):
    processed_tags_accumulator = []

    for i, tags_value_from_series in enumerate(
        tqdm(series_of_tags_values, desc="Standardizing Tags", leave=False, unit="row")
    ):
        temp_processed_list_for_row = []
        current_value_for_error_msg = str(tags_value_from_series)[:200]  # Truncate long values for error messages

        try:
            # Order of checks is important!
            # 1. Handle explicit Python lists first.
            if isinstance(tags_value_from_series, list):
                current_tags_in_list = []
                for idx_tag, tag_item in enumerate(tags_value_from_series):
                    try:
                        # Ensure the item is not NaN before string conversion
                        # (it might be a float NaN inside a list).
                        if pd.isna(tag_item):
                            continue
                        str_tag = str(tag_item)
                        stripped_tag = str_tag.strip()
                        if stripped_tag:
                            current_tags_in_list.append(stripped_tag)
                    except Exception as e_inner_list_proc:
                        print(f"ERROR processing item '{tag_item}' (type: {type(tag_item)}) within a list for row {i}. Error: {e_inner_list_proc}. Original list: {current_value_for_error_msg}")
                temp_processed_list_for_row = current_tags_in_list

            # 2. Handle NumPy arrays: convert to a list, then process elements,
            #    handling potential NaNs within the array.
            elif isinstance(tags_value_from_series, np.ndarray):
                current_tags_in_list = []
                for idx_tag, tag_item in enumerate(tags_value_from_series.tolist()):  # .tolist() is crucial
                    try:
                        if pd.isna(tag_item):  # Check for NaN after converting to a Python type
                            continue
                        str_tag = str(tag_item)
                        stripped_tag = str_tag.strip()
                        if stripped_tag:
                            current_tags_in_list.append(stripped_tag)
                    except Exception as e_inner_array_proc:
                        print(f"ERROR processing item '{tag_item}' (type: {type(tag_item)}) within a NumPy array for row {i}. Error: {e_inner_array_proc}. Original array: {current_value_for_error_msg}")
                temp_processed_list_for_row = current_tags_in_list

            # 3. Handle scalar None or pd.NA after lists and arrays (which might
            #    contain pd.NA elements handled above); pd.isna is safe for scalars here.
            elif tags_value_from_series is None or pd.isna(tags_value_from_series):
                temp_processed_list_for_row = []

            # 4. Handle strings (could be JSON-like, list-like, or comma-separated).
            elif isinstance(tags_value_from_series, str):
                processed_str_tags = []

                # Attempt ast.literal_eval for strings that look like lists/tuples.
                if (tags_value_from_series.startswith('[') and tags_value_from_series.endswith(']')) or \
                   (tags_value_from_series.startswith('(') and tags_value_from_series.endswith(')')):
                    try:
                        evaluated_tags = ast.literal_eval(tags_value_from_series)
                        if isinstance(evaluated_tags, (list, tuple)):
                            # The evaluated elements could in principle be complex;
                            # for simplicity, assume they are simple strings after eval.
                            current_eval_list = []
                            for tag_item in evaluated_tags:
                                if pd.isna(tag_item):
                                    continue
                                str_tag = str(tag_item).strip()
                                if str_tag:
                                    current_eval_list.append(str_tag)
                            processed_str_tags = current_eval_list
                    except (ValueError, SyntaxError):
                        pass  # If ast.literal_eval fails, fall through to JSON or comma splitting.

                # If ast.literal_eval didn't populate anything, try JSON.
                if not processed_str_tags:
                    try:
                        json_tags = json.loads(tags_value_from_series)
                        if isinstance(json_tags, list):
                            # As above, assume elements are simple strings after JSON parsing.
                            current_json_list = []
                            for tag_item in json_tags:
                                if pd.isna(tag_item):
                                    continue
                                str_tag = str(tag_item).strip()
                                if str_tag:
                                    current_json_list.append(str_tag)
                            processed_str_tags = current_json_list
                    except json.JSONDecodeError:
                        # Not a valid JSON list: fall back to comma splitting as the final string strategy.
                        processed_str_tags = [tag.strip() for tag in tags_value_from_series.split(',') if tag.strip()]
                    except Exception as e_json_other:
                        print(f"ERROR during JSON processing for string '{current_value_for_error_msg}' for row {i}. Error: {e_json_other}")
                        processed_str_tags = [tag.strip() for tag in tags_value_from_series.split(',') if tag.strip()]  # Fallback

                temp_processed_list_for_row = processed_str_tags

            # 5. Fallback for other scalar types (e.g., int, or float values that are
            #    not NaN), plus any NaNs that slipped through (e.g. float('nan')).
            else:
                if pd.isna(tags_value_from_series):  # Catch any remaining NaNs like float('nan')
                    temp_processed_list_for_row = []
                else:
                    str_val = str(tags_value_from_series).strip()
                    temp_processed_list_for_row = [str_val] if str_val else []

            processed_tags_accumulator.append(temp_processed_list_for_row)

        except Exception as e_outer_tag_proc:
            print(f"CRITICAL UNHANDLED ERROR processing row {i}: value '{current_value_for_error_msg}' (type: {type(tags_value_from_series)}). Error: {e_outer_tag_proc}. Appending [].")
            processed_tags_accumulator.append([])

    return processed_tags_accumulator
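
# Examples of the normalization the function above performs (hypothetical rows):
#   ["nlp", " vision "]         -> ["nlp", "vision"]
#   np.array(["audio", None])   -> ["audio"]
#   '["a", "b"]'                -> ["a", "b"]        (ast.literal_eval path)
#   "a, b , ,c"                 -> ["a", "b", "c"]   (comma-split fallback)
#   None / float('nan')         -> []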

def load_models_data(force_refresh=False, tqdm_cls=None):
    if tqdm_cls is None:
        tqdm_cls = tqdm
    overall_start_time = time.time()
    print(f"Gradio load_models_data called with force_refresh={force_refresh}")

    expected_cols_in_processed_parquet = [
        'id', 'downloads', 'downloadsAllTime', 'likes', 'pipeline_tag', 'tags',
        'params', 'size_category', 'organization',
        'has_audio', 'has_speech', 'has_music', 'has_robot', 'has_bio', 'has_med',
        'has_series', 'has_video', 'has_image', 'has_text', 'has_science',
        'is_audio_speech', 'is_biomed', 'data_download_timestamp',
    ]

    if not force_refresh and os.path.exists(PROCESSED_PARQUET_FILE_PATH):
        print(f"Attempting to load pre-processed data from: {PROCESSED_PARQUET_FILE_PATH}")
        try:
            df = pd.read_parquet(PROCESSED_PARQUET_FILE_PATH)
            elapsed = time.time() - overall_start_time

            missing_cols = [col for col in expected_cols_in_processed_parquet if col not in df.columns]
            if missing_cols:
                raise ValueError(
                    f"Pre-processed Parquet is missing columns: {missing_cols}. "
                    "Please run the preprocessor or refresh data in the app."
                )

            # --- Diagnostic for 'has_robot' after loading parquet ---
            if 'has_robot' in df.columns:
                robot_count_parquet = df['has_robot'].sum()
                print(f"DIAGNOSTIC (App - Parquet Load): 'has_robot' column found. Number of True values: {robot_count_parquet}")
                if 0 < robot_count_parquet < 10:
                    print(f"Sample 'has_robot' models (from parquet): {df[df['has_robot']]['id'].head().tolist()}")
            else:
                print("DIAGNOSTIC (App - Parquet Load): 'has_robot' column NOT FOUND.")
            # --- End Diagnostic ---

            msg = f"Successfully loaded pre-processed data in {elapsed:.2f}s. Shape: {df.shape}"
            print(msg)
            return df, True, msg
        except Exception as e:
            print(f"Could not load pre-processed Parquet: {e}.")
            if force_refresh:
                print("Proceeding to fetch fresh data as force_refresh=True.")
            else:
                err_msg = (f"Pre-processed data could not be loaded: {e}. "
                           "Please use the 'Refresh Data from Hugging Face' button.")
                return pd.DataFrame(), False, err_msg

    df_raw = None
    raw_data_source_msg = ""
    if force_refresh:
        print("force_refresh=True (Gradio). Fetching fresh data...")
        fetch_start = time.time()
        try:
            query = f"SELECT * FROM read_parquet('{HF_PARQUET_URL}')"  # Ensure HF_PARQUET_URL is defined
            df_raw = duckdb.sql(query).df()
            if df_raw is None or df_raw.empty:
                raise ValueError("Fetched data is empty or None.")
            raw_data_source_msg = f"Fetched by Gradio in {time.time() - fetch_start:.2f}s. Rows: {len(df_raw)}"
            print(raw_data_source_msg)
        except Exception as e_hf:
            return pd.DataFrame(), False, f"Fatal error fetching from Hugging Face (Gradio): {e_hf}"
    else:
        err_msg = (f"Pre-processed data '{PROCESSED_PARQUET_FILE_PATH}' not found/invalid. "
                   "Run the preprocessor or use the 'Refresh Data' button.")
        return pd.DataFrame(), False, err_msg
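
    # A minimal sketch of the remote fetch above for quick local experiments;
    # the LIMIT clause is a hypothetical addition, not something the app does:
    #   duckdb.sql(f"SELECT id, downloads FROM read_parquet('{HF_PARQUET_URL}') LIMIT 100").df()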
    print(f"Initiating processing for data newly fetched by Gradio. {raw_data_source_msg}")
    df = pd.DataFrame()
    proc_start = time.time()

    core_cols = {
        'id': str, 'downloads': float, 'downloadsAllTime': float, 'likes': float,
        'pipeline_tag': str, 'tags': object, 'safetensors': object,
    }
    for col, dtype in core_cols.items():
        if col in df_raw.columns:
            df[col] = df_raw[col]
            if dtype == float:
                df[col] = pd.to_numeric(df[col], errors='coerce').fillna(0.0)
            elif dtype == str:
                df[col] = df[col].astype(str).fillna('')
        else:
            if col in ['downloads', 'downloadsAllTime', 'likes']:
                df[col] = 0.0
            elif col == 'pipeline_tag':
                df[col] = ''
            elif col == 'tags':
                df[col] = pd.Series([[] for _ in range(len(df_raw))])
            elif col == 'safetensors':
                df[col] = None
            elif col == 'id':
                return pd.DataFrame(), False, "Critical: 'id' column missing."

    output_filesize_col_name = 'params'
    if output_filesize_col_name in df_raw.columns and pd.api.types.is_numeric_dtype(df_raw[output_filesize_col_name]):
        df[output_filesize_col_name] = pd.to_numeric(df_raw[output_filesize_col_name], errors='coerce').fillna(0.0)
    elif 'safetensors' in df.columns:
        safetensors_iter = df['safetensors']
        if tqdm_cls != tqdm:
            safetensors_iter = tqdm_cls(df['safetensors'], desc="Extracting model sizes (GB)")
        df[output_filesize_col_name] = [extract_model_size(s) for s in safetensors_iter]
        df[output_filesize_col_name] = pd.to_numeric(df[output_filesize_col_name], errors='coerce').fillna(0.0)
    else:
        df[output_filesize_col_name] = 0.0

    def get_size_category_gradio(size_gb_val):
        try:
            numeric_size_gb = float(size_gb_val)
        except (ValueError, TypeError):
            numeric_size_gb = 0.0
        if pd.isna(numeric_size_gb):
            numeric_size_gb = 0.0
        if 0 <= numeric_size_gb < 1:
            return "Small (<1GB)"
        elif 1 <= numeric_size_gb < 5:
            return "Medium (1-5GB)"
        elif 5 <= numeric_size_gb < 20:
            return "Large (5-20GB)"
        elif 20 <= numeric_size_gb < 50:
            return "X-Large (20-50GB)"
        elif numeric_size_gb >= 50:
            return "XX-Large (>50GB)"
        else:
            return "Small (<1GB)"

    df['size_category'] = df[output_filesize_col_name].apply(get_size_category_gradio)

    df['tags'] = process_tags_for_series(df['tags'])
    df['temp_tags_joined'] = df['tags'].apply(
        lambda tl: '~~~'.join(str(t).lower() for t in tl if pd.notna(t) and str(t).strip())
        if isinstance(tl, list) else ''
    )

    tag_map = {
        'has_audio': ['audio'], 'has_speech': ['speech'], 'has_music': ['music'],
        'has_robot': ['robot', 'robotics'], 'has_bio': ['bio'], 'has_med': ['medic', 'medical'],
        'has_series': ['series', 'time-series', 'timeseries'], 'has_video': ['video'],
        'has_image': ['image', 'vision'], 'has_text': ['text', 'nlp', 'llm'],
    }
    for col, kws in tag_map.items():
        pattern = '|'.join(kws)
        df[col] = df['temp_tags_joined'].str.contains(pattern, na=False, case=False, regex=True)

    df['has_science'] = (
        df['temp_tags_joined'].str.contains('science', na=False, case=False, regex=True)
        & ~df['temp_tags_joined'].str.contains('bigscience', na=False, case=False, regex=True)
    )
    del df['temp_tags_joined']

    df['is_audio_speech'] = (df['has_audio'] | df['has_speech']
                             | df['pipeline_tag'].str.contains('audio|speech', case=False, na=False, regex=True))
    df['is_biomed'] = df['has_bio'] | df['has_med']
    df['organization'] = df['id'].apply(extract_org_from_id)

    if 'safetensors' in df.columns and \
       not (output_filesize_col_name in df_raw.columns and pd.api.types.is_numeric_dtype(df_raw[output_filesize_col_name])):
        df = df.drop(columns=['safetensors'], errors='ignore')

    # --- Diagnostic for 'has_robot' after app-side processing (force_refresh path) ---
    if force_refresh and 'has_robot' in df.columns:
        robot_count_app_proc = df['has_robot'].sum()
        print(f"DIAGNOSTIC (App - Force Refresh Processing): 'has_robot' column processed. Number of True values: {robot_count_app_proc}")
        if 0 < robot_count_app_proc < 10:
            print(f"Sample 'has_robot' models (App processed): {df[df['has_robot']]['id'].head().tolist()}")
    # --- End Diagnostic ---

    print(f"Data processing by Gradio completed in {time.time() - proc_start:.2f}s.")
    total_elapsed = time.time() - overall_start_time
    final_msg = (f"{raw_data_source_msg}. Processing by Gradio took {time.time() - proc_start:.2f}s. "
                 f"Total: {total_elapsed:.2f}s. Shape: {df.shape}")
    print(final_msg)
    return df, True, final_msg
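
# How the keyword flags above behave on sample tag lists (hypothetical):
#   ["robotics", "lerobot"]      -> has_robot=True   ("robot" matches as a substring)
#   ["bigscience"]               -> has_science=False (explicitly excluded)
#   ["time-series-forecasting"]  -> has_series=True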

def make_treemap_data(df, count_by, top_k=25, tag_filter=None, pipeline_filter=None, size_filter=None, skip_orgs=None):
    if df is None or df.empty:
        return pd.DataFrame()

    filtered_df = df.copy()
    col_map = {
        "Audio & Speech": "is_audio_speech", "Music": "has_music", "Robotics": "has_robot",
        "Biomedical": "is_biomed", "Time series": "has_series", "Sciences": "has_science",
        "Video": "has_video", "Images": "has_image", "Text": "has_text",
    }

    # --- Diagnostic within make_treemap_data ---
    if 'has_robot' in filtered_df.columns:
        initial_robot_count = filtered_df['has_robot'].sum()
        print(f"DIAGNOSTIC (make_treemap_data entry): Input df has {initial_robot_count} 'has_robot' models.")
    else:
        print("DIAGNOSTIC (make_treemap_data entry): 'has_robot' column NOT in input df.")
    # --- End Diagnostic ---

    if tag_filter and tag_filter in col_map:
        target_col = col_map[tag_filter]
        if target_col in filtered_df.columns:
            # --- Diagnostic for specific 'Robotics' filter application ---
            if tag_filter == "Robotics":
                count_before_robot_filter = filtered_df[target_col].sum()
                print(f"DIAGNOSTIC (make_treemap_data): Applying 'Robotics' filter. Models with '{target_col}'=True before this filter step: {count_before_robot_filter}")
            # --- End Diagnostic ---
            filtered_df = filtered_df[filtered_df[target_col]]
            if tag_filter == "Robotics":
                print(f"DIAGNOSTIC (make_treemap_data): After 'Robotics' filter ({target_col}), df rows: {len(filtered_df)}")
        else:
            print(f"Warning: Tag filter column '{col_map[tag_filter]}' not found in DataFrame.")

    if pipeline_filter:
        if "pipeline_tag" in filtered_df.columns:
            filtered_df = filtered_df[filtered_df["pipeline_tag"] == pipeline_filter]
        else:
            print("Warning: 'pipeline_tag' column not found for filtering.")

    if size_filter and size_filter != "None" and size_filter in MODEL_SIZE_RANGES.keys():
        if 'size_category' in filtered_df.columns:
            filtered_df = filtered_df[filtered_df['size_category'] == size_filter]
        else:
            print("Warning: 'size_category' column not found for filtering.")

    if skip_orgs and len(skip_orgs) > 0:
        if "organization" in filtered_df.columns:
            filtered_df = filtered_df[~filtered_df["organization"].isin(skip_orgs)]
        else:
            print("Warning: 'organization' column not found for filtering.")

    if filtered_df.empty:
        return pd.DataFrame()

    if count_by not in filtered_df.columns or not pd.api.types.is_numeric_dtype(filtered_df[count_by]):
        filtered_df[count_by] = pd.to_numeric(filtered_df.get(count_by), errors="coerce").fillna(0.0)

    org_totals = filtered_df.groupby("organization")[count_by].sum().nlargest(top_k, keep='first')
    top_orgs_list = org_totals.index.tolist()

    treemap_data = filtered_df[filtered_df["organization"].isin(top_orgs_list)][["id", "organization", count_by]].copy()
    treemap_data["root"] = "models"
    treemap_data[count_by] = pd.to_numeric(treemap_data[count_by], errors="coerce").fillna(0.0)
    return treemap_data
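
# Shape of the DataFrame returned above (illustrative values):
#   id                      organization   downloads   root
#   "google/flan-t5-base"   "google"       123456.0    "models"
# The constant "root" column gives Plotly a single top-level treemap node.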

def create_treemap(treemap_data, count_by, title=None):
    if treemap_data.empty:
        fig = px.treemap(names=["No data matches filters"], parents=[""], values=[1])
        fig.update_layout(title="No data matches the selected filters", margin=dict(t=50, l=25, r=25, b=25))
        return fig

    fig = px.treemap(
        treemap_data,
        path=["root", "organization", "id"],
        values=count_by,
        title=title or f"HuggingFace Models - {count_by.capitalize()} by Organization",
        color_discrete_sequence=px.colors.qualitative.Plotly,
    )
    fig.update_layout(margin=dict(t=50, l=25, r=25, b=25))
    fig.update_traces(
        textinfo="label+value+percent root",
        hovertemplate="%{label}<br>%{value:,} " + count_by + "<br>%{percentRoot:.2%} of total",
    )
    return fig
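
# Minimal standalone usage of the pipeline above (hypothetical, outside the Gradio app):
#   df, ok, msg = load_models_data()
#   if ok:
#       create_treemap(make_treemap_data(df, "downloads", top_k=10), "downloads").show()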

with gr.Blocks(title="HuggingFace Model Explorer", fill_width=True) as demo:
    models_data_state = gr.State(pd.DataFrame())
    loading_complete_state = gr.State(False)

    with gr.Row():
        gr.Markdown("# HuggingFace Models TreeMap Visualization")

    with gr.Row():
        with gr.Column(scale=1):
            count_by_dropdown = gr.Dropdown(
                label="Metric",
                choices=[("Downloads (last 30 days)", "downloads"),
                         ("Downloads (All Time)", "downloadsAllTime"),
                         ("Likes", "likes")],
                value="downloads",
            )
            filter_choice_radio = gr.Radio(label="Filter Type", choices=["None", "Tag Filter", "Pipeline Filter"], value="None")
            tag_filter_dropdown = gr.Dropdown(label="Select Tag", choices=TAG_FILTER_CHOICES, value=None, visible=False)
            pipeline_filter_dropdown = gr.Dropdown(label="Select Pipeline Tag", choices=PIPELINE_TAGS, value=None, visible=False)
            size_filter_dropdown = gr.Dropdown(label="Model Size Filter", choices=["None"] + list(MODEL_SIZE_RANGES.keys()), value="None")
            top_k_slider = gr.Slider(label="Number of Top Organizations", minimum=5, maximum=50, value=25, step=5)
            skip_orgs_textbox = gr.Textbox(
                label="Organizations to Skip (comma-separated)",
                value="TheBloke,MaziyarPanahi,unsloth,modularai,Gensyn,bartowski",
            )
            generate_plot_button = gr.Button(value="Generate Plot", variant="primary", interactive=False)
            refresh_data_button = gr.Button(value="Refresh Data from Hugging Face", variant="secondary")

        with gr.Column(scale=3):
            plot_output = gr.Plot()
            status_message_md = gr.Markdown("Initializing...")
            data_info_md = gr.Markdown("")

    def _update_button_interactivity(is_loaded_flag):
        return gr.update(interactive=is_loaded_flag)

    loading_complete_state.change(fn=_update_button_interactivity, inputs=loading_complete_state, outputs=generate_plot_button)

    def _toggle_filters_visibility(choice):
        return gr.update(visible=choice == "Tag Filter"), gr.update(visible=choice == "Pipeline Filter")

    filter_choice_radio.change(fn=_toggle_filters_visibility, inputs=filter_choice_radio, outputs=[tag_filter_dropdown, pipeline_filter_dropdown])
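
    # Note: the two filter dropdowns are mutually exclusive by construction; the
    # Radio callback above shows at most one of them ("None" hides both), and
    # ui_generate_plot_controller below only reads the dropdown that matches the
    # selected filter type.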

    def ui_load_data_controller(force_refresh_ui_trigger=False, progress=gr.Progress(track_tqdm=True)):
        print(f"ui_load_data_controller called with force_refresh_ui_trigger={force_refresh_ui_trigger}")
        status_msg_ui = "Loading data..."
        data_info_text = ""
        current_df = pd.DataFrame()
        load_success_flag = False
        data_as_of_date_display = "N/A"
        try:
            current_df, load_success_flag, status_msg_from_load = load_models_data(
                force_refresh=force_refresh_ui_trigger, tqdm_cls=progress.tqdm
            )
            if load_success_flag:
                if force_refresh_ui_trigger:
                    data_as_of_date_display = pd.Timestamp.now(tz='UTC').strftime('%B %d, %Y, %H:%M:%S %Z')
                elif 'data_download_timestamp' in current_df.columns and not current_df.empty \
                        and pd.notna(current_df['data_download_timestamp'].iloc[0]):
                    timestamp_from_parquet = pd.to_datetime(current_df['data_download_timestamp'].iloc[0])
                    if timestamp_from_parquet.tzinfo is None:
                        timestamp_from_parquet = timestamp_from_parquet.tz_localize('UTC')
                    data_as_of_date_display = timestamp_from_parquet.strftime('%B %d, %Y, %H:%M:%S %Z')
                else:
                    data_as_of_date_display = "Pre-processed (date unavailable)"

                size_dist_lines = []
                if 'size_category' in current_df.columns:
                    for cat in MODEL_SIZE_RANGES.keys():
                        count = (current_df['size_category'] == cat).sum()
                        size_dist_lines.append(f" - {cat}: {count:,} models")
                else:
                    size_dist_lines.append(" - Size category information not available.")
                size_dist = "\n".join(size_dist_lines)

                data_info_text = (f"### Data Information\n"
                                  f"- Overall Status: {status_msg_from_load}\n"
                                  f"- Total models loaded: {len(current_df):,}\n"
                                  f"- Data as of: {data_as_of_date_display}\n"
                                  f"- Size categories:\n{size_dist}")

                # # --- MODIFICATION: Add 'has_robot' count to UI data_info_text ---
                # if not current_df.empty and 'has_robot' in current_df.columns:
                #     robot_true_count = current_df['has_robot'].sum()
                #     data_info_text += f"\n- **Models flagged 'has_robot'**: {robot_true_count}"
                #     if 0 < robot_true_count <= 10:  # If a few are found, list some IDs
                #         sample_robot_ids = current_df[current_df['has_robot']]['id'].head(5).tolist()
                #         data_info_text += f"\n  - Sample 'has_robot' model IDs: `{', '.join(sample_robot_ids)}`"
                # elif not current_df.empty:
                #     data_info_text += "\n- **Models flagged 'has_robot'**: 'has_robot' column not found in loaded data."
                # # --- END MODIFICATION ---

                status_msg_ui = "Data loaded successfully. Ready to generate plot."
            else:
                data_info_text = f"### Data Load Failed\n- {status_msg_from_load}"
                status_msg_ui = status_msg_from_load
        except Exception as e:
            status_msg_ui = f"An unexpected error occurred in ui_load_data_controller: {str(e)}"
            data_info_text = f"### Critical Error\n- {status_msg_ui}"
            print(f"Critical error in ui_load_data_controller: {e}")
            load_success_flag = False
        return current_df, load_success_flag, data_info_text, status_msg_ui
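
    # Timestamp handling above assumes naive parquet timestamps are UTC, e.g.
    #   pd.to_datetime("2024-05-01 12:00").tz_localize("UTC").strftime('%B %d, %Y, %H:%M:%S %Z')
    #   -> "May 01, 2024, 12:00:00 UTC"
    # gr.Progress(track_tqdm=True) also mirrors the console tqdm bars (such as
    # "Standardizing Tags") into the Gradio UI while this controller runs.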

    def ui_generate_plot_controller(metric_choice, filter_type, tag_choice, pipeline_choice,
                                    size_choice, k_orgs, skip_orgs_input, df_current_models):
        if df_current_models is None or df_current_models.empty:
            empty_fig = create_treemap(pd.DataFrame(), metric_choice, "Error: Model Data Not Loaded")
            error_msg = "Model data is not loaded or is empty. Please load or refresh data first."
            gr.Warning(error_msg)
            return empty_fig, error_msg

        tag_to_use = tag_choice if filter_type == "Tag Filter" else None
        pipeline_to_use = pipeline_choice if filter_type == "Pipeline Filter" else None
        size_to_use = size_choice if size_choice != "None" else None
        orgs_to_skip = [org.strip() for org in skip_orgs_input.split(',') if org.strip()] if skip_orgs_input else []

        # --- Diagnostic before calling make_treemap_data ---
        if 'has_robot' in df_current_models.columns:
            robot_count_before_treemap = df_current_models['has_robot'].sum()
            print(f"DIAGNOSTIC (ui_generate_plot_controller): df_current_models entering make_treemap_data has {robot_count_before_treemap} 'has_robot' models.")
        # --- End Diagnostic ---

        treemap_df = make_treemap_data(df_current_models, metric_choice, k_orgs, tag_to_use, pipeline_to_use, size_to_use, orgs_to_skip)

        title_labels = {"downloads": "Downloads (last 30 days)",
                        "downloadsAllTime": "Downloads (All Time)",
                        "likes": "Likes"}
        chart_title = f"HuggingFace Models - {title_labels.get(metric_choice, metric_choice)} by Organization"
        plotly_fig = create_treemap(treemap_df, metric_choice, chart_title)

        if treemap_df.empty:
            plot_stats_md = "No data matches the selected filters. Try adjusting your filters."
        else:
            total_items_in_plot = len(treemap_df['id'].unique())
            total_value_in_plot = treemap_df[metric_choice].sum()
            plot_stats_md = (f"## Plot Statistics\n"
                             f"- **Models shown**: {total_items_in_plot:,}\n"
                             f"- **Total {metric_choice}**: {int(total_value_in_plot):,}")

        return plotly_fig, plot_stats_md

    demo.load(
        fn=lambda progress=gr.Progress(track_tqdm=True): ui_load_data_controller(force_refresh_ui_trigger=False, progress=progress),
        inputs=[],
        outputs=[models_data_state, loading_complete_state, data_info_md, status_message_md]
    )

    refresh_data_button.click(
        fn=lambda progress=gr.Progress(track_tqdm=True): ui_load_data_controller(force_refresh_ui_trigger=True, progress=progress),
        inputs=[],
        outputs=[models_data_state, loading_complete_state, data_info_md, status_message_md]
    )

    generate_plot_button.click(
        fn=ui_generate_plot_controller,
        inputs=[count_by_dropdown, filter_choice_radio, tag_filter_dropdown, pipeline_filter_dropdown,
                size_filter_dropdown, top_k_slider, skip_orgs_textbox, models_data_state],
        outputs=[plot_output, status_message_md]
    )
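
# To expose the app beyond localhost, launch() accepts the usual Gradio options
# (a hypothetical example, not what this script does by default):
#   demo.launch(server_name="0.0.0.0", share=True)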

if __name__ == "__main__":
    if not os.path.exists(PROCESSED_PARQUET_FILE_PATH):
        print(f"WARNING: Pre-processed data file '{PROCESSED_PARQUET_FILE_PATH}' not found.")
        print("It is highly recommended to run the preprocessing script (e.g., preprocess.py) first.")
    else:
        print(f"Found pre-processed data file: '{PROCESSED_PARQUET_FILE_PATH}'.")
    demo.launch()

# --- END OF FILE app.py ---