import sys

import gradio as gr
import pandas as pd
import plotly.express as px
from gradio.themes.utils import colors

from results.parse import parse_agg, read_data
from static.about import CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT
from style.css_html_js import custom_css
from utils import filter_bench, filter_bench_all, filter_RTLRepo, handle_special_cases


def filter_leaderboard(task, benchmark, model_type, search_query, max_params):
    """Filter the leaderboard table by task, benchmark, model type, search query,
    and maximum parameter count. Relies on the globals defined in the Blocks
    context below (df, df_agg, and the per-task benchmark lists)."""
    subset = df.copy()

    # Restrict to the task-specific benchmarks when 'All' benchmarks is selected
    if task == "Spec-to-RTL":
        valid_benchmarks = s2r_benchs
        if benchmark == "All":
            subset = subset[subset["Benchmark"].isin(valid_benchmarks)]
    elif task == "Code Completion":
        valid_benchmarks = cc_benchs
        if benchmark == "All":
            subset = subset[subset["Benchmark"].isin(valid_benchmarks)]
    elif task == "Line Completion":
        valid_benchmarks = lc_benchs
        if benchmark == "All":
            subset = subset[subset["Benchmark"].isin(valid_benchmarks)]

    if benchmark != "All":
        subset = df[df["Benchmark"] == benchmark]
    if model_type != "All":
        # Drop the emoji suffix from the dropdown label before matching
        subset = subset[subset["Model Type"] == model_type.split(" ")[0]]
    if search_query:
        subset = subset[
            subset["Model"].str.contains(search_query, case=False, na=False)
        ]

    max_params = float(max_params)
    subset = subset[subset["Params"] <= max_params]

    if benchmark == "All":
        if task == "Spec-to-RTL":
            return filter_bench_all(subset, df_agg, agg_column="Agg S2R")
        elif task == "Code Completion":
            return filter_bench_all(subset, df_agg, agg_column="Agg MC")
        elif task == "Line Completion":
            return filter_RTLRepo(subset)
    elif benchmark == "RTL-Repo":
        return filter_RTLRepo(subset)
    else:
        agg_column = None
        if benchmark == "VerilogEval S2R":
            agg_column = "Agg VerilogEval S2R"
        elif benchmark == "VerilogEval MC":
            agg_column = "Agg VerilogEval MC"
        elif benchmark == "RTLLM":
            agg_column = "Agg RTLLM"
        elif benchmark == "VeriGen":
            agg_column = "Agg VeriGen"
        return filter_bench(subset, df_agg, agg_column)


def update_benchmarks_by_task(task):
    """Update the benchmark dropdown choices when the task changes and return
    the dropdown update together with the re-filtered leaderboard."""
    if task == "Spec-to-RTL":
        new_benchmarks = ["All"] + s2r_benchs
    elif task == "Code Completion":
        new_benchmarks = ["All"] + cc_benchs
    elif task == "Line Completion":
        new_benchmarks = lc_benchs
    else:
        new_benchmarks = ["All"] + benchmarks
    benchmark_value = "All" if "All" in new_benchmarks else new_benchmarks[0]
    filtered = filter_leaderboard(
        task,
        benchmark_value,
        model_type_dropdown.value,
        search_box.value,
        params_slider.value,
    )
    return gr.update(value=benchmark_value, choices=new_benchmarks), filtered


def generate_scatter_plot(benchmark, metric):
    """Build a parameter-count vs. score scatter plot for the selected
    benchmark and metric."""
    benchmark, metric = handle_special_cases(benchmark, metric)

    subset = df[df["Benchmark"] == benchmark]
    if benchmark == "RTL-Repo":
        # RTL-Repo only reports Exact Matching, averaged per model
        subset = subset[subset["Metric"].str.contains("EM", case=False, na=False)]
        detailed_scores = subset.groupby("Model", as_index=False)["Score"].mean()
        detailed_scores.rename(columns={"Score": "Exact Matching (EM)"}, inplace=True)
    else:
        detailed_scores = subset.pivot_table(
            index="Model", columns="Metric", values="Score"
        ).reset_index()

    details = df[["Model", "Params", "Model Type"]].drop_duplicates("Model")
    scatter_data = pd.merge(detailed_scores, details, on="Model", how="left").dropna(
        subset=["Params", metric]
    )

    scatter_data["x"] = scatter_data["Params"]
    scatter_data["y"] = scatter_data[metric]
    scatter_data["size"] = (scatter_data["x"] ** 0.3) * 40

    type_colors = {"General": "green", "Coding": "yellow", "RTL-Specific": "blue"}
    scatter_data["color"] = scatter_data["Model Type"].map(type_colors).fillna("gray")

    y_axis_limits = {
        "Functionality (FNC)": [5, 90],
        "Syntax (STX)": [20, 100],
        "Synthesis (SYN)": [5, 90],
        "Power": [0, 50],
        "Performance": [0, 50],
        "Area": [0, 50],
        "Exact Matching (EM)": [0, 50],
    }
    y_range = y_axis_limits.get(metric, [0, 80])

    fig = px.scatter(
        scatter_data,
        x="x",
        y="y",
        log_x=True,
        size="size",
        color="Model Type",
        text="Model",
        hover_data={metric: ":.2f"},
        title=f"Params vs. {metric} for {benchmark}",
        labels={"x": "# Params (Log Scale)", "y": metric},
        template="plotly_white",
        height=600,
        width=1200,
    )
    fig.update_traces(
        textposition="top center",
        textfont_size=10,
        marker=dict(opacity=0.8, line=dict(width=0.5, color="black")),
    )
    fig.update_layout(
        xaxis=dict(
            showgrid=True,
            type="log",
            tickmode="array",
            tickvals=[8, 14, 32, 72, 200, 700],
            ticktext=["8", "14", "32", "72", "200", "700"],
        ),
        showlegend=False,
        yaxis=dict(range=y_range),
        margin=dict(l=50, r=50, t=50, b=50),
        plot_bgcolor="white",
    )
    return fig


# Force the light theme by rewriting the URL query parameter on page load
js_func = """
function refresh() {
    const url = new URL(window.location);
    if (url.searchParams.get('__theme') !== 'light') {
        url.searchParams.set('__theme', 'light');
        window.location.href = url.href;
    }
}
"""

with gr.Blocks(
    css=custom_css, js=js_func, theme=gr.themes.Default(primary_hue=colors.emerald)
) as app:
    df, benchmarks, metrics, default_metric = read_data()
    df_agg = parse_agg("./results/aggregated_scores.csv")

    tasks = ["Spec-to-RTL", "Code Completion", "Line Completion"]
    s2r_benchs = ["VerilogEval S2R", "RTLLM"]
    cc_benchs = ["VerilogEval MC", "VeriGen"]
    lc_benchs = ["RTL-Repo"]
    non_rtl_metrics = [
        "Syntax (STX)",
        "Functionality (FNC)",
        "Synthesis (SYN)",
        "Power",
        "Performance",
        "Area",
    ]
    rtl_metrics = ["Exact Matching (EM)"]
    model_types = ["All", "General 🟢", "Coding 🔵", "RTL-Specific 🔴"]

    gr.HTML(
        """
Welcome to the TuRTLe Model Leaderboard! TuRTLe is a unified evaluation framework designed to systematically assess Large Language Models (LLMs) in RTL (Register-Transfer Level) generation for hardware design. Evaluation criteria include syntax correctness, functional accuracy, synthesizability, and post-synthesis quality (PPA: Power, Performance, Area). TuRTLe integrates multiple benchmarks to highlight strengths and weaknesses of available LLMs. Use the filters below to explore different RTL benchmarks and models.
NEW UPDATE (JUNE 2025): We have made our framework open-source on GitHub and added 7 recently released models, for a total of 40 base and instruct models and 5 RTL benchmarks.
The High-Performance Artificial Intelligence (HPAI) group is part of the Barcelona Supercomputing Center (BSC). This leaderboard is maintained by HPAI as part of our commitment to open science.
Feel free to contact us:
Email: hpai@bsc.es