Enderchef committed
Commit 5177cd2 · verified · 1 Parent(s): b30005f

Update app.py

Files changed (1): app.py (+28, -14)
app.py CHANGED
@@ -1,3 +1,4 @@
+import os
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 from datasets import load_dataset
@@ -6,11 +7,13 @@ import torch
 # Cache to avoid reloading the model
 model_cache = {}

+HF_TOKEN = os.environ.get("HF_TOKEN")
+
 def load_model(model_id):
     if model_id in model_cache:
         return model_cache[model_id]
-    tokenizer = AutoTokenizer.from_pretrained(model_id)
-    model = AutoModelForCausalLM.from_pretrained(model_id).to("cuda" if torch.cuda.is_available() else "cpu")
+    tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
+    model = AutoModelForCausalLM.from_pretrained(model_id, token=HF_TOKEN).to("cuda" if torch.cuda.is_available() else "cpu")
     generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0 if torch.cuda.is_available() else -1)
     model_cache[model_id] = generator
     return generator
@@ -20,7 +23,11 @@ def format_prompt(item, source):
         prompt = f"{item['question']}\nA. {item['choices'][0]}\nB. {item['choices'][1]}\nC. {item['choices'][2]}\nD. {item['choices'][3]}\nAnswer:"
         answer = item['answer']
     elif source == "TIGER-Lab/MMLU-Pro":
-        prompt = f"{item['question']}\nA. {item['A']}\nB. {item['B']}\nC. {item['C']}\nD. {item['D']}\nAnswer:"
+        if all(opt in item for opt in ['A', 'B', 'C', 'D']):
+            prompt = f"{item['question']}\nA. {item['A']}\nB. {item['B']}\nC. {item['C']}\nD. {item['D']}\nAnswer:"
+        else:
+            choices = item.get("choices", ["", "", "", ""])
+            prompt = f"{item['question']}\nA. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\nAnswer:"
         answer = item['answer']
     elif source == "cais/hle":
         prompt = f"{item['question']}\n{item['A']}\n{item['B']}\n{item['C']}\n{item['D']}\nAnswer:"
@@ -31,7 +38,7 @@ def format_prompt(item, source):

 def evaluate(model_id, dataset_name, sample_count):
     gen = load_model(model_id)
-    dataset = load_dataset(dataset_name)
+    dataset = load_dataset(dataset_name, token=HF_TOKEN)
     if 'test' in dataset:
         dataset = dataset['test']
     else:
@@ -50,33 +57,38 @@ def evaluate(model_id, dataset_name, sample_count):
         results.append((prompt, output.strip(), answer, output_letter, is_correct))

     accuracy = correct / len(dataset) * 100
-    return f"Accuracy: {accuracy:.2f}%", results
+    return accuracy, results

 def run(model_id, benchmark, sample_count):
-    score, details = evaluate(model_id, benchmark, sample_count)
+    if benchmark != "cais/mmlu":
+        return "Only MMLU (cais/mmlu) is available now. MMLU-Pro and Humanity's Last Exam are coming soon.", ""
+
+    accuracy, details = evaluate(model_id, benchmark, sample_count)
     formatted = "\n\n".join([
         f"### Question:\n{q}\n\n**Model Answer:** {o}\n**Expected:** {a}\n**Predicted:** {g}\n**Correct:** {c}"
         for q, o, a, g, c in details
     ])
-    return score, formatted
+
+    return f"Accuracy: {accuracy:.2f}%", formatted
+
+def save_text(text):
+    return "evaluation_results.txt", text

 with gr.Blocks(css="body {font-family: Inter, sans-serif; padding: 1em; max-width: 900px; margin: auto;}", analytics_enabled=False) as demo:
     gr.Markdown("""
     # 🤖 LLM Benchmark Evaluator

-    Easily evaluate your Hugging Face-hosted model on:
-    - **MMLU** (`cais/mmlu`)
-    - **MMLU-Pro** (`TIGER-Lab/MMLU-Pro`)
-    - **Humanity's Last Exam** (`cais/hle`)
+    Currently, only **MMLU** (`cais/mmlu`) is available for evaluation.
+    **MMLU-Pro** and **Humanity's Last Exam** will be coming soon.

-    Enter your model ID, pick a benchmark, and hit evaluate.
+    Enter your model ID, pick MMLU, and hit evaluate.
     """)

     with gr.Row():
         model_id = gr.Textbox(label="Your Hugging Face Model ID", placeholder="e.g., your-org/your-model")
         benchmark = gr.Dropdown(
             label="Choose Benchmark",
-            choices=["cais/mmlu", "TIGER-Lab/MMLU-Pro", "cais/hle"],
+            choices=["cais/mmlu"],
             value="cais/mmlu"
         )
         sample_count = gr.Slider(label="Number of Samples", minimum=1, maximum=100, value=10, step=1)
@@ -84,7 +96,9 @@ with gr.Blocks(css="body {font-family: Inter, sans-serif; padding: 1em; max-width: 900px; margin: auto;}", analytics_enabled=False) as demo:
     run_button = gr.Button("🚀 Run Evaluation")
     acc_output = gr.Textbox(label="Benchmark Accuracy", interactive=False)
     detail_output = gr.Textbox(label="Evaluation Details", lines=20, interactive=False)
+    download_button = gr.Button("📥 Download Full Evaluation")

     run_button.click(run, inputs=[model_id, benchmark, sample_count], outputs=[acc_output, detail_output])
+    download_button.click(save_text, inputs=detail_output, outputs=gr.File())

-demo.launch()
+demo.launch(share=True)
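
Note on the new download flow: a Gradio gr.File output component is given the path of a file on disk, so the text handed to save_text would typically be written out to a file before being returned. A minimal sketch of that pattern, reusing the save_text name from the diff; the tempfile approach is an assumption for illustration and not part of this commit:

import tempfile

def save_text(text):
    # Write the evaluation details to a temporary .txt file and return its
    # path, which is the kind of value a gr.File output component expects.
    # (Illustrative sketch only; not part of the committed app.py.)
    tmp = tempfile.NamedTemporaryFile(
        mode="w", suffix=".txt", prefix="evaluation_results_", delete=False
    )
    tmp.write(text)
    tmp.close()
    return tmp.name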