boatbomber committed
Commit 523a99b · 1 Parent(s): 685fb54

Add evaluator
- .gitignore +173 -0
- evaluator/Evaluator.ipynb +127 -0
- evaluator/benchmarks/__init__.py +3 -0
- evaluator/benchmarks/base.py +67 -0
- evaluator/benchmarks/robloxqa.py +69 -0
- evaluator/constants.py +17 -0
- evaluator/llm_utils/__init__.py +4 -0
- evaluator/llm_utils/llm.py +92 -0
- evaluator/llm_utils/message.py +6 -0
- evaluator/load_dataset.py +51 -0
- poetry.lock +0 -0
- pyproject.toml +28 -0
.gitignore
ADDED
@@ -0,0 +1,173 @@
+cache/
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# UV
+# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+#uv.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# PyPI configuration file
+.pypirc
evaluator/Evaluator.ipynb
ADDED
@@ -0,0 +1,127 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import pandas as pd\n",
+    "import psutil\n",
+    "from pathlib import Path\n",
+    "from dotenv import load_dotenv\n",
+    "from IPython.display import display\n",
+    "from pandarallel import pandarallel\n",
+    "from rich import print\n",
+    "from tqdm.auto import tqdm\n",
+    "from constants import RESULTS_DIR\n",
+    "import re\n",
+    "import json\n",
+    "\n",
+    "load_dotenv()\n",
+    "\n",
+    "pandarallel.initialize(\n",
+    "    progress_bar=True,\n",
+    "    verbose=0,\n",
+    "    nb_workers=max(1, psutil.cpu_count(logical=False) - 5),\n",
+    ")\n",
+    "\n",
+    "tqdm.pandas()\n",
+    "pd.set_option(\"display.max_rows\", 80)\n",
+    "pd.set_option(\"display.min_rows\", 60)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Step 1: Pick a model to evaluate\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "MODEL_ID = \"deepseek-r1-distill-qwen-1.5b\"\n",
+    "MODEL_NAME = \"deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B\"\n",
+    "MODEL_TEMP = 0.6\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Step 2: Run evaluations"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from benchmarks import RobloxQAEvaluator\n",
+    "\n",
+    "\n",
+    "print(f\"Evaluating [green]{MODEL_NAME}[/green]\")\n",
+    "\n",
+    "evaluation = {\n",
+    "    \"Model\": MODEL_NAME,\n",
+    "    \"Results\": {\n",
+    "        \"RobloxQA\": RobloxQAEvaluator(\n",
+    "            MODEL_ID,\n",
+    "            temperature=MODEL_TEMP,\n",
+    "        ).run_evaluations(),\n",
+    "    },\n",
+    "}\n",
+    "\n",
+    "print(f\"Evaluation Result\\n[white]{json.dumps(evaluation, indent=2)}[/white]\")\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Step 3: Save evaluation results\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "RESULT_PATH = RESULTS_DIR / (re.sub(r\"\\W+\", \"_\", MODEL_NAME) + \".json\")\n",
+    "Path(RESULT_PATH).parent.mkdir(parents=True, exist_ok=True)\n",
+    "\n",
+    "with open(RESULT_PATH, \"w\") as f:\n",
+    "    json.dump(evaluation, f, indent=2)\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "roblox-llm-leaderboard-results-O7xMa1b9-py3.11",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.9"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
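Step 3 above names the result file by collapsing every run of non-word characters in MODEL_NAME to an underscore. A quick standalone illustration (not part of the commit) of what that produces for the model configured in Step 1:

import re

# Mirrors the notebook's RESULT_PATH naming: runs of non-word characters
# in the model name collapse to a single underscore, then ".json" is appended.
MODEL_NAME = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
print(re.sub(r"\W+", "_", MODEL_NAME) + ".json")
# -> deepseek_ai_DeepSeek_R1_Distill_Qwen_1_5B.json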
evaluator/benchmarks/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .robloxqa import RobloxQAEvaluator
+
+__all__ = ["RobloxQAEvaluator"]
evaluator/benchmarks/base.py
ADDED
@@ -0,0 +1,67 @@
+import pandas as pd
+from llm_utils import Message
+
+
+class Evaluator:
+    model_id: str
+    temperature: float
+    max_tokens: int
+    stop: list[str] | None = None
+
+    def __init__(
+        self,
+        model_id: str,
+        temperature: float = 0.8,
+        max_tokens: int = 3000,
+        stop: list[str] | None = None,
+    ):
+        self.model_id = model_id
+        self.temperature = temperature
+        self.max_tokens = max_tokens
+        self.stop = stop
+
+    def _fetch_dataset(self) -> pd.DataFrame:
+        raise NotImplementedError()
+
+    def _build_messages(self, row: pd.Series) -> list[Message]:
+        raise NotImplementedError()
+
+    def _score_response(self, row: pd.Series, response: str) -> float:
+        raise NotImplementedError()
+
+    def _process_response(self, response: str) -> str:
+        return response.strip()
+
+    def _evaluate(self, row: pd.Series) -> float:
+        """
+        Run the evaluation for the given row.
+        This function should be called by the parallelized version of df apply.
+        """
+        # To parallelize safely on Windows, the function must be self-contained.
+        # This means we need to import the necessary modules inside the function.
+        from llm_utils import LLM
+
+        llm = LLM(self.model_id)
+        response = llm.generate(
+            temperature=self.temperature,
+            max_tokens=self.max_tokens,
+            stop=self.stop,
+            messages=self._build_messages(row),
+        )
+        response = self._process_response(response)
+        score = self._score_response(row, response)
+        return score
+
+    def run_evaluations(self) -> float:
+        """
+        Run the evaluations for the entire dataset.
+        """
+        dataset = self._fetch_dataset()
+
+        print(f"Running {self.__class__.__name__}")
+
+        scores = dataset.parallel_apply(self._evaluate, axis=1)
+        # scores = dataset.apply(self._evaluate, axis=1)  # serial fallback for debugging
+        print("Score Counts:\n", scores.value_counts())
+
+        return float(scores.mean())
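Evaluator is a template-method base class: run_evaluations and _evaluate stay generic, while a concrete benchmark only supplies the dataset, the prompt messages, and the scoring rule. A minimal sketch of what another benchmark could look like (EchoEvaluator and its toy dataset are hypothetical, for illustration only):

import pandas as pd

from llm_utils import Message

from .base import Evaluator


class EchoEvaluator(Evaluator):
    """Toy benchmark: the model scores 100 if it repeats the prompt verbatim."""

    def _fetch_dataset(self) -> pd.DataFrame:
        # Two-row stand-in for a real dataset fetch.
        return pd.DataFrame({"prompt": ["ping", "pong"]})

    def _build_messages(self, row: pd.Series) -> list[Message]:
        return [{"role": "user", "content": f"Repeat exactly: {row['prompt']}"}]

    def _score_response(self, row: pd.Series, response: str) -> float:
        return 100.0 if row["prompt"] in response else 0.0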
evaluator/benchmarks/robloxqa.py
ADDED
@@ -0,0 +1,69 @@
+import re
+
+import pandas as pd
+from llm_utils import Message
+from load_dataset import load_dataset
+
+from .base import Evaluator
+
+USER_MESSAGE_TEMPLATE = """\
+Question:
+{prompt}
+
+Choices:
+{choices}
+
+
+Please choose the correct answer choice for the above question.
+Write the letter of your selected choice in this exact format:
+"Answer: letter_goes_here"
+"""
+
+DATASET_ID = "boatbomber/RobloxQA-v1.0"
+DATASET_SPLIT = "test"
+
+
+class RobloxQAEvaluator(Evaluator):
+    _answer_choices = ["a", "b", "c", "d", "e"]
+    _answer_pattern = re.compile(r"answer: ([a-e])")
+
+    def _fetch_dataset(self) -> pd.DataFrame:
+        dataset = load_dataset(
+            dataset_id=DATASET_ID,
+            split=DATASET_SPLIT,
+        )
+        return dataset
+
+    def _build_messages(self, row: pd.Series) -> list[Message]:
+        return [
+            {
+                "role": "user",
+                "content": USER_MESSAGE_TEMPLATE.format(
+                    prompt=row["prompt"],
+                    choices="\n".join(
+                        f"{choice.upper()}: {row['choices'][index]}"
+                        for index, choice in enumerate(self._answer_choices)
+                    ),
+                ),
+            },
+        ]
+
+    def _process_response(self, response: str) -> str:
+        return response.strip().lower()
+
+    def _score_response(self, row: pd.Series, response: str) -> float:
+        # Find the last occurrence of _answer_pattern in response
+        matches = self._answer_pattern.findall(response)
+        if not matches:
+            return 0.0
+
+        chosen_answer = matches[-1]
+        if chosen_answer not in self._answer_choices:
+            return 0.0
+
+        chosen_answer_index = self._answer_choices.index(chosen_answer)
+        correct_answer_index = row["answer"]
+        if chosen_answer_index == correct_answer_index:
+            return 100.0
+
+        return 0.0
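Because _process_response lowercases the reply before scoring, the regex only needs lowercase letters, and findall plus matches[-1] means a model that changes its mind is graded on its final stated answer. A small standalone demonstration (not part of the commit):

import re

answer_pattern = re.compile(r"answer: ([a-e])")

# The evaluator lowercases the response, collects every match, and keeps the last,
# so a self-correcting model is graded on its final answer.
response = "Answer: B. Wait, the docs say otherwise. Answer: C".lower()
matches = answer_pattern.findall(response)
print(matches)      # ['b', 'c']
print(matches[-1])  # 'c' is the answer that gets scored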
evaluator/constants.py
ADDED
@@ -0,0 +1,17 @@
+from pathlib import Path
+
+INFERENCE_PROVIDER_URL = "http://localhost:8020/v1"  # or "https://api.together.xyz/v1" for hosted inference
+
+ROOT_DIR = Path(__file__).resolve().parent.parent
+
+RESULTS_DIR = ROOT_DIR / "results"
+RESULTS_DIR.mkdir(parents=True, exist_ok=True)
+
+CACHE_DIR = ROOT_DIR / "cache"
+CACHE_DIR.mkdir(parents=True, exist_ok=True)
+
+DATASET_CACHE_DIR = CACHE_DIR / "datasets"
+DATASET_CACHE_DIR.mkdir(parents=True, exist_ok=True)
+
+RESPONSE_CACHE_DIR = CACHE_DIR / "llm_responses"
+RESPONSE_CACHE_DIR.mkdir(parents=True, exist_ok=True)
evaluator/llm_utils/__init__.py
ADDED
@@ -0,0 +1,4 @@
+from .llm import LLM
+from .message import Message
+
+__all__ = ["LLM", "Message"]
evaluator/llm_utils/llm.py
ADDED
@@ -0,0 +1,87 @@
+import os
+import re
+import time
+import traceback
+from hashlib import md5
+
+from constants import INFERENCE_PROVIDER_URL, RESPONSE_CACHE_DIR
+from openai import OpenAI
+
+client = OpenAI(
+    base_url=INFERENCE_PROVIDER_URL,
+    api_key=os.getenv("API_KEY", ""),
+)
+
+
+class LLM:
+    model_id: str
+    model_id_sanitized: str
+
+    def __repr__(self) -> str:
+        return f"LLM(model_id='{self.model_id}')"
+
+    def __str__(self) -> str:
+        return self.__repr__()
+
+    def __init__(self, model_id: str):
+        self.model_id = model_id
+        self.model_id_sanitized = re.sub(r"[^a-zA-Z0-9_-]", "_", self.model_id)
+
+    def _get_cache_key(self, *args, **kwargs) -> str:
+        return md5(
+            str(args).encode() + str(kwargs).encode(), usedforsecurity=False
+        ).hexdigest()
+
+    def _read_cache(self, cache_key: str) -> str | None:
+        path = RESPONSE_CACHE_DIR / self.model_id_sanitized / f"{cache_key}.txt"
+        if path.exists():
+            content = path.read_text(encoding="utf-8")
+            return content
+        return None
+
+    def _write_cache(self, cache_key: str, response: str) -> None:
+        path = RESPONSE_CACHE_DIR / self.model_id_sanitized / f"{cache_key}.txt"
+        path.parent.mkdir(parents=True, exist_ok=True)
+        path.write_text(response, encoding="utf-8")
+
+    def _run_stream(self, cache_key: str, **kwargs) -> str:
+        stream = client.chat.completions.create(
+            model=self.model_id,
+            stream=True,
+            **kwargs,
+        )
+
+        response_builder = []
+        for chunk in stream:
+            content = chunk.choices[0].delta.content
+            if content is not None:
+                response_builder.append(content)
+
+        response = "".join(response_builder)
+        self._write_cache(cache_key, response)
+        return response
+
+    def generate(self, **kwargs) -> str:
+        cache_key = self._get_cache_key(
+            model=self.model_id,
+            **kwargs,
+        )
+
+        cached_response = self._read_cache(cache_key)
+        if cached_response is not None:
+            return cached_response
+
+        attempts = 0
+        max_attempts = 3
+        while attempts < max_attempts:
+            try:
+                return self._run_stream(cache_key, **kwargs)
+            except Exception as e:
+                print(f"\nError running stream for {self.model_id}:")
+                traceback.print_exc()
+                attempts += 1
+                if attempts >= max_attempts:
+                    raise e
+
+                print(f"\nRetrying after {2**attempts} seconds...")
+                time.sleep(2**attempts)  # Exponential backoff
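LLM keys its on-disk cache on an md5 of the generation arguments, so a repeated call with identical parameters returns the cached text without hitting the server. A usage sketch (assumes an OpenAI-compatible endpoint is already serving at INFERENCE_PROVIDER_URL, with API_KEY set in the environment if the provider requires one):

from llm_utils import LLM

llm = LLM("deepseek-r1-distill-qwen-1.5b")
reply = llm.generate(
    temperature=0.6,
    max_tokens=3000,
    stop=None,
    messages=[{"role": "user", "content": "Say hello."}],
)
print(reply)
# A second identical call is served from cache/llm_responses/<model>/<md5>.txt
# instead of re-querying the endpoint.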
evaluator/llm_utils/message.py
ADDED
@@ -0,0 +1,6 @@
+from typing import TypedDict
+
+
+class Message(TypedDict):
+    role: str
+    content: str
evaluator/load_dataset.py
ADDED
@@ -0,0 +1,51 @@
+import datasets
+import pandas as pd
+from constants import DATASET_CACHE_DIR
+from rich import print
+
+PD_CACHE_DIR = DATASET_CACHE_DIR / "pandas"
+
+
+def load_from_pd_cache(dataset_id: str, split: str) -> pd.DataFrame | None:
+    """
+    Load the dataset from the pandas cache if it exists.
+    """
+    cached_path = PD_CACHE_DIR / dataset_id / f"{split}.parquet"
+    if cached_path.exists() and cached_path.is_file():
+        print(
+            f"[bold white]Loading [red]{dataset_id}:{split}[/red] from cache[/bold white]"
+        )
+        return pd.read_parquet(cached_path)
+    return None
+
+
+def load_from_hf_dataset(dataset_id: str, split: str) -> pd.DataFrame:
+    """
+    Load the dataset from the Hugging Face dataset hub.
+    """
+    print(
+        f"[bold white]Loading [red]{dataset_id}:{split}[/red] from HuggingFace[/bold white]"
+    )
+
+    dataset = datasets.load_dataset(
+        dataset_id,
+        split=split,
+        cache_dir=DATASET_CACHE_DIR,
+        verification_mode=datasets.VerificationMode.NO_CHECKS,
+    ).to_pandas()
+
+    # Save the dataset to the pandas cache
+    print(f"[bold white]Writing [red]{dataset_id}:{split}[/red] to cache[/bold white]")
+    cached_path = PD_CACHE_DIR / dataset_id / f"{split}.parquet"
+    cached_path.parent.mkdir(parents=True, exist_ok=True)
+    dataset.to_parquet(cached_path)
+
+    return dataset
+
+
+def load_dataset(dataset_id: str, split: str) -> pd.DataFrame:
+    cached = load_from_pd_cache(dataset_id, split)
+    if cached is not None:
+        return cached
+
+    return load_from_hf_dataset(dataset_id, split)
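load_dataset is a two-tier cache: the first call downloads the split from the Hub (into cache/datasets) and writes a parquet snapshot; later calls read the parquet directly. A usage sketch (the column names follow what RobloxQAEvaluator reads from each row):

from load_dataset import load_dataset

# First run: downloads from the Hub and writes cache/datasets/pandas/.../test.parquet.
# Later runs: load the parquet snapshot directly.
df = load_dataset(dataset_id="boatbomber/RobloxQA-v1.0", split="test")
print(df.columns.tolist())  # expected to include "prompt", "choices", "answer"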
poetry.lock
ADDED
The diff for this file is too large to render.
See raw diff
pyproject.toml
ADDED
@@ -0,0 +1,28 @@
+[project]
+name = "Roblox-LLM-Leaderboard-results"
+version = "1.0.0"
+description = "Dataset containing LLM leaderboard results"
+authors = [{ name = "boatbomber", email = "[email protected]" }]
+readme = "README.md"
+requires-python = "3.11.*"
+dependencies = [
+    "huggingface-hub (>=0.29.1,<0.30.0)",
+    "openai (>=1.63.2,<2.0.0)",
+    "datasets (>=3.3.2,<4.0.0)",
+    "pandas (>=2.2.3,<3.0.0)",
+    "tqdm (>=4.67.1,<5.0.0)",
+    "rich (>=13.9.4,<14.0.0)",
+    "pandarallel (>=1.6.5,<2.0.0)",
+    "python-dotenv (>=1.0.1,<2.0.0)",
+    "ipywidgets (>=8.1.5,<9.0.0)",
+    "ipykernel (>=6.29.5,<7.0.0)",
+    "scikit-learn (>=1.6.1,<2.0.0)",
+    "pyyaml (>=6.0.2,<7.0.0)",
+]
+
+[tool.poetry]
+package-mode = false
+
+[build-system]
+requires = ["poetry-core>=2.0.0,<3.0.0"]
+build-backend = "poetry.core.masonry.api"