yibum committed
Commit
b27b717
1 Parent(s): c470ddc

draft V0: test with sample input

README.md CHANGED
@@ -38,7 +38,7 @@ If you encounter problem on the space, don't hesitate to restart it to remove th
38
 
39
  # Code logic for more complex edits
40
 
41
- You'll find:
42
 - the main table's column names and properties in `src/display/utils.py`
43
  - the logic to read all results and request files, then convert them in dataframe lines, in `src/leaderboard/read_evals.py`, and `src/populate.py`
44
- - the logic to allow or filter submissions in `src/submission/submit.py` and `src/submission/check_validity.py`
 
38
 
39
  # Code logic for more complex edits
40
 
41
+ You'll find:
42
 - the main table's column names and properties in `src/display/utils.py`
43
  - the logic to read all results and request files, then convert them in dataframe lines, in `src/leaderboard/read_evals.py`, and `src/populate.py`
44
+ - the logic to allow or filter submissions in `src/submission/submit.py` and `src/submission/check_validity.py`
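Note: the column definitions referenced above live in `src/display/utils.py` and follow the Hugging Face leaderboard-template pattern. A minimal sketch, with field names inferred from the `fields(AutoEvalColumn)` usage in `app.py` rather than taken from the file itself:

```python
from dataclasses import dataclass

@dataclass(frozen=True)
class ColumnContent:
    name: str                   # header shown in the leaderboard table
    type: str                   # gradio Dataframe datatype, e.g. "str" or "number"
    displayed_by_default: bool  # pre-checked in the column picker
    hidden: bool = False        # excluded from the column picker entirely
    never_hidden: bool = False  # always shown, e.g. the model name column
```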
app.py CHANGED
@@ -1,103 +1,58 @@
1
- import subprocess
2
  import gradio as gr
3
  import pandas as pd
4
 
5
- # from apscheduler.schedulers.background import BackgroundScheduler
6
-
7
- # from huggingface_hub import snapshot_download
8
-
9
- from src.about import (
10
- # CITATION_BUTTON_LABEL,
11
- # CITATION_BUTTON_TEXT,
12
- # EVALUATION_QUEUE_TEXT,
13
  INTRODUCTION_TEXT,
14
  LLM_BENCHMARKS_TEXT,
15
  TITLE,
16
  )
17
  from src.display.css_html_js import custom_css
18
- from src.display.utils import (
19
- BENCHMARK_COLS,
20
  COLS,
21
- EVAL_COLS,
22
- EVAL_TYPES,
23
- NUMERIC_INTERVALS,
24
  TYPES,
25
  AutoEvalColumn,
26
- ModelType,
27
  fields,
28
- WeightType,
29
- Precision,
30
  )
31
 
32
  # from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
33
- from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, REPO_ID
34
- from src.populate import get_evaluation_queue_df, get_leaderboard_df
35
- from src.submission.submit import add_new_eval
36
-
37
-
38
- def restart_space():
39
- API.restart_space(repo_id=REPO_ID)
40
-
41
 
42
- # try:
43
- # print(EVAL_REQUESTS_PATH)
44
- # snapshot_download(
45
- # repo_id=QUEUE_REPO,
46
- # local_dir=EVAL_REQUESTS_PATH,
47
- # repo_type="dataset",
48
- # tqdm_class=None,
49
- # etag_timeout=30,
50
- # token=TOKEN,
51
- # )
52
- # except Exception:
53
- # restart_space()
54
- # try:
55
- # print(EVAL_RESULTS_PATH)
56
- # snapshot_download(
57
- # repo_id=RESULTS_REPO,
58
- # local_dir=EVAL_RESULTS_PATH,
59
- # repo_type="dataset",
60
- # tqdm_class=None,
61
- # etag_timeout=30,
62
- # token=TOKEN,
63
- # )
64
- # except Exception:
65
- # restart_space()
66
 
67
-
68
- raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
69
  leaderboard_df = original_df.copy()
70
-
71
- (
72
- finished_eval_queue_df,
73
- running_eval_queue_df,
74
- pending_eval_queue_df,
75
- ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
76
 
77
 
78
  # Searching and filtering
79
  def update_table(
80
  hidden_df: pd.DataFrame,
81
  columns: list,
82
- type_query: list,
83
- precision_query: str,
84
- size_query: list,
85
- show_deleted: bool,
86
- query: str,
 
87
  ):
88
- filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
89
- filtered_df = filter_queries(query, filtered_df)
 
90
  df = select_columns(filtered_df, columns)
91
  return df
92
 
93
 
94
- def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
95
- return df[(df[AutoEvalColumn.model.name].str.contains(query, case=False))]
96
 
97
 
98
  def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
99
  always_here_cols = [
100
- AutoEvalColumn.model_type_symbol.name,
101
  AutoEvalColumn.model.name,
102
  ]
103
  # We use COLS to maintain sorting
@@ -105,44 +60,45 @@ def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
105
  return filtered_df
106
 
107
 
108
- def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
109
- final_df = []
110
- if query != "":
111
- queries = [q.strip() for q in query.split(";")]
112
- for _q in queries:
113
- _q = _q.strip()
114
- if _q != "":
115
- temp_filtered_df = search_table(filtered_df, _q)
116
- if len(temp_filtered_df) > 0:
117
- final_df.append(temp_filtered_df)
118
- if len(final_df) > 0:
119
- filtered_df = pd.concat(final_df)
120
- filtered_df = filtered_df.drop_duplicates(
121
- subset=[AutoEvalColumn.model.name, AutoEvalColumn.precision.name, AutoEvalColumn.revision.name]
122
- )
123
 
124
- return filtered_df
125
 
126
 
127
- def filter_models(
128
- df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool
129
- ) -> pd.DataFrame:
130
- # Show all models
131
- if show_deleted:
132
- filtered_df = df
133
- else: # Show only still on the hub models
134
- filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]
 
135
 
136
- type_emoji = [t[0] for t in type_query]
137
- filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
138
- filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]
139
 
140
- numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
141
- params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
142
- mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
143
- filtered_df = filtered_df.loc[mask]
144
 
145
- return filtered_df
146
 
147
 
148
  demo = gr.Blocks(css=custom_css)
@@ -154,12 +110,12 @@ with demo:
154
  with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
155
  with gr.Row():
156
  with gr.Column():
157
- with gr.Row():
158
- search_bar = gr.Textbox(
159
- placeholder=" 🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
160
- show_label=False,
161
- elem_id="search-bar",
162
- )
163
  with gr.Row():
164
  shown_columns = gr.CheckboxGroup(
165
  choices=[c.name for c in fields(AutoEvalColumn) if not c.hidden and not c.never_hidden],
@@ -172,32 +128,89 @@ with demo:
172
  elem_id="column-select",
173
  interactive=True,
174
  )
175
- with gr.Row():
176
- deleted_models_visibility = gr.Checkbox(
177
- value=False, label="Show gated/private/deleted models", interactive=True
178
- )
179
- with gr.Column(min_width=320):
180
- # with gr.Box(elem_id="box-filter"):
181
- filter_columns_type = gr.CheckboxGroup(
182
- label="Model types",
183
- choices=[t.to_str() for t in ModelType],
184
- value=[t.to_str() for t in ModelType],
185
  interactive=True,
186
- elem_id="filter-columns-type",
187
  )
188
- filter_columns_precision = gr.CheckboxGroup(
189
- label="Precision",
190
- choices=[i.value.name for i in Precision],
191
- value=[i.value.name for i in Precision],
192
  interactive=True,
193
- elem_id="filter-columns-precision",
194
  )
195
- filter_columns_size = gr.CheckboxGroup(
196
- label="Model sizes (in billions of parameters)",
197
- choices=list(NUMERIC_INTERVALS.keys()),
198
- value=list(NUMERIC_INTERVALS.keys()),
199
  interactive=True,
200
- elem_id="filter-columns-size",
201
  )
202
 
203
  leaderboard_table = gr.components.Dataframe(
@@ -216,36 +229,38 @@ with demo:
216
  datatype=TYPES,
217
  visible=False,
218
  )
219
- search_bar.submit(
220
- update_table,
221
- [
222
- hidden_leaderboard_table_for_search,
223
- shown_columns,
224
- filter_columns_type,
225
- filter_columns_precision,
226
- filter_columns_size,
227
- deleted_models_visibility,
228
- search_bar,
229
- ],
230
- leaderboard_table,
231
- )
232
  for selector in [
233
  shown_columns,
234
- filter_columns_type,
235
- filter_columns_precision,
236
- filter_columns_size,
237
- deleted_models_visibility,
 
238
  ]:
239
  selector.change(
240
  update_table,
241
  [
242
  hidden_leaderboard_table_for_search,
243
  shown_columns,
244
- filter_columns_type,
245
- filter_columns_precision,
246
- filter_columns_size,
247
- deleted_models_visibility,
248
- search_bar,
 
249
  ],
250
  leaderboard_table,
251
  queue=True,
 
1
  import gradio as gr
2
  import pandas as pd
3
 
4
+ from src.about import ( # CITATION_BUTTON_LABEL,; CITATION_BUTTON_TEXT,; EVALUATION_QUEUE_TEXT,
5
  INTRODUCTION_TEXT,
6
  LLM_BENCHMARKS_TEXT,
7
  TITLE,
8
  )
9
  from src.display.css_html_js import custom_css
10
+ from src.display.utils import ( # EVAL_TYPES,; WeightType,; BENCHMARK_COLS,; EVAL_COLS,; NUMERIC_INTERVALS,; ModelType,; Precision,
 
11
  COLS,
12
  TYPES,
13
  AutoEvalColumn,
 
14
  fields,
15
  )
16
 
17
  # from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
18
+ from src.envs import CRM_RESULTS_PATH
19
+ from src.populate import get_leaderboard_df_crm
20
 
21
+ original_df = get_leaderboard_df_crm(CRM_RESULTS_PATH, COLS)
22
 
23
+ # raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
 
24
  leaderboard_df = original_df.copy()
25
+ # leaderboard_df = leaderboard_df.style.format({"accuracy_metric_average": "{0:.2f}"})
26
 
27
 
28
  # Searching and filtering
29
  def update_table(
30
  hidden_df: pd.DataFrame,
31
  columns: list,
32
+ accuracy_method_query: str,
33
+ # type_query: list,
34
+ # precision_query: str,
35
+ # size_query: list,
36
+ # show_deleted: bool,
37
+ # query: str,
38
  ):
39
+ # filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
40
+ # filtered_df = filter_queries(query, filtered_df)
41
+ filtered_df = filter_accuracy_method_func(hidden_df, accuracy_method_query)
42
  df = select_columns(filtered_df, columns)
43
  return df
44
 
45
 
46
+ def filter_accuracy_method_func(df: pd.DataFrame, accuracy_method_query: str) -> pd.DataFrame:
47
+ return df[df["Accuracy Method"] == accuracy_method_query]
48
+
49
+
50
+ # def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
51
+ # return df[(df[AutoEvalColumn.model.name].str.contains(query, case=False))]
52
 
53
 
54
  def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
55
  always_here_cols = [
 
56
  AutoEvalColumn.model.name,
57
  ]
58
  # We use COLS to maintain sorting
 
60
  return filtered_df
61
 
62
 
63
+ # def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
64
+ # final_df = []
65
+ # if query != "":
66
+ # queries = [q.strip() for q in query.split(";")]
67
+ # for _q in queries:
68
+ # _q = _q.strip()
69
+ # if _q != "":
70
+ # temp_filtered_df = search_table(filtered_df, _q)
71
+ # if len(temp_filtered_df) > 0:
72
+ # final_df.append(temp_filtered_df)
73
+ # if len(final_df) > 0:
74
+ # filtered_df = pd.concat(final_df)
75
+ # filtered_df = filtered_df.drop_duplicates(
76
+ # subset=[AutoEvalColumn.model.name, AutoEvalColumn.precision.name, AutoEvalColumn.revision.name]
77
+ # )
78
 
79
+ # return filtered_df
80
 
81
 
82
+ # def filter_models(
83
+ # df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool
84
+ # ) -> pd.DataFrame:
85
+ # # Show all models
86
+ # filtered_df = df
87
+ # # if show_deleted:
88
+ # # filtered_df = df
89
+ # # else: # Show only still on the hub models
90
+ # # filtered_df = df[df[AutoEvalColumn.still_on_hub.name] is True]
91
 
92
+ # type_emoji = [t[0] for t in type_query]
93
+ # filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
94
+ # filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]
95
 
96
+ # numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
97
+ # params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
98
+ # mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
99
+ # filtered_df = filtered_df.loc[mask]
100
 
101
+ # return filtered_df
102
 
103
 
104
  demo = gr.Blocks(css=custom_css)
 
110
  with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
111
  with gr.Row():
112
  with gr.Column():
113
+ # with gr.Row():
114
+ # search_bar = gr.Textbox(
115
+ # placeholder=" 🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
116
+ # show_label=False,
117
+ # elem_id="search-bar",
118
+ # )
119
  with gr.Row():
120
  shown_columns = gr.CheckboxGroup(
121
  choices=[c.name for c in fields(AutoEvalColumn) if not c.hidden and not c.never_hidden],
 
128
  elem_id="column-select",
129
  interactive=True,
130
  )
131
+ # with gr.Row():
132
+ # deleted_models_visibility = gr.Checkbox(
133
+ # value=False, label="Show gated/private/deleted models", interactive=True
134
+ # )
135
+ # with gr.Column(min_width=320):
136
+ # # with gr.Box(elem_id="box-filter"):
137
+ # filter_columns_type = gr.CheckboxGroup(
138
+ # label="Model types",
139
+ # choices=[t.to_str() for t in ModelType],
140
+ # value=[t.to_str() for t in ModelType],
141
+ # interactive=True,
142
+ # elem_id="filter-columns-type",
143
+ # )
144
+ # filter_columns_precision = gr.CheckboxGroup(
145
+ # label="Precision",
146
+ # choices=[i.value.name for i in Precision],
147
+ # value=[i.value.name for i in Precision],
148
+ # interactive=True,
149
+ # elem_id="filter-columns-precision",
150
+ # )
151
+ # filter_columns_size = gr.CheckboxGroup(
152
+ # label="Model sizes (in billions of parameters)",
153
+ # choices=list(NUMERIC_INTERVALS.keys()),
154
+ # value=list(NUMERIC_INTERVALS.keys()),
155
+ # interactive=True,
156
+ # elem_id="filter-columns-size",
157
+ # )
158
+ with gr.Row():
159
+ with gr.Column():
160
+ filter_use_case_type = gr.CheckboxGroup(
161
+ choices=["Summary", "Generation"],
162
+ value=["Summary", "Generation"],
163
+ label="Use Case Type",
164
+ info="",
165
+ interactive=True,
166
+ )
167
+ with gr.Column():
168
+ filter_use_case = gr.Dropdown(
169
+ choices=list(original_df["Use Case Name"].unique()),
170
+ # value=list(original_df["Use Case Name"].unique()),
171
+ label="Use Case",
172
+ info="",
173
+ multiselect=True,
174
+ interactive=True,
175
+ )
176
+ with gr.Column():
177
+ filter_metric_area = gr.CheckboxGroup(
178
+ choices=["Accuracy", "Speed (Latency)", "Trust & Safety", "Cost"],
179
+ value=["Accuracy", "Speed (Latency)", "Trust & Safety", "Cost"],
180
+ label="Metric Area",
181
+ info="",
182
+ interactive=True,
183
+ )
184
+ with gr.Column():
185
+ filter_accuracy_method = gr.Radio(
186
+ choices=["Manual", "Auto"],
187
+ value="Manual",
188
+ label="Accuracy Method",
189
+ info="accuracy method",
190
+ interactive=True,
191
+ )
192
+ with gr.Column():
193
+ filter_accuracy_threshold = gr.Number(
194
+ value=3,
195
+ label="Accuracy Threshold",
196
+ info="",
197
  interactive=True,
 
198
  )
199
+ with gr.Column():
200
+ filter_llm = gr.CheckboxGroup(
201
+ choices=list(original_df["Model Name"].unique()),
202
+ value=list(original_df["Model Name"].unique()),
203
+ label="Model Name",
204
+ info="",
205
  interactive=True,
 
206
  )
207
+ with gr.Column():
208
+ filter_llm_provider = gr.CheckboxGroup(
209
+ choices=list(original_df["LLM Provider"].unique()),
210
+ value=list(original_df["LLM Provider"].unique()),
211
+ label="LLM Provider",
212
+ info="",
213
  interactive=True,
 
214
  )
215
 
216
  leaderboard_table = gr.components.Dataframe(
 
229
  datatype=TYPES,
230
  visible=False,
231
  )
232
+ # search_bar.submit(
233
+ # update_table,
234
+ # [
235
+ # hidden_leaderboard_table_for_search,
236
+ # shown_columns,
237
+ # filter_columns_type,
238
+ # filter_columns_precision,
239
+ # filter_columns_size,
240
+ # deleted_models_visibility,
241
+ # search_bar,
242
+ # ],
243
+ # leaderboard_table,
244
+ # )
245
  for selector in [
246
  shown_columns,
247
+ filter_accuracy_method,
248
+ # filter_columns_type,
249
+ # filter_columns_precision,
250
+ # filter_columns_size,
251
+ # deleted_models_visibility,
252
  ]:
253
  selector.change(
254
  update_table,
255
  [
256
  hidden_leaderboard_table_for_search,
257
  shown_columns,
258
+ filter_accuracy_method,
259
+ # filter_columns_type,
260
+ # filter_columns_precision,
261
+ # filter_columns_size,
262
+ # deleted_models_visibility,
263
+ # search_bar,
264
  ],
265
  leaderboard_table,
266
  queue=True,
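Note: the new filtering path in `app.py` is small enough to sanity-check in isolation. A minimal sketch with toy rows (column names match the CSV header added below; the function body is copied from the diff):

```python
import pandas as pd

def filter_accuracy_method_func(df: pd.DataFrame, accuracy_method_query: str) -> pd.DataFrame:
    # Keep only rows whose "Accuracy Method" matches the selected radio value.
    return df[df["Accuracy Method"] == accuracy_method_query]

toy = pd.DataFrame({
    "Model Name": ["GPT4-o", "Mistral 7B", "GPT4-o"],
    "Accuracy Method": ["Auto", "Auto", "Manual"],
    "Average": [3.97, 3.74, 3.27],
})
print(filter_accuracy_method_func(toy, "Manual"))  # keeps only the Manual GPT4-o row
```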
crm-results/hf_leaderboard_accuracy.csv ADDED
@@ -0,0 +1,250 @@
1
+ Use Case Name,Use Case Type,Accuracy Method,Model Name,Model Version,LLM Provider,Instruction Following,Completeness,Conciseness,Factuality,Average
2
+ Service: Conversation summary,Summary,Auto,Gemini Pro 1,Gemini Pro 1,Google,3.9834791059280854,3.9193391642371234,3.847424684159378,3.871720116618076,3.9054907677356656
3
+ Service: Conversation summary,Summary,Auto,GPT4-o,GPT4-o,OpenAI,3.9961127308066082,3.9844509232264333,3.9300291545189503,3.9669582118561713,3.9693877551020407
4
+ Service: Conversation summary,Summary,Auto,AI21 Jamba-Instruct,AI21 (jamba-instruct-preview),AI21,3.998056365403304,3.9737609329446064,3.8104956268221573,3.9310009718172982,3.9283284742468414
5
+ Service: Conversation summary,Summary,Auto,GPT 4 Turbo,gpt-4-0613,OpenAI,4.0,3.9805636540330416,3.9067055393586005,3.9504373177842567,3.9594266277939747
6
+ Service: Conversation summary,Summary,Auto,SF-TextBase 70B,TextBase-70B (Llama FT),Salesforce,3.998056365403304,3.992225461613217,3.881438289601555,3.9591836734693877,3.957725947521866
7
+ Service: Conversation summary,Summary,Auto,Claude 3 Haiku,Claude 3 Haiku,Anthropic,3.999028182701652,3.993197278911565,3.6997084548104957,3.9591836734693877,3.912779397473275
8
+ Service: Conversation summary,Summary,Auto,Cohere Command R+,cohere.cmd-R+,Cohere AI,4.0,3.9951409135082603,3.8328474246841595,3.954324586977648,3.945578231292517
9
+ Service: Conversation summary,Summary,Auto,XGen 22B,XGen 22B (1228),Salesforce,3.9941690962099123,3.836734693877551,3.9047619047619047,3.880466472303207,3.9040330417881437
10
+ Service: Conversation summary,Summary,Auto,SF-TextBase 7B,TextBase-7B (Mistral FT),Salesforce,3.991253644314869,3.9407191448007777,3.7862001943634596,3.8746355685131197,3.898202137998057
11
+ Service: Conversation summary,Summary,Auto,LLaMA 3 70B,Meta-Llama-3-70B-Instruct,Meta,3.999028182701652,3.990281827016521,3.9280855199222544,3.9591836734693877,3.9691448007774537
12
+ Service: Conversation summary,Summary,Auto,Cohere Command Text,cohere.command-text-v14,Cohere AI,3.938775510204082,3.938775510204082,3.6530612244897958,3.7560738581146746,3.8216715257531586
13
+ Service: Conversation summary,Summary,Auto,GPT 3.5 Turbo,gpt-3.5-turbo,OpenAI,3.9961127308066082,3.935860058309038,3.9834791059280854,3.9310009718172982,3.961613216715257
14
+ Service: Conversation summary,Summary,Auto,Claude 3 Opus,Claude 3 (Opus),Anthropic,3.998056365403304,3.989310009718173,3.7657920310981536,3.9640427599611274,3.9293002915451893
15
+ Service: Conversation summary,Summary,Auto,Gemini Pro 1.5,Gemini Pro 1.5,Google,3.9961127308066082,3.9727891156462585,3.943634596695821,3.9494655004859087,3.9655004859086493
16
+ Service: Conversation summary,Summary,Auto,LLaMA 3 8B,Meta-Llama-3-8B-Instruct,Meta,4.0,3.9961127308066082,3.565597667638484,3.922254616132167,3.8709912536443145
17
+ Service: Conversation summary,Summary,Auto,Mistral 7B,Mistral-7B-Instruct-v0.1,Mistral,3.927113702623907,3.924198250728863,3.327502429543246,3.7764820213799806,3.738824101068999
18
+ Service: Conversation summary,Summary,Auto,Mixtral 8x7B,Mixtral-8x7B-v0.1,Mistral,3.987366375121477,3.9825072886297375,3.3751214771622933,3.9115646258503403,3.814139941690962
19
+ Service: Conversation summary,Summary,Auto,SF-TextSum,Summarization model 7B for Service (Mistral FT),Salesforce,3.997084548104956,3.9280855199222544,3.7755102040816326,3.9037900874635567,3.9011175898931
20
+ Sales: Email Generation,Generation,Auto,Mistral 7B,Mistral-7B-Instruct-v0.1,Mistral,1.9811320754716981,2.018867924528302,1.8867924528301887,2.2452830188679247,2.0330188679245285
21
+ Sales: Email Generation,Generation,Auto,Gemini Pro 1,Gemini Pro 1,Google,3.207547169811321,3.0377358490566038,3.0754716981132075,3.188679245283019,3.1273584905660377
22
+ Sales: Email Generation,Generation,Auto,LLaMA 3 8B,Meta-Llama-3-8B-Instruct,Meta,3.7358490566037736,3.0754716981132075,3.5849056603773586,3.660377358490566,3.5141509433962264
23
+ Sales: Email Generation,Generation,Auto,SF-TextSum,Summarization model 7B for Service (Mistral FT),Salesforce,3.4150943396226414,2.9245283018867925,3.5283018867924527,3.4716981132075473,3.334905660377359
24
+ Sales: Email Generation,Generation,Auto,Claude 3 Haiku,Claude 3 Haiku,Anthropic,2.5283018867924527,2.811320754716981,2.5283018867924527,3.0377358490566038,2.7264150943396226
25
+ Sales: Email Generation,Generation,Auto,Cohere Command R+,cohere.cmd-R+,Cohere AI,3.6792452830188678,3.018867924528302,3.547169811320755,3.7358490566037736,3.4952830188679247
26
+ Sales: Email Generation,Generation,Auto,Cohere Command Text,cohere.command-text-v14,Cohere AI,2.358490566037736,2.452830188679245,2.2641509433962264,2.7547169811320753,2.4575471698113205
27
+ Sales: Email Generation,Generation,Auto,XGen 22B,XGen 22B (1228),Salesforce,2.339622641509434,2.2830188679245285,2.3962264150943398,2.509433962264151,2.3820754716981134
28
+ Sales: Email Generation,Generation,Auto,SF-TextBase 70B,TextBase-70B (Llama FT),Salesforce,3.849056603773585,3.018867924528302,3.69811320754717,3.811320754716981,3.5943396226415096
29
+ Sales: Email Generation,Generation,Auto,LLaMA 3 70B,Meta-Llama-3-70B-Instruct,Meta,3.849056603773585,3.0,3.7358490566037736,3.811320754716981,3.599056603773585
30
+ Sales: Email Generation,Generation,Auto,Claude 3 Opus,Claude 3 (Opus),Anthropic,3.056603773584906,3.0,3.509433962264151,3.452830188679245,3.2547169811320753
31
+ Sales: Email Generation,Generation,Auto,GPT4-o,GPT4-o,OpenAI,3.830188679245283,3.0377358490566038,3.4716981132075473,3.792452830188679,3.533018867924528
32
+ Sales: Email Generation,Generation,Auto,Mixtral 8x7B,Mixtral-8x7B-v0.1,Mistral,2.9245283018867925,2.69811320754717,2.7547169811320753,3.0377358490566038,2.8537735849056607
33
+ Sales: Email Generation,Generation,Auto,Gemini Pro 1.5,Gemini Pro 1.5,Google,3.7735849056603774,3.0,3.69811320754717,3.6792452830188678,3.537735849056604
34
+ Sales: Email Generation,Generation,Auto,GPT 3.5 Turbo,gpt-3.5-turbo,OpenAI,3.056603773584906,2.69811320754717,3.056603773584906,3.4339622641509435,3.061320754716981
35
+ Sales: Email Generation,Generation,Auto,GPT 4 Turbo,gpt-4-0613,OpenAI,3.547169811320755,3.0,3.30188679245283,3.7358490566037736,3.3962264150943398
36
+ Sales: Email Generation,Generation,Auto,AI21 Jamba-Instruct,AI21 (jamba-instruct-preview),AI21,2.169811320754717,2.490566037735849,2.150943396226415,2.6226415094339623,2.3584905660377355
37
+ Sales: Email Generation,Generation,Auto,SF-TextBase 7B,TextBase-7B (Mistral FT),Salesforce,3.150943396226415,2.8867924528301887,3.150943396226415,3.4716981132075473,3.165094339622642
38
+ Service: Reply Recommendations,Generation,Auto,Mistral 7B,Mistral-7B-Instruct-v0.1,Mistral,3.3333333333333335,2.7222222222222223,2.9444444444444446,2.7777777777777777,2.9444444444444446
39
+ Service: Reply Recommendations,Generation,Auto,Gemini Pro 1,Gemini Pro 1,Google,3.7222222222222223,3.5,2.611111111111111,3.1666666666666665,3.25
40
+ Service: Reply Recommendations,Generation,Auto,GPT4-o,GPT4-o,OpenAI,3.9444444444444446,3.111111111111111,3.8333333333333335,3.388888888888889,3.5694444444444446
41
+ Service: Reply Recommendations,Generation,Auto,SF-TextBase 70B,TextBase-70B (Llama FT),Salesforce,3.9444444444444446,3.3333333333333335,3.6666666666666665,3.2222222222222223,3.541666666666667
42
+ Service: Reply Recommendations,Generation,Auto,LLaMA 3 8B,Meta-Llama-3-8B-Instruct,Meta,3.9444444444444446,3.111111111111111,3.5555555555555554,3.111111111111111,3.4305555555555554
43
+ Service: Reply Recommendations,Generation,Auto,Claude 3 Haiku,Claude 3 Haiku,Anthropic,3.7777777777777777,3.1666666666666665,3.2777777777777777,3.111111111111111,3.333333333333333
44
+ Service: Reply Recommendations,Generation,Auto,Cohere Command R+,cohere.cmd-R+,Cohere AI,3.611111111111111,3.1666666666666665,2.888888888888889,3.0,3.1666666666666665
45
+ Service: Reply Recommendations,Generation,Auto,Cohere Command Text,cohere.command-text-v14,Cohere AI,3.2777777777777777,3.1666666666666665,2.5555555555555554,3.0555555555555554,3.013888888888889
46
+ Service: Reply Recommendations,Generation,Auto,SF-TextBase 7B,TextBase-7B (Mistral FT),Salesforce,3.5555555555555554,2.8333333333333335,3.111111111111111,3.2777777777777777,3.1944444444444446
47
+ Service: Reply Recommendations,Generation,Auto,XGen 22B,XGen 22B (1228),Salesforce,3.7222222222222223,3.111111111111111,3.2777777777777777,3.0,3.2777777777777777
48
+ Service: Reply Recommendations,Generation,Auto,LLaMA 3 70B,Meta-Llama-3-70B-Instruct,Meta,4.0,3.2777777777777777,3.888888888888889,3.1666666666666665,3.583333333333333
49
+ Service: Reply Recommendations,Generation,Auto,Claude 3 Opus,Claude 3 (Opus),Anthropic,4.0,3.5555555555555554,3.388888888888889,3.3333333333333335,3.5694444444444446
50
+ Service: Reply Recommendations,Generation,Auto,Mixtral 8x7B,Mixtral-8x7B-v0.1,Mistral,3.888888888888889,3.1666666666666665,3.611111111111111,3.2222222222222223,3.4722222222222223
51
+ Service: Reply Recommendations,Generation,Auto,Gemini Pro 1.5,Gemini Pro 1.5,Google,3.7777777777777777,2.888888888888889,3.8333333333333335,3.2777777777777777,3.4444444444444446
52
+ Service: Reply Recommendations,Generation,Auto,GPT 3.5 Turbo,gpt-3.5-turbo,OpenAI,3.888888888888889,3.0555555555555554,3.7777777777777777,3.1666666666666665,3.472222222222222
53
+ Service: Reply Recommendations,Generation,Auto,GPT 4 Turbo,gpt-4-0613,OpenAI,3.9444444444444446,3.1666666666666665,3.5,3.2222222222222223,3.458333333333333
54
+ Service: Reply Recommendations,Generation,Auto,AI21 Jamba-Instruct,AI21 (jamba-instruct-preview),AI21,3.8333333333333335,3.2222222222222223,3.2777777777777777,3.0,3.333333333333333
55
+ Service: Reply Recommendations,Generation,Auto,SF-TextSum,Summarization model 7B for Service (Mistral FT),Salesforce,3.888888888888889,3.1666666666666665,3.2777777777777777,3.2777777777777777,3.4027777777777777
56
+ Sales & Service: Update CRM Info,Generation,Auto,Gemini Pro 1,Gemini Pro 1,Google,3.4,3.4242424242424243,3.4451219512195124,3.466666666666667,3.434007760532151
57
+ Sales & Service: Update CRM Info,Generation,Auto,GPT4-o,GPT4-o,OpenAI,3.8848484848484848,3.6484848484848484,3.915151515151515,3.909090909090909,3.83939393939394
58
+ Sales & Service: Update CRM Info,Generation,Auto,AI21 Jamba-Instruct,AI21 (jamba-instruct-preview),AI21,3.581818181818182,3.533333333333333,3.709090909090909,3.6787878787878787,3.625757575757576
59
+ Sales & Service: Update CRM Info,Generation,Auto,GPT 4 Turbo,gpt-4-0613,OpenAI,3.709090909090909,3.6363636363636362,3.806060606060606,3.7333333333333334,3.7212121212121216
60
+ Sales & Service: Update CRM Info,Generation,Auto,SF-TextBase 70B,TextBase-70B (Llama FT),Salesforce,3.824242424242424,3.775757575757576,3.83030303030303,3.806060606060606,3.809090909090909
61
+ Sales & Service: Update CRM Info,Generation,Auto,Claude 3 Haiku,Claude 3 Haiku,Anthropic,3.618181818181818,3.624242424242424,3.7151515151515153,3.6666666666666665,3.656060606060606
62
+ Sales & Service: Update CRM Info,Generation,Auto,Cohere Command R+,cohere.cmd-R+,Cohere AI,3.7636363636363637,3.624242424242424,3.757575757575758,3.812121212121212,3.7393939393939393
63
+ Sales & Service: Update CRM Info,Generation,Auto,XGen 22B,XGen 22B (1228),Salesforce,3.7333333333333334,3.6787878787878787,3.7818181818181817,3.7454545454545456,3.734848484848485
64
+ Sales & Service: Update CRM Info,Generation,Auto,SF-TextBase 7B,TextBase-7B (Mistral FT),Salesforce,2.9696969696969697,3.0545454545454547,2.8,3.096969696969697,2.9803030303030305
65
+ Sales & Service: Update CRM Info,Generation,Auto,LLaMA 3 70B,Meta-Llama-3-70B-Instruct,Meta,3.806060606060606,3.7515151515151515,3.8181818181818183,3.812121212121212,3.796969696969697
66
+ Sales & Service: Update CRM Info,Generation,Auto,Cohere Command Text,cohere.command-text-v14,Cohere AI,2.909090909090909,2.896969696969697,2.8666666666666667,2.903030303030303,2.893939393939394
67
+ Sales & Service: Update CRM Info,Generation,Auto,GPT 3.5 Turbo,gpt-3.5-turbo,OpenAI,3.703030303030303,3.6606060606060606,3.7515151515151515,3.7515151515151515,3.716666666666667
68
+ Sales & Service: Update CRM Info,Generation,Auto,Claude 3 Opus,Claude 3 (Opus),Anthropic,3.7454545454545456,3.606060606060606,3.812121212121212,3.7636363636363637,3.731818181818182
69
+ Sales & Service: Update CRM Info,Generation,Auto,Gemini Pro 1.5,Gemini Pro 1.5,Google,3.478787878787879,3.3575757575757574,3.5393939393939395,3.503030303030303,3.4696969696969697
70
+ Sales & Service: Update CRM Info,Generation,Auto,LLaMA 3 8B,Meta-Llama-3-8B-Instruct,Meta,2.787878787878788,3.242424242424242,2.3333333333333335,3.3575757575757574,2.93030303030303
71
+ Sales & Service: Update CRM Info,Generation,Auto,Mistral 7B,Mistral-7B-Instruct-v0.1,Mistral,1.6606060606060606,1.6848484848484848,1.6424242424242423,1.6787878787878787,1.6666666666666667
72
+ Sales & Service: Update CRM Info,Generation,Auto,Mixtral 8x7B,Mixtral-8x7B-v0.1,Mistral,3.6666666666666665,3.6363636363636362,3.690909090909091,3.690909090909091,3.6712121212121214
73
+ Sales & Service: Update CRM Info,Generation,Auto,SF-TextSum,Summarization model 7B for Service (Mistral FT),Salesforce,2.981818181818182,3.139393939393939,3.012121212121212,3.1575757575757577,3.0727272727272728
74
+ Service: Live Chat Summary,Summary,Auto,Gemini Pro 1,Gemini Pro 1,Google,3.542,3.479,3.303,3.381,3.42625
75
+ Service: Live Chat Summary,Summary,Auto,GPT4-o,GPT4-o,OpenAI,3.965,3.436,3.946,3.641,3.747
76
+ Service: Live Chat Summary,Summary,Auto,AI21 Jamba-Instruct,AI21 (jamba-instruct-preview),AI21,3.715,3.331,3.714,3.6236236236236237,3.595905905905906
77
+ Service: Live Chat Summary,Summary,Auto,SF-TextBase 70B,TextBase-70B (Llama FT),Salesforce,3.882,3.4004004004004003,3.904,3.718,3.7261001001001
78
+ Service: Live Chat Summary,Summary,Auto,Claude 3 Haiku,Claude 3 Haiku,Anthropic,3.927,3.566,3.607,3.588,3.6720000000000006
79
+ Service: Live Chat Summary,Summary,Auto,Cohere Command R+,cohere.cmd-R+,Cohere AI,3.945,3.725,3.666,3.453,3.69725
80
+ Service: Live Chat Summary,Summary,Auto,XGen 22B,XGen 22B (1228),Salesforce,3.703,3.247,3.689,3.605,3.561
81
+ Service: Live Chat Summary,Summary,Auto,SF-TextBase 7B,TextBase-7B (Mistral FT),Salesforce,3.694,3.158,3.75,3.593,3.54875
82
+ Service: Live Chat Summary,Summary,Auto,LLaMA 3 70B,Meta-Llama-3-70B-Instruct,Meta,3.903,3.309,3.947,3.765,3.731
83
+ Service: Live Chat Summary,Summary,Auto,Cohere Command Text,cohere.command-text-v14,Cohere AI,3.744,3.497,3.533,3.502,3.569
84
+ Service: Live Chat Summary,Summary,Auto,GPT 4 Turbo,gpt-4-0613,OpenAI,3.992,3.822,3.914,3.587,3.82875
85
+ Service: Live Chat Summary,Summary,Auto,GPT 3.5 Turbo,gpt-3.5-turbo,OpenAI,3.835,3.164164164164164,3.891,3.717,3.651791041041041
86
+ Service: Live Chat Summary,Summary,Auto,Claude 3 Opus,Claude 3 (Opus),Anthropic,3.862,3.492,3.864,3.574,3.698
87
+ Service: Live Chat Summary,Summary,Auto,Gemini Pro 1.5,Gemini Pro 1.5,Google,3.724,3.635,3.567,3.396,3.5805
88
+ Service: Live Chat Summary,Summary,Auto,LLaMA 3 8B,Meta-Llama-3-8B-Instruct,Meta,3.93,3.7,3.772,3.681,3.7707500000000005
89
+ Service: Live Chat Summary,Summary,Auto,Mistral 7B,Mistral-7B-Instruct-v0.1,Mistral,3.942,3.353,3.831,3.492,3.6544999999999996
90
+ Service: Live Chat Summary,Summary,Auto,Mixtral 8x7B,Mixtral-8x7B-v0.1,Mistral,3.866,3.379,3.846,3.609,3.6750000000000003
91
+ Service: Live Chat Summary,Summary,Auto,SF-TextSum,Summarization model 7B for Service (Mistral FT),Salesforce,3.595,3.129129129129129,3.709,3.504,3.4842822822822823
92
+ Service: Email Summary,Summary,Auto,Gemini Pro 1,Gemini Pro 1,Google,3.7755102040816326,3.7857142857142856,3.4285714285714284,3.561224489795918,3.6377551020408165
93
+ Service: Email Summary,Summary,Auto,GPT4-o,GPT4-o,OpenAI,3.9381443298969074,3.9587628865979383,3.8762886597938144,3.752577319587629,3.8814432989690726
94
+ Service: Email Summary,Summary,Auto,AI21 Jamba-Instruct,AI21 (jamba-instruct-preview),AI21,3.6122448979591835,3.5306122448979593,3.4285714285714284,3.377551020408163,3.4872448979591835
95
+ Service: Email Summary,Summary,Auto,SF-TextBase 70B,TextBase-70B (Llama FT),Salesforce,3.989795918367347,3.979591836734694,3.9693877551020407,3.673469387755102,3.9030612244897958
96
+ Service: Email Summary,Summary,Auto,Claude 3 Haiku,Claude 3 Haiku,Anthropic,3.979591836734694,3.9591836734693877,3.673469387755102,3.857142857142857,3.86734693877551
97
+ Service: Email Summary,Summary,Auto,Cohere Command R+,cohere.cmd-R+,Cohere AI,4.0,4.0,3.857142857142857,3.693877551020408,3.8877551020408165
98
+ Service: Email Summary,Summary,Auto,XGen 22B,XGen 22B (1228),Salesforce,3.5670103092783507,3.268041237113402,3.5463917525773194,3.288659793814433,3.4175257731958766
99
+ Service: Email Summary,Summary,Auto,SF-TextBase 7B,TextBase-7B (Mistral FT),Salesforce,3.7755102040816326,3.8979591836734695,3.5816326530612246,3.479591836734694,3.683673469387755
100
+ Service: Email Summary,Summary,Auto,LLaMA 3 70B,Meta-Llama-3-70B-Instruct,Meta,4.0,3.989795918367347,3.979591836734694,3.622448979591837,3.8979591836734695
101
+ Service: Email Summary,Summary,Auto,Cohere Command Text,cohere.command-text-v14,Cohere AI,3.9591836734693877,3.836734693877551,3.7448979591836733,3.3877551020408165,3.732142857142857
102
+ Service: Email Summary,Summary,Auto,GPT 4 Turbo,gpt-4-0613,OpenAI,3.989795918367347,4.0,3.8469387755102042,3.795918367346939,3.908163265306123
103
+ Service: Email Summary,Summary,Auto,GPT 3.5 Turbo,gpt-3.5-turbo,OpenAI,3.989795918367347,3.86734693877551,3.9693877551020407,3.5918367346938775,3.854591836734694
104
+ Service: Email Summary,Summary,Auto,Claude 3 Opus,Claude 3 (Opus),Anthropic,4.0,3.979591836734694,3.8469387755102042,3.795918367346939,3.9056122448979593
105
+ Service: Email Summary,Summary,Auto,Gemini Pro 1.5,Gemini Pro 1.5,Google,3.989795918367347,3.9183673469387754,3.938775510204082,3.5510204081632653,3.849489795918367
106
+ Service: Email Summary,Summary,Auto,LLaMA 3 8B,Meta-Llama-3-8B-Instruct,Meta,4.0,3.979591836734694,3.86734693877551,3.693877551020408,3.885204081632653
107
+ Service: Email Summary,Summary,Auto,Mistral 7B,Mistral-7B-Instruct-v0.1,Mistral,3.8877551020408165,3.8979591836734695,3.520408163265306,3.642857142857143,3.737244897959184
108
+ Service: Email Summary,Summary,Auto,Mixtral 8x7B,Mixtral-8x7B-v0.1,Mistral,3.979591836734694,3.9591836734693877,3.7346938775510203,3.673469387755102,3.836734693877551
109
+ Service: Email Summary,Summary,Auto,SF-TextSum,Summarization model 7B for Service (Mistral FT),Salesforce,3.377551020408163,3.6530612244897958,3.36734693877551,3.377551020408163,3.443877551020408
110
+ Service: Knowledge creation from Case Info,Generation,Auto,Gemini Pro 1,Gemini Pro 1,Google,3.1875,3.0625,2.8125,3.0,3.015625
111
+ Service: Knowledge creation from Case Info,Generation,Auto,GPT4-o,GPT4-o,OpenAI,3.625,3.6875,3.1875,3.3125,3.453125
112
+ Service: Knowledge creation from Case Info,Generation,Auto,AI21 Jamba-Instruct,AI21 (jamba-instruct-preview),AI21,3.125,3.125,2.8125,3.125,3.046875
113
+ Service: Knowledge creation from Case Info,Generation,Auto,SF-TextBase 70B,TextBase-70B (Llama FT),Salesforce,3.5625,3.4375,3.25,3.1875,3.359375
114
+ Service: Knowledge creation from Case Info,Generation,Auto,Claude 3 Haiku,Claude 3 Haiku,Anthropic,3.5625,3.25,3.1875,3.125,3.28125
115
+ Service: Knowledge creation from Case Info,Generation,Auto,Cohere Command R+,cohere.cmd-R+,Cohere AI,3.5625,3.1875,3.125,3.0,3.21875
116
+ Service: Knowledge creation from Case Info,Generation,Auto,XGen 22B,XGen 22B (1228),Salesforce,3.25,3.1875,3.0,3.1875,3.15625
117
+ Service: Knowledge creation from Case Info,Generation,Auto,SF-TextBase 7B,TextBase-7B (Mistral FT),Salesforce,3.5625,3.375,3.0625,3.3125,3.328125
118
+ Service: Knowledge creation from Case Info,Generation,Auto,LLaMA 3 70B,Meta-Llama-3-70B-Instruct,Meta,2.4375,2.3125,2.0625,3.0625,2.46875
119
+ Service: Knowledge creation from Case Info,Generation,Auto,Cohere Command Text,cohere.command-text-v14,Cohere AI,3.0625,3.0625,2.8125,3.125,3.015625
120
+ Service: Knowledge creation from Case Info,Generation,Auto,GPT 4 Turbo,gpt-4-0613,OpenAI,3.6875,3.375,3.1875,3.3125,3.390625
121
+ Service: Knowledge creation from Case Info,Generation,Auto,GPT 3.5 Turbo,gpt-3.5-turbo,OpenAI,3.5625,3.4375,3.3125,3.25,3.390625
122
+ Service: Knowledge creation from Case Info,Generation,Auto,Claude 3 Opus,Claude 3 (Opus),Anthropic,3.375,3.1875,2.9375,3.125,3.15625
123
+ Service: Knowledge creation from Case Info,Generation,Auto,Gemini Pro 1.5,Gemini Pro 1.5,Google,3.4375,3.125,3.125,3.1875,3.21875
124
+ Service: Knowledge creation from Case Info,Generation,Auto,LLaMA 3 8B,Meta-Llama-3-8B-Instruct,Meta,3.5625,3.4375,3.0,3.1875,3.296875
125
+ Service: Knowledge creation from Case Info,Generation,Auto,Mistral 7B,Mistral-7B-Instruct-v0.1,Mistral,3.5625,3.375,2.9375,3.0625,3.234375
126
+ Service: Knowledge creation from Case Info,Generation,Auto,Mixtral 8x7B,Mixtral-8x7B-v0.1,Mistral,3.4375,3.375,3.125,3.25,3.296875
127
+ Service: Knowledge creation from Case Info,Generation,Auto,SF-TextSum,Summarization model 7B for Service (Mistral FT),Salesforce,3.4375,3.3125,3.0,3.125,3.21875
128
+ Sales: Email Summary,Summary,Auto,Gemini Pro 1,Gemini Pro 1,Google,3.3877551020408165,3.4285714285714284,3.193877551020408,3.377551020408163,3.346938775510204
129
+ Sales: Email Summary,Summary,Auto,GPT4-o,GPT4-o,OpenAI,3.9693877551020407,3.8979591836734695,3.86734693877551,3.8877551020408165,3.9056122448979593
130
+ Sales: Email Summary,Summary,Auto,AI21 Jamba-Instruct,AI21 (jamba-instruct-preview),AI21,3.8979591836734695,3.663265306122449,3.9183673469387754,3.5714285714285716,3.7627551020408165
131
+ Sales: Email Summary,Summary,Auto,SF-TextBase 70B,TextBase-70B (Llama FT),Salesforce,3.979591836734694,3.836734693877551,3.9591836734693877,3.663265306122449,3.8596938775510203
132
+ Sales: Email Summary,Summary,Auto,Claude 3 Haiku,Claude 3 Haiku,Anthropic,3.989795918367347,3.7653061224489797,3.9183673469387754,3.857142857142857,3.88265306122449
133
+ Sales: Email Summary,Summary,Auto,Cohere Command R+,cohere.cmd-R+,Cohere AI,3.8979591836734695,3.7551020408163267,3.795918367346939,3.7244897959183674,3.793367346938776
134
+ Sales: Email Summary,Summary,Auto,XGen 22B,XGen 22B (1228),Salesforce,2.979381443298969,2.8969072164948453,3.142857142857143,3.2783505154639174,3.0743740795287184
135
+ Sales: Email Summary,Summary,Auto,SF-TextBase 7B,TextBase-7B (Mistral FT),Salesforce,3.6020408163265305,3.510204081632653,3.663265306122449,3.357142857142857,3.5331632653061225
136
+ Sales: Email Summary,Summary,Auto,LLaMA 3 70B,Meta-Llama-3-70B-Instruct,Meta,3.9693877551020407,3.8979591836734695,3.9591836734693877,3.642857142857143,3.86734693877551
137
+ Sales: Email Summary,Summary,Auto,Cohere Command Text,cohere.command-text-v14,Cohere AI,2.9690721649484537,3.2783505154639174,2.649484536082474,3.154639175257732,3.0128865979381443
138
+ Sales: Email Summary,Summary,Auto,GPT 4 Turbo,gpt-4-0613,OpenAI,4.0,3.9591836734693877,3.9285714285714284,3.826530612244898,3.928571428571429
139
+ Sales: Email Summary,Summary,Auto,GPT 3.5 Turbo,gpt-3.5-turbo,OpenAI,3.9591836734693877,3.693877551020408,3.9693877551020407,3.7857142857142856,3.8520408163265305
140
+ Sales: Email Summary,Summary,Auto,Claude 3 Opus,Claude 3 (Opus),Anthropic,4.0,3.938775510204082,3.9183673469387754,3.826530612244898,3.920918367346939
141
+ Sales: Email Summary,Summary,Auto,Gemini Pro 1.5,Gemini Pro 1.5,Google,3.9285714285714284,3.663265306122449,3.9183673469387754,3.7653061224489797,3.8188775510204085
142
+ Sales: Email Summary,Summary,Auto,LLaMA 3 8B,Meta-Llama-3-8B-Instruct,Meta,4.0,3.806122448979592,3.9693877551020407,3.7346938775510203,3.877551020408163
143
+ Sales: Email Summary,Summary,Auto,Mistral 7B,Mistral-7B-Instruct-v0.1,Mistral,3.948453608247423,3.948453608247423,3.7653061224489797,3.6288659793814433,3.822769829581317
144
+ Sales: Email Summary,Summary,Auto,Mixtral 8x7B,Mixtral-8x7B-v0.1,Mistral,3.9591836734693877,3.816326530612245,3.8469387755102042,3.5306122448979593,3.788265306122449
145
+ Sales: Email Summary,Summary,Auto,SF-TextSum,Summarization model 7B for Service (Mistral FT),Salesforce,3.836734693877551,3.642857142857143,3.7346938775510203,3.5306122448979593,3.686224489795918
146
+ Service: Call Summary,Summary,Auto,Mistral 7B,Mistral-7B-Instruct-v0.1,Mistral,3.95,3.925,3.925,3.75,3.8875
147
+ Service: Call Summary,Summary,Auto,Gemini Pro 1,Gemini Pro 1,Google,3.95,3.85,3.9,3.775,3.8687500000000004
148
+ Service: Call Summary,Summary,Auto,GPT4-o,GPT4-o,OpenAI,3.95,3.925,3.95,3.925,3.9375
149
+ Service: Call Summary,Summary,Auto,SF-TextBase 70B,TextBase-70B (Llama FT),Salesforce,3.95,3.95,3.95,3.975,3.9562500000000003
150
+ Service: Call Summary,Summary,Auto,LLaMA 3 8B,Meta-Llama-3-8B-Instruct,Meta,3.95,3.95,3.95,3.9,3.9375000000000004
151
+ Service: Call Summary,Summary,Auto,Claude 3 Haiku,Claude 3 Haiku,Anthropic,3.925,3.925,3.775,3.8,3.85625
152
+ Service: Call Summary,Summary,Auto,Cohere Command R+,cohere.cmd-R+,Cohere AI,3.95,3.925,3.95,3.925,3.9375
153
+ Service: Call Summary,Summary,Auto,Cohere Command Text,cohere.command-text-v14,Cohere AI,3.75,3.85,3.675,3.6,3.7187499999999996
154
+ Service: Call Summary,Summary,Auto,SF-TextBase 7B,TextBase-7B (Mistral FT),Salesforce,4.0,3.875,3.975,3.95,3.95
155
+ Service: Call Summary,Summary,Auto,XGen 22B,XGen 22B (1228),Salesforce,3.95,3.8,3.95,3.65,3.8375
156
+ Service: Call Summary,Summary,Auto,LLaMA 3 70B,Meta-Llama-3-70B-Instruct,Meta,3.95,3.925,3.95,3.875,3.925
157
+ Service: Call Summary,Summary,Auto,Claude 3 Opus,Claude 3 (Opus),Anthropic,4.0,4.0,4.0,3.95,3.9875
158
+ Service: Call Summary,Summary,Auto,Mixtral 8x7B,Mixtral-8x7B-v0.1,Mistral,3.975,3.925,3.95,3.825,3.91875
159
+ Service: Call Summary,Summary,Auto,Gemini Pro 1.5,Gemini Pro 1.5,Google,3.95,3.925,4.0,3.95,3.95625
160
+ Service: Call Summary,Summary,Auto,GPT 3.5 Turbo,gpt-3.5-turbo,OpenAI,3.95,3.875,3.95,3.85,3.90625
161
+ Service: Call Summary,Summary,Auto,GPT 4 Turbo,gpt-4-0613,OpenAI,3.95,3.925,3.95,3.9,3.93125
162
+ Service: Call Summary,Summary,Auto,AI21 Jamba-Instruct,AI21 (jamba-instruct-preview),AI21,3.95,3.9,3.925,3.825,3.8999999999999995
163
+ Service: Call Summary,Summary,Auto,SF-TextSum,Summarization model 7B for Service (Mistral FT),Salesforce,3.95,3.875,3.95,3.875,3.9125
164
+ Sales: Call Summary,Summary,Auto,Gemini Pro 1,Gemini Pro 1,Google,3.0526315789473686,2.8947368421052633,2.473684210526316,2.789473684210526,2.8026315789473686
165
+ Sales: Call Summary,Summary,Auto,GPT4-o,GPT4-o,OpenAI,3.9473684210526314,3.210526315789474,3.473684210526316,3.9473684210526314,3.6447368421052633
166
+ Sales: Call Summary,Summary,Auto,AI21 Jamba-Instruct,AI21 (jamba-instruct-preview),AI21,3.3157894736842106,3.0,3.1052631578947367,3.3684210526315788,3.1973684210526314
167
+ Sales: Call Summary,Summary,Auto,SF-TextBase 70B,TextBase-70B (Llama FT),Salesforce,3.3684210526315788,2.789473684210526,3.263157894736842,3.6842105263157894,3.276315789473684
168
+ Sales: Call Summary,Summary,Auto,Claude 3 Haiku,Claude 3 Haiku,Anthropic,3.473684210526316,3.0526315789473686,3.3157894736842106,3.6315789473684212,3.368421052631579
169
+ Sales: Call Summary,Summary,Auto,Cohere Command R+,cohere.cmd-R+,Cohere AI,3.6842105263157894,3.0526315789473686,2.8421052631578947,3.789473684210526,3.3421052631578942
170
+ Sales: Call Summary,Summary,Auto,XGen 22B,XGen 22B (1228),Salesforce,2.736842105263158,2.6315789473684212,2.5789473684210527,2.789473684210526,2.6842105263157894
171
+ Sales: Call Summary,Summary,Auto,SF-TextBase 7B,TextBase-7B (Mistral FT),Salesforce,3.1052631578947367,2.6315789473684212,3.210526315789474,3.210526315789474,3.0394736842105265
172
+ Sales: Call Summary,Summary,Auto,LLaMA 3 70B,Meta-Llama-3-70B-Instruct,Meta,3.263157894736842,2.8421052631578947,3.473684210526316,3.4210526315789473,3.2499999999999996
173
+ Sales: Call Summary,Summary,Auto,Cohere Command Text,cohere.command-text-v14,Cohere AI,2.473684210526316,2.3157894736842106,2.1052631578947367,1.7894736842105263,2.1710526315789473
174
+ Sales: Call Summary,Summary,Auto,GPT 4 Turbo,gpt-4-0613,OpenAI,3.736842105263158,3.0,3.3157894736842106,4.0,3.513157894736842
175
+ Sales: Call Summary,Summary,Auto,GPT 3.5 Turbo,gpt-3.5-turbo,OpenAI,3.3684210526315788,2.888888888888889,3.8421052631578947,3.8421052631578947,3.485380116959064
176
+ Sales: Call Summary,Summary,Auto,Claude 3 Opus,Claude 3 (Opus),Anthropic,3.8947368421052633,3.0,3.5789473684210527,3.9473684210526314,3.605263157894737
177
+ Sales: Call Summary,Summary,Auto,Gemini Pro 1.5,Gemini Pro 1.5,Google,3.6315789473684212,3.0526315789473686,3.0526315789473686,3.736842105263158,3.3684210526315788
178
+ Sales: Call Summary,Summary,Auto,LLaMA 3 8B,Meta-Llama-3-8B-Instruct,Meta,3.1578947368421053,2.8947368421052633,2.6315789473684212,2.9473684210526314,2.9078947368421053
179
+ Sales: Call Summary,Summary,Auto,Mistral 7B,Mistral-7B-Instruct-v0.1,Mistral,3.4210526315789473,3.1052631578947367,3.210526315789474,3.5555555555555554,3.323099415204678
180
+ Sales: Call Summary,Summary,Auto,Mixtral 8x7B,Mixtral-8x7B-v0.1,Mistral,3.263157894736842,3.0,2.6315789473684212,3.263157894736842,3.0394736842105265
181
+ Sales: Call Summary,Summary,Auto,SF-TextSum,Summarization model 7B for Service (Mistral FT),Salesforce,2.9473684210526314,2.736842105263158,3.0,3.0526315789473686,2.9342105263157894
182
+ Service: Live Chat Insights,Summary,Auto,Gemini Pro 1,Gemini Pro 1,Google,3.97265625,3.8359375,3.87109375,3.76953125,3.8623046875
183
+ Service: Live Chat Insights,Summary,Auto,GPT4-o,GPT4-o,OpenAI,4.0,3.91796875,3.9765625,3.91015625,3.951171875
184
+ Service: Live Chat Insights,Summary,Auto,AI21 Jamba-Instruct,AI21 (jamba-instruct-preview),AI21,3.97265625,3.8203125,3.8515625,3.859375,3.8759765625
185
+ Service: Live Chat Insights,Summary,Auto,SF-TextBase 70B,TextBase-70B (Llama FT),Salesforce,3.99609375,3.96875,3.94140625,3.85546875,3.9404296875
186
+ Service: Live Chat Insights,Summary,Auto,Claude 3 Haiku,Claude 3 Haiku,Anthropic,4.0,3.98046875,3.796875,3.8828125,3.9150390625
187
+ Service: Live Chat Insights,Summary,Auto,Cohere Command R+,cohere.cmd-R+,Cohere AI,3.984375,3.9375,3.8671875,3.87109375,3.9150390625
188
+ Service: Live Chat Insights,Summary,Auto,XGen 22B,XGen 22B (1228),Salesforce,3.9765625,3.4375,3.921875,3.74609375,3.7705078125
189
+ Service: Live Chat Insights,Summary,Auto,SF-TextBase 7B,TextBase-7B (Mistral FT),Salesforce,3.97265625,3.6875,3.90625,3.71484375,3.8203125
190
+ Service: Live Chat Insights,Summary,Auto,LLaMA 3 70B,Meta-Llama-3-70B-Instruct,Meta,3.9921875,3.93359375,3.97265625,3.85546875,3.9384765625
191
+ Service: Live Chat Insights,Summary,Auto,Cohere Command Text,cohere.command-text-v14,Cohere AI,3.9375,3.8671875,3.484375,3.51171875,3.7001953125
192
+ Service: Live Chat Insights,Summary,Auto,GPT 4 Turbo,gpt-4-0613,OpenAI,3.9921875,3.9375,3.93359375,3.91015625,3.943359375
193
+ Service: Live Chat Insights,Summary,Auto,GPT 3.5 Turbo,gpt-3.5-turbo,OpenAI,3.96875,3.71875,4.0,3.90234375,3.8974609375
194
+ Service: Live Chat Insights,Summary,Auto,Claude 3 Opus,Claude 3 (Opus),Anthropic,4.0,3.98046875,3.82421875,3.9140625,3.9296875
195
+ Service: Live Chat Insights,Summary,Auto,Gemini Pro 1.5,Gemini Pro 1.5,Google,3.98828125,3.88671875,3.9375,3.77734375,3.8974609375
196
+ Service: Live Chat Insights,Summary,Auto,LLaMA 3 8B,Meta-Llama-3-8B-Instruct,Meta,3.9921875,3.96484375,3.62890625,3.8359375,3.85546875
197
+ Service: Live Chat Insights,Summary,Auto,Mistral 7B,Mistral-7B-Instruct-v0.1,Mistral,3.9921875,3.875,3.7734375,3.8125,3.86328125
198
+ Service: Live Chat Insights,Summary,Auto,Mixtral 8x7B,Mixtral-8x7B-v0.1,Mistral,3.96875,3.953125,3.6015625,3.8046875,3.83203125
199
+ Service: Live Chat Insights,Summary,Auto,SF-TextSum,Summarization model 7B for Service (Mistral FT),Salesforce,3.9765625,3.7734375,3.92578125,3.80859375,3.87109375
200
+ Service: Reply Recommendations,Generation,Manual,GPT 3.5 Turbo,gpt-3.5-turbo,OpenAI,3.0935185185185183,3.22037037037037,3.443518518518519,3.0453703703703705,3.2006944444444443
201
+ Service: Reply Recommendations,Generation,Manual,GPT 4 Turbo,gpt-4-0613,OpenAI,3.525925925925926,3.3203703703703704,3.5129629629629635,3.52962962962963,3.4722222222222228
202
+ Service: Reply Recommendations,Generation,Manual,Mixtral 8x7B,Mixtral-8x7B-v0.1,Mistral,3.015740740740741,3.0203703703703706,3.2111111111111112,2.837037037037037,3.021064814814815
203
+ Service: Reply Recommendations,Generation,Manual,AI21 Jamba-Instruct,AI21 (jamba-instruct-preview),AI21,3.175,2.733333333333333,2.977777777777778,3.0962962962962965,2.995601851851852
204
+ Service: Reply Recommendations,Generation,Manual,Mistral 7B,Mistral-7B-Instruct-v0.1,Mistral,2.5185185185185186,2.4657407407407406,2.660185185185185,2.4314814814814816,2.5189814814814815
205
+ Service: Reply Recommendations,Generation,Manual,XGen 22B,XGen 22B (1228),Salesforce,2.892592592592593,2.9074074074074074,3.1231481481481485,2.486111111111111,2.852314814814815
206
+ Service: Reply Recommendations,Generation,Manual,Claude 3 Haiku,Claude 3 Haiku,Anthropic,3.3435185185185192,3.1203703703703702,3.184259259259259,3.28425925925926,3.233101851851852
207
+ Service: Reply Recommendations,Generation,Manual,Cohere Command Text,cohere.command-text-v14,Cohere AI,2.3768518518518515,2.8157407407407415,2.813888888888889,2.641666666666667,2.6620370370370376
208
+ Service: Reply Recommendations,Generation,Manual,Gemini Pro 1,Gemini Pro 1,Google,2.851851851851852,2.761111111111111,2.2962962962962963,3.2379629629629627,2.7868055555555555
209
+ Service: Reply Recommendations,Generation,Manual,LLaMA 3 70B,Meta-Llama-3-70B-Instruct,Meta,3.2796296296296297,3.2101851851851846,3.230555555555556,3.021296296296297,3.185416666666667
210
+ Service: Reply Recommendations,Generation,Manual,LLaMA 3 8B,Meta-Llama-3-8B-Instruct,Meta,3.087037037037037,3.1166666666666667,3.249074074074074,2.888888888888889,3.0854166666666667
211
+ Sales: Email Generation,Generation,Manual,GPT 3.5 Turbo,gpt-3.5-turbo,OpenAI,3.037735849056604,2.515723270440251,3.204402515723271,2.811320754716981,2.892295597484277
212
+ Sales: Email Generation,Generation,Manual,GPT 4 Turbo,gpt-4-0613,OpenAI,3.2169811320754724,3.067610062893082,2.9088050314465415,3.248427672955976,3.1104559748427683
213
+ Sales: Email Generation,Generation,Manual,Mixtral 8x7B,Mixtral-8x7B-v0.1,Mistral,3.1163522012578615,2.5345911949685536,2.9937106918238996,2.8899371069182394,2.8836477987421385
214
+ Sales: Email Generation,Generation,Manual,AI21 Jamba-Instruct,AI21 (jamba-instruct-preview),AI21,3.0817610062893084,2.1132075471698113,2.7106918238993716,2.880503144654088,2.6965408805031448
215
+ Sales: Email Generation,Generation,Manual,Mistral 7B,Mistral-7B-Instruct-v0.1,Mistral,2.9056603773584904,1.9182389937106918,2.883647798742138,2.60062893081761,2.5770440251572326
216
+ Sales: Email Generation,Generation,Manual,XGen 22B,XGen 22B (1228),Salesforce,3.0031446540880506,2.1540880503144657,2.90880503144654,2.7044025157232707,2.6926100628930816
217
+ Sales: Email Generation,Generation,Manual,Claude 3 Haiku,Claude 3 Haiku,Anthropic,3.0880503144654092,2.5283018867924527,3.0220125786163528,3.1635220125786163,2.9504716981132075
218
+ Sales: Email Generation,Generation,Manual,Cohere Command Text,cohere.command-text-v14,Cohere AI,2.878930817610063,2.1635220125786163,2.858490566037736,2.7106918238993707,2.6529088050314464
219
+ Sales: Email Generation,Generation,Manual,Gemini Pro 1,Gemini Pro 1,Google,3.1163522012578633,2.5911949685534585,2.757861635220126,2.9622641509433967,2.8569182389937113
220
+ Sales: Email Generation,Generation,Manual,LLaMA 3 70B,Meta-Llama-3-70B-Instruct,Meta,3.0943396226415096,3.0062893081761004,3.2232704402515724,3.1415094339622636,3.116352201257861
221
+ Sales: Email Generation,Generation,Manual,LLaMA 3 8B,Meta-Llama-3-8B-Instruct,Meta,3.213836477987421,3.0062893081761013,2.9937106918238996,3.0534591194968557,3.066823899371069
222
+ Sales: Email Generation,Generation,Manual,SF-TextBase 7B,TextBase-7B (Mistral FT),Salesforce,3.169871794871795,2.4839743589743586,2.9519230769230775,2.7660256410256405,2.8429487179487176
223
+ Sales: Email Generation,Generation,Manual,GPT4-o,GPT4-o,OpenAI,3.286163522012579,3.2389937106918247,3.2955974842767293,3.2421383647798745,3.265723270440252
224
+ Sales: Email Generation,Generation,Manual,SF-TextBase 70B,TextBase-70B (Llama FT),Salesforce,3.1729559748427683,3.09119496855346,3.172955974842768,3.2012578616352205,3.159591194968554
+ Service: Call Summary,Summary,Manual,GPT 3.5 Turbo,gpt-3.5-turbo,OpenAI,3.5694444444444446,3.3541666666666665,3.7948717948717947,3.4193548387096775,3.534459436173146
+ Service: Call Summary,Summary,Manual,GPT 4 Turbo,gpt-4-0613,OpenAI,3.4743589743589745,3.4871794871794872,3.5128205128205128,3.4615384615384617,3.483974358974359
+ Service: Call Summary,Summary,Manual,Mixtral 8x7B,Mixtral-8x7B-v0.1,Mistral,2.725,2.6875,2.782051282051282,2.675,2.717387820512821
+ Service: Call Summary,Summary,Manual,AI21 Jamba-Instruct,AI21 (jamba-instruct-preview),AI21,2.58974358974359,2.7125,2.7435897435897436,2.6538461538461537,2.674919871794872
+ Service: Call Summary,Summary,Manual,Mistral 7B,Mistral-7B-Instruct-v0.1,Mistral,2.75,2.775,2.8684210526315788,2.7,2.773355263157895
+ Service: Call Summary,Summary,Manual,XGen 22B,XGen 22B (1228),Salesforce,2.7,2.8125,2.9615384615384617,2.7,2.7935096153846155
+ Service: Call Summary,Summary,Manual,Claude 3 Haiku,Claude 3 Haiku,Anthropic,2.525,2.6,2.7222222222222223,2.5875,2.6086805555555554
+ Service: Call Summary,Summary,Manual,Cohere Command Text,cohere.command-text-v14,Cohere AI,2.35,2.4125,2.527027027027027,2.375,2.416131756756757
+ Service: Call Summary,Summary,Manual,Gemini Pro 1,Gemini Pro 1,Google,2.2051282051282053,2.3684210526315788,2.442857142857143,2.217948717948718,2.3085887796414113
+ Service: Call Summary,Summary,Manual,LLaMA 3 70B,Meta-Llama-3-70B-Instruct,Meta,2.7435897435897436,2.769230769230769,2.9285714285714284,2.6666666666666665,2.777014652014652
+ Service: Call Summary,Summary,Manual,LLaMA 3 8B,Meta-Llama-3-8B-Instruct,Meta,2.5135135135135136,2.6052631578947367,2.6818181818181817,2.4871794871794872,2.57194358510148
+ Service: Live Chat Insights,Summary,Manual,GPT 3.5 Turbo,gpt-3.5-turbo,OpenAI,3.72,3.7,3.7,3.74,3.7150000000000003
+ Service: Live Chat Insights,Summary,Manual,GPT 4 Turbo,gpt-4-0613,OpenAI,3.72,3.72,3.04,3.68,3.54
+ Service: Live Chat Insights,Summary,Manual,Mixtral 8x7B,Mixtral-8x7B-v0.1,Mistral,3.56,3.6,3.1,3.68,3.485
+ Service: Live Chat Insights,Summary,Manual,AI21 Jamba-Instruct,AI21 (jamba-instruct-preview),AI21,3.56,3.64,3.2,3.68,3.52
+ Service: Live Chat Insights,Summary,Manual,Mistral 7B,Mistral-7B-Instruct-v0.1,Mistral,3.56,3.54,3.2,3.6,3.475
+ Service: Live Chat Insights,Summary,Manual,XGen 22B,XGen 22B (1228),Salesforce,3.54,3.6,3.52,3.64,3.575
+ Service: Live Chat Insights,Summary,Manual,Claude 3 Haiku,Claude 3 Haiku,Anthropic,3.64,3.6,2.92,3.64,3.45
+ Service: Live Chat Insights,Summary,Manual,Cohere Command Text,cohere.command-text-v14,Cohere AI,3.26,3.36,3.0,3.56,3.295
+ Service: Live Chat Insights,Summary,Manual,Gemini Pro 1,Gemini Pro 1,Google,3.42,3.5,3.46,3.58,3.4899999999999998
+ Service: Live Chat Insights,Summary,Manual,LLaMA 3 70B,Meta-Llama-3-70B-Instruct,Meta,3.7,3.64,3.24,3.7,3.5700000000000003
+ Service: Live Chat Insights,Summary,Manual,LLaMA 3 8B,Meta-Llama-3-8B-Instruct,Meta,3.66,3.62,3.14,3.66,3.52
+ Service: Live Chat Insights,Summary,Manual,SF-TextSum,Summarization model 7B for Service (Mistral FT),Salesforce,3.46,3.56,3.58,3.56,3.54
+ Service: Live Chat Insights,Summary,Manual,SF-TextBase 7B,TextBase-7B (Mistral FT),Salesforce,3.56,3.64,3.68,3.66,3.6350000000000002
+ Service: Live Chat Insights,Summary,Manual,GPT4-o,GPT4-o,OpenAI,3.78,3.78,3.78,3.78,3.78
+ Service: Live Chat Insights,Summary,Manual,SF-TextBase 70B,TextBase-70B (Llama FT),Salesforce,3.72,3.74,3.6,3.72,3.6950000000000003
requirements.txt CHANGED
@@ -1,3 +1,4 @@
+ accelerate
  APScheduler
  black
  click
@@ -5,14 +6,13 @@ datasets
  gradio
  gradio_client
  huggingface-hub>=0.18.0
+ git+https://github.com/EleutherAI/lm-evaluation-harness.git@b281b0921b636bc36ad05c0b0b0763bd6dd43463#egg=lm-eval
  matplotlib
  numpy
  pandas
  python-dateutil
  requests
+ sentencepiece
+ tokenizers>=0.15.0
  tqdm
  transformers
- tokenizers>=0.15.0
- git+https://github.com/EleutherAI/lm-evaluation-harness.git@b281b0921b636bc36ad05c0b0b0763bd6dd43463#egg=lm-eval
- accelerate
- sentencepiece
src/about.py CHANGED
@@ -1,6 +1,7 @@
  from dataclasses import dataclass
  from enum import Enum
 
+
  @dataclass
  class Task:
      benchmark: str
@@ -11,29 +12,27 @@ class Task:
  # Select your tasks here
  # ---------------------------------------------------
  class Tasks(Enum):
-     # task_key in the json file, metric_key in the json file, name to display in the leaderboard
+     # task_key in the json file, metric_key in the json file, name to display in the leaderboard
      task0 = Task("anli_r1", "acc", "ANLI")
      task1 = Task("logiqa", "acc_norm", "LogiQA")
 
- NUM_FEWSHOT = 0 # Change with your few shot
- # ---------------------------------------------------
-
+
+ NUM_FEWSHOT = 0  # Change with your few shot
+ # ---------------------------------------------------
 
 
  # Your leaderboard name
- TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
+ TITLE = """<h1 align="center" id="space-title">🤗 LLM Leaderboard for CRM</h1>
+ <h3>Assess which LLMs are accurate enough or need fine-tuning, and weigh this versus tradeoffs of speed, costs, and trust and safety. This is based on human manual and automated evaluation with real operational CRM data per use case.</h3>
+ """
 
  # What does your leaderboard evaluate?
  INTRODUCTION_TEXT = """
- Intro text
+
  """
 
  # Which evaluations are you running? how can people reproduce what you have?
- LLM_BENCHMARKS_TEXT = f"""
- ## How it works
-
- ## Reproducibility
- To reproduce our results, here is the commands you can run:
+ LLM_BENCHMARKS_TEXT = """
 
  """
src/display/css_html_js.py CHANGED
@@ -33,7 +33,7 @@ custom_css = """
      background: none;
      border: none;
  }
-
+
  #search-bar {
      padding: 0px;
  }
@@ -77,7 +77,7 @@ table th:first-child {
  #filter_type label > .wrap{
      width: 103px;
  }
- #filter_type label > .wrap .wrap-inner{
+ #filter_type label > .wrap .wrap-inner{
      padding: 2px;
  }
  #filter_type label > .wrap .wrap-inner input{
src/display/utils.py CHANGED
@@ -5,6 +5,7 @@ import pandas as pd
 
  from src.about import Tasks
 
+
  def fields(raw_class):
      return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
 
@@ -20,29 +21,66 @@ class ColumnContent:
      hidden: bool = False
      never_hidden: bool = False
 
+
  ## Leaderboard columns
  auto_eval_column_dict = []
  # Init
- auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
- auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
- #Scores
- auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
- for task in Tasks:
-     auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
+ # auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
+ auto_eval_column_dict.append(
+     ["model", ColumnContent, ColumnContent("Model Name", "markdown", True, never_hidden=True)]
+ )
+ auto_eval_column_dict.append(
+     ["model_provider", ColumnContent, ColumnContent("LLM Provider", "markdown", True, never_hidden=True)]
+ )
+ auto_eval_column_dict.append(["use_case_name", ColumnContent, ColumnContent("Use Case Name", "markdown", True)])
+ auto_eval_column_dict.append(["use_case_type", ColumnContent, ColumnContent("Use Case Type", "markdown", True)])
+
+ auto_eval_column_dict.append(["accuracy_method", ColumnContent, ColumnContent("Accuracy Method", "markdown", True)])
+ # Accuracy metrics
+ auto_eval_column_dict.append(["accuracy_metric_average", ColumnContent, ColumnContent("Average", "markdown", True)])
+ auto_eval_column_dict.append(
+     [
+         "accuracy_metric_instruction_following",
+         ColumnContent,
+         ColumnContent("Instruction Following", "markdown", True),
+     ]
+ )
+ auto_eval_column_dict.append(
+     ["accuracy_metric_completeness", ColumnContent, ColumnContent("Completeness", "markdown", True)]
+ )
+ auto_eval_column_dict.append(
+     ["accuracy_metric_conciseness", ColumnContent, ColumnContent("Conciseness", "markdown", True)]
+ )
+ auto_eval_column_dict.append(
+     ["accuracy_metric_factuality", ColumnContent, ColumnContent("Factuality", "markdown", True)]
+ )
+ # Speed (Latency) metrics
+
+ # Cost metrics
+
+ # Trust & Safety metrics
+
+ # Scores
+ # auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
+ # for task in Tasks:
+ #     auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
  # Model information
- auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
- auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
- auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
- auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
- auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
- auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
- auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
- auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
- auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
+ # auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
+ # auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
+ # auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
+ # auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
+ # auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
+ # auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
+ # auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
+ # auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
+ # auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
 
  # We use make dataclass to dynamically fill the scores from Tasks
  AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
 
+
  ## For the queue columns in the submission tab
  @dataclass(frozen=True)
  class EvalQueueColumn:  # Queue column
@@ -53,12 +91,13 @@ class EvalQueueColumn:  # Queue column
      weight_type = ColumnContent("weight_type", "str", "Original")
      status = ColumnContent("status", "str", True)
 
+
  ## All the model information that we might need
  @dataclass
  class ModelDetails:
      name: str
      display_name: str = ""
-     symbol: str = "" # emoji
+     symbol: str = ""  # emoji
 
 
  class ModelType(Enum):
@@ -83,18 +122,20 @@ class ModelType(Enum):
          return ModelType.IFT
      return ModelType.Unknown
 
+
  class WeightType(Enum):
      Adapter = ModelDetails("Adapter")
      Original = ModelDetails("Original")
      Delta = ModelDetails("Delta")
 
+
  class Precision(Enum):
      float16 = ModelDetails("float16")
      bfloat16 = ModelDetails("bfloat16")
      float32 = ModelDetails("float32")
-     #qt_8bit = ModelDetails("8bit")
-     #qt_4bit = ModelDetails("4bit")
-     #qt_GPTQ = ModelDetails("GPTQ")
+     # qt_8bit = ModelDetails("8bit")
+     # qt_4bit = ModelDetails("4bit")
+     # qt_GPTQ = ModelDetails("GPTQ")
      Unknown = ModelDetails("?")
 
      def from_str(precision):
@@ -104,14 +145,15 @@ class Precision(Enum):
          return Precision.bfloat16
      if precision in ["float32"]:
          return Precision.float32
-     #if precision in ["8bit"]:
+     # if precision in ["8bit"]:
      #     return Precision.qt_8bit
-     #if precision in ["4bit"]:
+     # if precision in ["4bit"]:
      #     return Precision.qt_4bit
-     #if precision in ["GPTQ", "None"]:
+     # if precision in ["GPTQ", "None"]:
      #     return Precision.qt_GPTQ
      return Precision.Unknown
 
+
  # Column selection
  COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
  TYPES = [c.type for c in fields(AutoEvalColumn) if not c.hidden]
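
With the column definitions above, `COLS` resolves to the human-readable header names rather than attribute keys, which is what the CRM populate helper further down selects on. A minimal sanity-check sketch, added here for illustration and not part of this commit; the expected list is read off the hunk above:

# Editor illustration, not in the commit: what the dynamic dataclass
# yields after this change.
from src.display.utils import COLS, AutoEvalColumn

print(AutoEvalColumn.model.name)  # 'Model Name'
print(COLS)
# Expected, per the appends above:
# ['Model Name', 'LLM Provider', 'Use Case Name', 'Use Case Type',
#  'Accuracy Method', 'Average', 'Instruction Following',
#  'Completeness', 'Conciseness', 'Factuality']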
src/envs.py CHANGED
@@ -4,9 +4,9 @@ from huggingface_hub import HfApi
 
  # Info to change for your repository
  # ----------------------------------
- TOKEN = os.environ.get("TOKEN") # A read/write token for your org
+ TOKEN = os.environ.get("TOKEN")  # A read/write token for your org
 
- OWNER = "demo-leaderboard-backend" # Change to your org - don't forget to create a results and request dataset, with the correct format!
+ OWNER = "demo-leaderboard-backend"  # Change to your org - don't forget to create a results and request dataset, with the correct format!
  # ----------------------------------
 
  REPO_ID = f"{OWNER}/leaderboard"
@@ -14,11 +14,14 @@ QUEUE_REPO = f"{OWNER}/requests"
  RESULTS_REPO = f"{OWNER}/results"
 
  # If you setup a cache later, just change HF_HOME
- CACHE_PATH=os.getenv("HF_HOME", ".")
+ CACHE_PATH = os.getenv("HF_HOME", ".")
 
  # Local caches
  EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
  EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
+
+ CRM_RESULTS_PATH = os.path.join(CACHE_PATH, "crm-results")
+
  EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
  EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
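
`CRM_RESULTS_PATH` is the hook for the new CRM data: `get_leaderboard_df_crm` in `src/populate.py` below reads a single CSV out of it. A hedged sketch of the local layout this draft appears to assume; only the file name is fixed by the code, everything else is an inference:

# Editor illustration, not in the commit: the one file the new
# populate helper expects to find under CRM_RESULTS_PATH.
import os

from src.envs import CRM_RESULTS_PATH

csv_path = os.path.join(CRM_RESULTS_PATH, "hf_leaderboard_accuracy.csv")
print(csv_path)  # e.g. ./crm-results/hf_leaderboard_accuracy.csv when HF_HOME is unset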
src/leaderboard/read_evals.py CHANGED
@@ -1,6 +1,5 @@
  import glob
  import json
- import math
  import os
  from dataclasses import dataclass
 
@@ -8,28 +7,29 @@ import dateutil
  import numpy as np
 
  from src.display.formatting import make_clickable_model
- from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
- from src.submission.check_validity import is_model_on_hub
+ from src.display.utils import AutoEvalColumn, ModelType, Precision, Tasks, WeightType
+
+ # from src.submission.check_validity import is_model_on_hub
 
 
  @dataclass
  class EvalResult:
-     """Represents one full evaluation. Built from a combination of the result and request file for a given run.
-     """
-     eval_name: str # org_model_precision (uid)
-     full_model: str # org/model (path on hub)
-     org: str
+     """Represents one full evaluation. Built from a combination of the result and request file for a given run."""
+
+     eval_name: str  # org_model_precision (uid)
+     full_model: str  # org/model (path on hub)
+     org: str
      model: str
-     revision: str # commit hash, "" if main
+     revision: str  # commit hash, "" if main
      results: dict
      precision: Precision = Precision.Unknown
-     model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
-     weight_type: WeightType = WeightType.Original # Original or Adapter
-     architecture: str = "Unknown"
+     model_type: ModelType = ModelType.Unknown  # Pretrained, fine tuned, ...
+     weight_type: WeightType = WeightType.Original  # Original or Adapter
+     architecture: str = "Unknown"
      license: str = "?"
      likes: int = 0
      num_params: int = 0
-     date: str = "" # submission date of request file
+     date: str = ""  # submission date of request file
      still_on_hub: bool = False
 
      @classmethod
@@ -57,14 +57,14 @@ class EvalResult:
          result_key = f"{org}_{model}_{precision.value.name}"
          full_model = "/".join(org_and_model)
 
-         still_on_hub, _, model_config = is_model_on_hub(
-             full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
-         )
+         # still_on_hub, _, model_config = is_model_on_hub(
+         #     full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
+         # )
          architecture = "?"
-         if model_config is not None:
-             architectures = getattr(model_config, "architectures", None)
-             if architectures:
-                 architecture = ";".join(architectures)
+         # if model_config is not None:
+         #     architectures = getattr(model_config, "architectures", None)
+         #     if architectures:
+         #         architecture = ";".join(architectures)
 
          # Extract results available in this file (some results are split in several files)
          results = {}
@@ -85,10 +85,10 @@ class EvalResult:
              org=org,
              model=model,
              results=results,
-             precision=precision,
-             revision= config.get("model_sha", ""),
-             still_on_hub=still_on_hub,
-             architecture=architecture
+             precision=precision,
+             revision=config.get("model_sha", ""),
+             still_on_hub=False,
+             architecture=architecture,
          )
 
      def update_with_request_file(self, requests_path):
@@ -105,7 +105,9 @@ class EvalResult:
              self.num_params = request.get("params", 0)
              self.date = request.get("submitted_time", "")
          except Exception:
-             print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
+             print(
+                 f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}"
+             )
 
      def to_dict(self):
          """Converts the Eval Result to a dict compatible with our dataframe display"""
@@ -146,10 +148,7 @@ def get_request_file_for_model(requests_path, model_name, precision):
      for tmp_request_file in request_files:
          with open(tmp_request_file, "r") as f:
              req_content = json.load(f)
-             if (
-                 req_content["status"] in ["FINISHED"]
-                 and req_content["precision"] == precision.split(".")[-1]
-             ):
+             if req_content["status"] in ["FINISHED"] and req_content["precision"] == precision.split(".")[-1]:
                  request_file = tmp_request_file
      return request_file
 
@@ -188,7 +187,7 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResu
      results = []
      for v in eval_results.values():
          try:
-             v.to_dict() # we test if the dict version is complete
+             v.to_dict()  # we test if the dict version is complete
              results.append(v)
          except KeyError:  # not all eval values present
              continue
src/populate.py CHANGED
@@ -8,6 +8,19 @@ from src.display.utils import AutoEvalColumn, EvalQueueColumn
  from src.leaderboard.read_evals import get_raw_eval_results
 
 
+ def get_leaderboard_df_crm(crm_results_path: str, cols: list) -> pd.DataFrame:
+     """Creates a dataframe from all the individual experiment results"""
+     leaderboard_accuracy_df = pd.read_csv(os.path.join(crm_results_path, "hf_leaderboard_accuracy.csv"))
+     # leaderboard_accuracy_df = leaderboard_accuracy_df.sort_values(
+     #     by=[AutoEvalColumn.accuracy_metric_average.name], ascending=False
+     # )
+     # print(leaderboard_accuracy_df)
+     # print(leaderboard_accuracy_df.columns)
+     # print(leaderboard_accuracy_df["Model Name"].nunique())
+     leaderboard_accuracy_df = leaderboard_accuracy_df[cols].round(decimals=2)
+     return leaderboard_accuracy_df
+
+
  def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
      """Creates a dataframe from all the individual experiment results"""
      raw_data = get_raw_eval_results(results_path, requests_path)
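
How `get_leaderboard_df_crm` is called from app.py is not shown in this section, so the wiring below is an assumption for illustration, not code from the diff: the helper only needs `CRM_RESULTS_PATH` from `src/envs.py` and `COLS` from `src/display/utils.py`.

# Editor illustration, not in the commit: plausible wiring of the
# new helper into the Gradio app's dataframe.
from src.display.utils import COLS
from src.envs import CRM_RESULTS_PATH
from src.populate import get_leaderboard_df_crm

# Reads crm-results/hf_leaderboard_accuracy.csv, keeps the configured
# display columns, and rounds scores to two decimals (done inside the helper).
leaderboard_df = get_leaderboard_df_crm(CRM_RESULTS_PATH, COLS)
print(leaderboard_df.head())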
src/submission/check_validity.py DELETED
@@ -1,99 +0,0 @@
- import json
- import os
- import re
- from collections import defaultdict
- from datetime import datetime, timedelta, timezone
-
- import huggingface_hub
- from huggingface_hub import ModelCard
- from huggingface_hub.hf_api import ModelInfo
- from transformers import AutoConfig
- from transformers.models.auto.tokenization_auto import AutoTokenizer
-
- def check_model_card(repo_id: str) -> tuple[bool, str]:
-     """Checks if the model card and license exist and have been filled"""
-     try:
-         card = ModelCard.load(repo_id)
-     except huggingface_hub.utils.EntryNotFoundError:
-         return False, "Please add a model card to your model to explain how you trained/fine-tuned it."
-
-     # Enforce license metadata
-     if card.data.license is None:
-         if not ("license_name" in card.data and "license_link" in card.data):
-             return False, (
-                 "License not found. Please add a license to your model card using the `license` metadata or a"
-                 " `license_name`/`license_link` pair."
-             )
-
-     # Enforce card content
-     if len(card.text) < 200:
-         return False, "Please add a description to your model card, it is too short."
-
-     return True, ""
-
- def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
-     """Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
-     try:
-         config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
-         if test_tokenizer:
-             try:
-                 tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
-             except ValueError as e:
-                 return (
-                     False,
-                     f"uses a tokenizer which is not in a transformers release: {e}",
-                     None
-                 )
-             except Exception as e:
-                 return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
-         return True, None, config
-
-     except ValueError:
-         return (
-             False,
-             "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
-             None
-         )
-
-     except Exception as e:
-         return False, "was not found on hub!", None
-
-
- def get_model_size(model_info: ModelInfo, precision: str):
-     """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
-     try:
-         model_size = round(model_info.safetensors["total"] / 1e9, 3)
-     except (AttributeError, TypeError):
-         return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
-
-     size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
-     model_size = size_factor * model_size
-     return model_size
-
- def get_model_arch(model_info: ModelInfo):
-     """Gets the model architecture from the configuration"""
-     return model_info.config.get("architectures", "Unknown")
-
- def already_submitted_models(requested_models_dir: str) -> set[str]:
-     """Gather a list of already submitted models to avoid duplicates"""
-     depth = 1
-     file_names = []
-     users_to_submission_dates = defaultdict(list)
-
-     for root, _, files in os.walk(requested_models_dir):
-         current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
-         if current_depth == depth:
-             for file in files:
-                 if not file.endswith(".json"):
-                     continue
-                 with open(os.path.join(root, file), "r") as f:
-                     info = json.load(f)
-                     file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
-
-                     # Select organisation
-                     if info["model"].count("/") == 0 or "submitted_time" not in info:
-                         continue
-                     organisation, _ = info["model"].split("/")
-                     users_to_submission_dates[organisation].append(info["submitted_time"])
-
-     return set(file_names), users_to_submission_dates
src/submission/submit.py DELETED
@@ -1,119 +0,0 @@
- import json
- import os
- from datetime import datetime, timezone
-
- from src.display.formatting import styled_error, styled_message, styled_warning
- from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
- from src.submission.check_validity import (
-     already_submitted_models,
-     check_model_card,
-     get_model_size,
-     is_model_on_hub,
- )
-
- REQUESTED_MODELS = None
- USERS_TO_SUBMISSION_DATES = None
-
- def add_new_eval(
-     model: str,
-     base_model: str,
-     revision: str,
-     precision: str,
-     weight_type: str,
-     model_type: str,
- ):
-     global REQUESTED_MODELS
-     global USERS_TO_SUBMISSION_DATES
-     if not REQUESTED_MODELS:
-         REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
-
-     user_name = ""
-     model_path = model
-     if "/" in model:
-         user_name = model.split("/")[0]
-         model_path = model.split("/")[1]
-
-     precision = precision.split(" ")[0]
-     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
-
-     if model_type is None or model_type == "":
-         return styled_error("Please select a model type.")
-
-     # Does the model actually exist?
-     if revision == "":
-         revision = "main"
-
-     # Is the model on the hub?
-     if weight_type in ["Delta", "Adapter"]:
-         base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
-         if not base_model_on_hub:
-             return styled_error(f'Base model "{base_model}" {error}')
-
-     if not weight_type == "Adapter":
-         model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
-         if not model_on_hub:
-             return styled_error(f'Model "{model}" {error}')
-
-     # Is the model info correctly filled?
-     try:
-         model_info = API.model_info(repo_id=model, revision=revision)
-     except Exception:
-         return styled_error("Could not get your model information. Please fill it up properly.")
-
-     model_size = get_model_size(model_info=model_info, precision=precision)
-
-     # Were the model card and license filled?
-     try:
-         license = model_info.cardData["license"]
-     except Exception:
-         return styled_error("Please select a license for your model")
-
-     modelcard_OK, error_msg = check_model_card(model)
-     if not modelcard_OK:
-         return styled_error(error_msg)
-
-     # Seems good, creating the eval
-     print("Adding new eval")
-
-     eval_entry = {
-         "model": model,
-         "base_model": base_model,
-         "revision": revision,
-         "precision": precision,
-         "weight_type": weight_type,
-         "status": "PENDING",
-         "submitted_time": current_time,
-         "model_type": model_type,
-         "likes": model_info.likes,
-         "params": model_size,
-         "license": license,
-         "private": False,
-     }
-
-     # Check for duplicate submission
-     if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
-         return styled_warning("This model has been already submitted.")
-
-     print("Creating eval file")
-     OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
-     os.makedirs(OUT_DIR, exist_ok=True)
-     out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
-
-     with open(out_path, "w") as f:
-         f.write(json.dumps(eval_entry))
-
-     print("Uploading eval file")
-     API.upload_file(
-         path_or_fileobj=out_path,
-         path_in_repo=out_path.split("eval-queue/")[1],
-         repo_id=QUEUE_REPO,
-         repo_type="dataset",
-         commit_message=f"Add {model} to eval queue",
-     )
-
-     # Remove the local file
-     os.remove(out_path)
-
-     return styled_message(
-         "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
-     )