Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- opencompass/configs/datasets/ARC_e/ARC_e_gen.py +4 -0
- opencompass/configs/datasets/ARC_e/ARC_e_gen_1e0de5.py +44 -0
- opencompass/configs/datasets/ARC_e/ARC_e_ppl.py +4 -0
- opencompass/configs/datasets/ARC_e/ARC_e_ppl_2ef631.py +37 -0
- opencompass/configs/datasets/ARC_e/ARC_e_ppl_a450bd.py +54 -0
- opencompass/configs/datasets/ARC_e/ARC_e_ppl_d52a21.py +34 -0
- opencompass/configs/datasets/CLUE_C3/CLUE_C3_gen.py +4 -0
- opencompass/configs/datasets/CLUE_C3/CLUE_C3_gen_8c358f.py +51 -0
- opencompass/configs/datasets/CLUE_C3/CLUE_C3_ppl.py +4 -0
- opencompass/configs/datasets/CLUE_C3/CLUE_C3_ppl_56b537.py +36 -0
- opencompass/configs/datasets/CLUE_C3/CLUE_C3_ppl_e24a31.py +37 -0
- opencompass/configs/datasets/ChemBench/ChemBench_gen.py +77 -0
- opencompass/configs/datasets/FewCLUE_tnews/FewCLUE_tnews_gen.py +4 -0
- opencompass/configs/datasets/FewCLUE_tnews/FewCLUE_tnews_gen_b90e4a.py +75 -0
- opencompass/configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl.py +4 -0
- opencompass/configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl_7d1c07.py +43 -0
- opencompass/configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl_d10e8a.py +48 -0
- opencompass/configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl_fff486.py +48 -0
- opencompass/configs/datasets/MathBench/deprecated_mathbench_2024_gen_de9ff9.py +108 -0
- opencompass/configs/datasets/MathBench/deprecated_mathbench_agent_gen_48ec47.py +128 -0
- opencompass/configs/datasets/MathBench/deprecated_mathbench_agent_gen_fbe13b.py +130 -0
- opencompass/configs/datasets/MathBench/deprecated_mathbench_arith_gen_ccd638.py +58 -0
- opencompass/configs/datasets/MathBench/deprecated_mathbench_cot_gen_66f329.py +110 -0
- opencompass/configs/datasets/MathBench/mathbench_2024_gen_fc2a24.py +81 -0
- opencompass/configs/datasets/MathBench/mathbench_2024_wocircular_gen_1dc21d.py +81 -0
- opencompass/configs/datasets/MathBench/mathbench_2024_wocircular_mixed_8eb12b.py +81 -0
- opencompass/configs/datasets/MathBench/mathbench_gen.py +4 -0
- opencompass/configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_gen.py +4 -0
- opencompass/configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_gen_68aac7.py +43 -0
- opencompass/configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_ppl.py +4 -0
- opencompass/configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_ppl_50f8f6.py +34 -0
- opencompass/configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_ppl_66caf3.py +53 -0
- opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen.py +4 -0
- opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen_68aac7.py +43 -0
- opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl.py +4 -0
- opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_50f8f6.py +34 -0
- opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_66caf3.py +53 -0
- opencompass/configs/datasets/TheoremQA/README.md +69 -0
- opencompass/configs/datasets/TheoremQA/TheoremQA_5shot_gen_6f0af8.py +45 -0
- opencompass/configs/datasets/TheoremQA/TheoremQA_few_shot_examples.py +22 -0
- opencompass/configs/datasets/TheoremQA/TheoremQA_few_shot_examples_official.py +22 -0
- opencompass/configs/datasets/TheoremQA/TheoremQA_gen.py +4 -0
- opencompass/configs/datasets/TheoremQA/deprecated_TheoremQA_gen_424e0a.py +39 -0
- opencompass/configs/datasets/TheoremQA/deprecated_TheoremQA_gen_7009de.py +44 -0
- opencompass/configs/datasets/TheoremQA/deprecated_TheoremQA_gen_ef26ca.py +44 -0
- opencompass/configs/datasets/TheoremQA/deprecated_TheoremQA_post_v2_gen_2c2583.py +38 -0
- opencompass/configs/datasets/TheoremQA/deprecated_TheoremQA_post_v2_gen_ef26ca.py +45 -0
- opencompass/configs/datasets/XCOPA/XCOPA_ppl.py +4 -0
- opencompass/configs/datasets/XCOPA/XCOPA_ppl_54058d.py +31 -0
- opencompass/configs/datasets/commonsenseqa/commonsenseqa_7shot_cot_gen_734a22.py +115 -0
opencompass/configs/datasets/ARC_e/ARC_e_gen.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .ARC_e_gen_1e0de5 import ARC_e_datasets  # noqa: F401, F403
opencompass/configs/datasets/ARC_e/ARC_e_gen_1e0de5.py
ADDED
@@ -0,0 +1,44 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import ARCDataset
from opencompass.utils.text_postprocessors import first_option_postprocess

ARC_e_reader_cfg = dict(
    input_columns=['question', 'textA', 'textB', 'textC', 'textD'],
    output_column='answerKey')

ARC_e_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    prompt='Question: {question}\nA. {textA}\nB. {textB}\nC. {textC}\nD. {textD}\nAnswer:'
                )
            ], ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

ARC_e_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='ABCD'),
)

ARC_e_datasets = [
    dict(
        abbr='ARC-e',
        type=ARCDataset,
        path='opencompass/ai2_arc-easy-dev',
        name='ARC-Easy',
        reader_cfg=ARC_e_reader_cfg,
        infer_cfg=ARC_e_infer_cfg,
        eval_cfg=ARC_e_eval_cfg,
    )
]
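For context, a minimal sketch of how a dataset config like the one above is typically pulled into a top-level evaluation config via mmengine's read_base mechanism; the import path style follows the configs in this diff, and the model-import comment is a hypothetical placeholder, not part of this change:

# Sketch (not part of this diff): consuming the ARC-e dataset definition
# from a user-side evaluation config.
from mmengine.config import read_base

with read_base():
    from opencompass.configs.datasets.ARC_e.ARC_e_gen_1e0de5 import ARC_e_datasets
    # a model config would normally be imported here as well (hypothetical)

datasets = [*ARC_e_datasets]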
opencompass/configs/datasets/ARC_e/ARC_e_ppl.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .ARC_e_ppl_a450bd import ARC_e_datasets  # noqa: F401, F403
opencompass/configs/datasets/ARC_e/ARC_e_ppl_2ef631.py
ADDED
@@ -0,0 +1,37 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import ARCDataset

ARC_e_reader_cfg = dict(
    input_columns=['question', 'textA', 'textB', 'textC', 'textD'],
    output_column='answerKey')

ARC_e_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            opt: dict(
                round=[
                    dict(role='HUMAN', prompt=f'{{question}}\nA. {{textA}}\nB. {{textB}}\nC. {{textC}}\nD. {{textD}}'),
                    dict(role='BOT', prompt=f'Answer: {opt}'),
                ]
            ) for opt in ['A', 'B', 'C', 'D']
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

ARC_e_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

ARC_e_datasets = [
    dict(
        type=ARCDataset,
        abbr='ARC-e',
        path='opencompass/ai2_arc-easy-dev',
        name='ARC-Easy',
        reader_cfg=ARC_e_reader_cfg,
        infer_cfg=ARC_e_infer_cfg,
        eval_cfg=ARC_e_eval_cfg)
]
opencompass/configs/datasets/ARC_e/ARC_e_ppl_a450bd.py
ADDED
@@ -0,0 +1,54 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import ARCDataset

ARC_e_reader_cfg = dict(
    input_columns=['question', 'textA', 'textB', 'textC', 'textD'],
    output_column='answerKey')

ARC_e_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'A':
            dict(
                round=[
                    dict(role='HUMAN', prompt='Question: {question}\nAnswer: '),
                    dict(role='BOT', prompt='{textA}')
                ], ),
            'B':
            dict(
                round=[
                    dict(role='HUMAN', prompt='Question: {question}\nAnswer: '),
                    dict(role='BOT', prompt='{textB}')
                ], ),
            'C':
            dict(
                round=[
                    dict(role='HUMAN', prompt='Question: {question}\nAnswer: '),
                    dict(role='BOT', prompt='{textC}')
                ], ),
            'D':
            dict(
                round=[
                    dict(role='HUMAN', prompt='Question: {question}\nAnswer: '),
                    dict(role='BOT', prompt='{textD}')
                ], ),
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

ARC_e_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

ARC_e_datasets = [
    dict(
        type=ARCDataset,
        abbr='ARC-e',
        path='opencompass/ai2_arc-easy-dev',
        name='ARC-Easy',
        reader_cfg=ARC_e_reader_cfg,
        infer_cfg=ARC_e_infer_cfg,
        eval_cfg=ARC_e_eval_cfg)
]
opencompass/configs/datasets/ARC_e/ARC_e_ppl_d52a21.py
ADDED
@@ -0,0 +1,34 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import ARCDataset

ARC_e_reader_cfg = dict(
    input_columns=['question', 'textA', 'textB', 'textC', 'textD'],
    output_column='answerKey')

ARC_e_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'A': 'Question: {question}\nAnswer: {textA}',
            'B': 'Question: {question}\nAnswer: {textB}',
            'C': 'Question: {question}\nAnswer: {textC}',
            'D': 'Question: {question}\nAnswer: {textD}'
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

ARC_e_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

ARC_e_datasets = [
    dict(
        type=ARCDataset,
        abbr='ARC-e',
        path='opencompass/ai2_arc-easy-dev',
        name='ARC-Easy',
        reader_cfg=ARC_e_reader_cfg,
        infer_cfg=ARC_e_infer_cfg,
        eval_cfg=ARC_e_eval_cfg)
]
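The three ppl variants above differ only in how each answer option is rendered into a scored string. A short illustrative sketch (not OpenCompass internals) of the idea behind such perplexity-style templates:

# Each label maps to one complete candidate text; the predicted label is the
# one whose rendered text the model assigns the lowest perplexity.
def pick_label(candidates, ppl_of):
    # candidates: dict label -> rendered text; ppl_of: callable text -> perplexity
    return min(candidates, key=lambda label: ppl_of(candidates[label]))

candidates = {
    'A': 'Question: ...\nAnswer: first option text',
    'B': 'Question: ...\nAnswer: second option text',
}
print(pick_label(candidates, ppl_of=len))  # toy scorer for demonstration only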
opencompass/configs/datasets/CLUE_C3/CLUE_C3_gen.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .CLUE_C3_gen_8c358f import C3_datasets  # noqa: F401, F403
opencompass/configs/datasets/CLUE_C3/CLUE_C3_gen_8c358f.py
ADDED
@@ -0,0 +1,51 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import C3Dataset_V2
from opencompass.utils.text_postprocessors import first_capital_postprocess

C3_reader_cfg = dict(
    input_columns=[
        'question',
        'content',
        'choice0',
        'choice1',
        'choice2',
        'choice3',
        'choices',
    ],
    output_column='label',
)

C3_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='{content}\n问:{question}\nA. {choice0}\nB. {choice1}\nC. {choice2}\nD. {choice3}\n请从“A”,“B”,“C”,“D”中进行选择。\n答:',
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

C3_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_capital_postprocess),
)

C3_datasets = [
    dict(
        abbr='C3',
        type=C3Dataset_V2,
        path='./data/CLUE/C3/dev_0.json',
        reader_cfg=C3_reader_cfg,
        infer_cfg=C3_infer_cfg,
        eval_cfg=C3_eval_cfg,
    )
]
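The eval config above relies on first_capital_postprocess to reduce a free-form generation to a single option letter. A toy approximation of that behavior (illustrative only; the real implementation lives in opencompass.utils.text_postprocessors):

import re

# Keep only the first capital Latin letter of the model output,
# e.g. '答:B。因为……' -> 'B'.
def first_capital(text: str) -> str:
    match = re.search(r'[A-Z]', text)
    return match.group(0) if match else ''

assert first_capital('答:B。因为……') == 'B'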
opencompass/configs/datasets/CLUE_C3/CLUE_C3_ppl.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .CLUE_C3_ppl_e24a31 import C3_datasets  # noqa: F401, F403
opencompass/configs/datasets/CLUE_C3/CLUE_C3_ppl_56b537.py
ADDED
@@ -0,0 +1,36 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import C3Dataset

C3_reader_cfg = dict(
    input_columns=[
        'question', 'content', 'choice0', 'choice1', 'choice2', 'choice3',
        'choices'
    ],
    output_column='label')

C3_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: '文章:{content}\n问题:{question}\n答案:{choice0}',
            1: '文章:{content}\n问题:{question}\n答案:{choice1}',
            2: '文章:{content}\n问题:{question}\n答案:{choice2}',
            3: '文章:{content}\n问题:{question}\n答案:{choice3}'
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

C3_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

C3_datasets = [
    dict(
        type=C3Dataset,
        abbr='C3',
        path='./data/CLUE/C3/dev_0.json',
        reader_cfg=C3_reader_cfg,
        infer_cfg=C3_infer_cfg,
        eval_cfg=C3_eval_cfg)
]
opencompass/configs/datasets/CLUE_C3/CLUE_C3_ppl_e24a31.py
ADDED
@@ -0,0 +1,37 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import C3Dataset

C3_reader_cfg = dict(
    input_columns=[
        'question', 'content', 'choice0', 'choice1', 'choice2', 'choice3',
        'choices'
    ],
    output_column='label')

C3_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            i: dict(round=[
                dict(role='HUMAN', prompt='文章:{content}\n问题:{question}'),
                dict(role='BOT', prompt=f'答案:{{choice{i}}}')
            ])
            for i in range(4)
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

C3_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

C3_datasets = [
    dict(
        type=C3Dataset,
        abbr='C3',
        path='./data/CLUE/C3/dev_0.json',
        reader_cfg=C3_reader_cfg,
        infer_cfg=C3_infer_cfg,
        eval_cfg=C3_eval_cfg)
]
opencompass/configs/datasets/ChemBench/ChemBench_gen.py
ADDED
@@ -0,0 +1,77 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import FixKRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import ChemBenchDataset
from opencompass.utils.text_postprocessors import first_capital_postprocess


chembench_reader_cfg = dict(
    input_columns=['input', 'A', 'B', 'C', 'D'],
    output_column='target',
    train_split='dev')

chembench_all_sets = [
    'Name_Conversion',
    'Property_Prediction',
    'Mol2caption',
    'Caption2mol',
    'Product_Prediction',
    'Retrosynthesis',
    'Yield_Prediction',
    'Temperature_Prediction',
    'Solvent_Prediction'
]


chembench_datasets = []
for _name in chembench_all_sets:
    # _hint = f'There is a single choice question about {_name.replace("_", " ")}. Answer the question by replying A, B, C or D.'
    _hint = f'There is a single choice question about chemistry. Answer the question by replying A, B, C or D.'

    chembench_infer_cfg = dict(
        ice_template=dict(
            type=PromptTemplate,
            template=dict(round=[
                dict(
                    role='HUMAN',
                    prompt=f'{_hint}\nQuestion: {{input}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nAnswer: '
                ),
                dict(role='BOT', prompt='{target}\n')
            ]),
        ),
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                begin='</E>',
                round=[
                    dict(
                        role='HUMAN',
                        prompt=f'{_hint}\nQuestion: {{input}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nAnswer: '
                    ),
                ],
            ),
            ice_token='</E>',
        ),
        retriever=dict(type=FixKRetriever, fix_id_list=[0, 1, 2, 3, 4]),
        inferencer=dict(type=GenInferencer),
    )

    chembench_eval_cfg = dict(
        evaluator=dict(type=AccEvaluator),
        pred_postprocessor=dict(type=first_capital_postprocess))

    chembench_datasets.append(
        dict(
            abbr=f'ChemBench_{_name}',
            type=ChemBenchDataset,
            path='opencompass/ChemBench',
            name=_name,
            reader_cfg=chembench_reader_cfg,
            infer_cfg=chembench_infer_cfg,
            eval_cfg=chembench_eval_cfg,
        ))

del _name, _hint
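The config above pairs a FixKRetriever with an ice_token: the five fixed examples are rendered through ice_template and substituted for '</E>' in prompt_template. A self-contained sketch of that assembly logic (illustrative, not OpenCompass internals):

# Render examples 0-4 as in-context demonstrations, then splice them in place
# of the ice_token placeholder at the head of the final prompt.
def assemble_prompt(examples, question, fix_id_list=(0, 1, 2, 3, 4)):
    ice = ''.join(
        f"Question: {examples[i]['input']}\nAnswer: {examples[i]['target']}\n"
        for i in fix_id_list)
    template = '</E>Question: {input}\nAnswer: '
    return template.replace('</E>', ice).format(input=question)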
opencompass/configs/datasets/FewCLUE_tnews/FewCLUE_tnews_gen.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .FewCLUE_tnews_gen_b90e4a import tnews_datasets  # noqa: F401, F403
opencompass/configs/datasets/FewCLUE_tnews/FewCLUE_tnews_gen_b90e4a.py
ADDED
@@ -0,0 +1,75 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import TNewsDatasetV2
from opencompass.utils.text_postprocessors import first_capital_postprocess

tnews_reader_cfg = dict(
    input_columns='sentence',
    output_column='label_desc2',
)

tnews_labels = [
    '农业新闻',  # news_agriculture
    '旅游新闻',  # news_travel
    '游戏新闻',  # news_game
    '科技类别公司新闻',  # news_tech
    '体育类别新闻',  # news_sports
    '初升高教育新闻',  # news_edu
    '娱乐圈新闻',  # news_entertainment
    '投资资讯',  # news_finance
    '军事类别常识',  # news_military
    '车辆新闻',  # news_car
    '楼市新闻',  # news_house
    '环球不含中国类别新闻',  # news_world
    '书籍文化历史类别新闻',  # news_culture
    '故事类别新闻',  # news_story
    '股票市场类别新闻',  # news_stock
]
_tnews_options_list_str = '\n'.join(f'{chr(ord("A") + i)}. {tnews_labels[i]}'
                                    for i in range(len(tnews_labels)))
_tnews_options_range_str = ','.join(f'“{chr(ord("A") + i)}”'
                                    for i in range(len(tnews_labels)))

tnews_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=f'{{sentence}}\n请判断上述内容属于什么新闻?\n{_tnews_options_list_str}\n请从{_tnews_options_range_str}中进行选择。\n答:',
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

tnews_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_capital_postprocess),
)

tnews_datasets = [
    dict(
        abbr='tnews-dev',
        type=TNewsDatasetV2,
        path='./data/FewCLUE/tnews/dev_few_all.json',
        reader_cfg=tnews_reader_cfg,
        infer_cfg=tnews_infer_cfg,
        eval_cfg=tnews_eval_cfg,
    ),
    dict(
        abbr='tnews-test',
        type=TNewsDatasetV2,
        path='./data/FewCLUE/tnews/test_public.json',
        reader_cfg=tnews_reader_cfg,
        infer_cfg=tnews_infer_cfg,
        eval_cfg=tnews_eval_cfg,
    ),
]

del _tnews_options_list_str, _tnews_options_range_str
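A quick sanity check of the chr(ord('A') + i) helpers above, showing the strings they generate for a two-label subset:

labels = ['农业新闻', '旅游新闻']
# letter-enumerated option list, one label per line
options = '\n'.join(f'{chr(ord("A") + i)}. {labels[i]}' for i in range(len(labels)))
assert options == 'A. 农业新闻\nB. 旅游新闻'
# quoted, comma-separated letter range for the instruction text
choices = ','.join(f'“{chr(ord("A") + i)}”' for i in range(len(labels)))
assert choices == '“A”,“B”'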
opencompass/configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .FewCLUE_tnews_ppl_d10e8a import tnews_datasets  # noqa: F401, F403
opencompass/configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl_7d1c07.py
ADDED
@@ -0,0 +1,43 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import TNewsDataset

tnews_reader_cfg = dict(input_columns='sentence', output_column='label_desc2')

tnews_labels = [
    '农业新闻', '旅游新闻', '游戏新闻', '科技类别公司新闻', '体育类别新闻', '初升高教育新闻', '娱乐圈新闻', '投资资讯',
    '军事类别常识', '车辆新闻', '楼市新闻', '环球不含中国类别新闻', '书籍文化历史类别新闻', '故事类别新闻', '股票市场类别新闻'
]

tnews_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={lb: f'{{sentence}}这篇新闻属于:{lb}'
                  for lb in tnews_labels}),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

tnews_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

tnews_datasets = [
    dict(
        type=TNewsDataset,
        path='json',
        abbr='tnews-dev',
        data_files='./data/FewCLUE/tnews/dev_few_all.json',
        split='train',
        reader_cfg=tnews_reader_cfg,
        infer_cfg=tnews_infer_cfg,
        eval_cfg=tnews_eval_cfg),
    dict(
        type=TNewsDataset,
        path='json',
        abbr='tnews-test',
        data_files='./data/FewCLUE/tnews/test_public.json',
        split='train',
        reader_cfg=tnews_reader_cfg,
        infer_cfg=tnews_infer_cfg,
        eval_cfg=tnews_eval_cfg)
]
opencompass/configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl_d10e8a.py
ADDED
@@ -0,0 +1,48 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import TNewsDataset

tnews_reader_cfg = dict(input_columns='sentence', output_column='label_desc2')

tnews_labels = [
    '农业新闻', '旅游新闻', '游戏新闻', '科技类别公司新闻', '体育类别新闻', '初升高教育新闻', '娱乐圈新闻', '投资资讯',
    '军事类别常识', '车辆新闻', '楼市新闻', '环球不含中国类别新闻', '书籍文化历史类别新闻', '故事类别新闻', '股票市场类别新闻'
]

tnews_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            lb: dict(round=[
                dict(role='HUMAN', prompt='{sentence}\n上述内容属于什么新闻?'),
                dict(role='BOT', prompt=lb)
            ])
            for lb in tnews_labels
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

tnews_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

tnews_datasets = [
    dict(
        type=TNewsDataset,
        path='json',
        abbr='tnews-dev',
        data_files='./data/FewCLUE/tnews/dev_few_all.json',
        split='train',
        reader_cfg=tnews_reader_cfg,
        infer_cfg=tnews_infer_cfg,
        eval_cfg=tnews_eval_cfg),
    dict(
        type=TNewsDataset,
        path='json',
        abbr='tnews-test',
        data_files='./data/FewCLUE/tnews/test_public.json',
        split='train',
        reader_cfg=tnews_reader_cfg,
        infer_cfg=tnews_infer_cfg,
        eval_cfg=tnews_eval_cfg)
]
opencompass/configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl_fff486.py
ADDED
@@ -0,0 +1,48 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import TNewsDataset

tnews_reader_cfg = dict(input_columns='sentence', output_column='label_desc2')

tnews_labels = [
    '农业新闻', '旅游新闻', '游戏新闻', '科技类别公司新闻', '体育类别新闻', '初升高教育新闻', '娱乐圈新闻', '投资资讯',
    '军事类别常识', '车辆新闻', '楼市新闻', '环球不含中国类别新闻', '书籍文化历史类别新闻', '故事类别新闻', '股票市场类别新闻'
]

tnews_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            lb: dict(round=[
                dict(role='HUMAN', prompt='以下内容属于什么新闻:{sentence}。'),
                dict(role='BOT', prompt=lb)
            ])
            for lb in tnews_labels
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

tnews_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

tnews_datasets = [
    dict(
        type=TNewsDataset,
        path='json',
        abbr='tnews-dev',
        data_files='./data/FewCLUE/tnews/dev_few_all.json',
        split='train',
        reader_cfg=tnews_reader_cfg,
        infer_cfg=tnews_infer_cfg,
        eval_cfg=tnews_eval_cfg),
    dict(
        type=TNewsDataset,
        path='json',
        abbr='tnews-test',
        data_files='./data/FewCLUE/tnews/test_public.json',
        split='train',
        reader_cfg=tnews_reader_cfg,
        infer_cfg=tnews_infer_cfg,
        eval_cfg=tnews_eval_cfg)
]
opencompass/configs/datasets/MathBench/deprecated_mathbench_2024_gen_de9ff9.py
ADDED
@@ -0,0 +1,108 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator
from opencompass.datasets import MathBenchDataset, mathbench_postprocess
from opencompass.utils.text_postprocessors import first_option_postprocess

single_choice_prompts = {
    'single_choice_cn_with_reasoning': '以下是一道关于数学的单项选择题,请你一步一步推理,并在最后用“所以答案为选项X”给出答案,其中“X”为选项A,B,C,D中你认为正确的选项。下面是你要回答的问题\n{question}\n让我们一步一步思考:\n',
    'single_choice_cn': '以下是一道关于数学的单项选择题,请你直接回答正确答案的选项序号。\n下面是你要回答的题目:\n{question}\n答案选项:',
    'single_choice_en_with_reasoning': "Here is a multiple-choice question about mathematics. Please reason through it step by step, and at the end, provide your answer option with 'Therefore, the correct answer is option X', Where 'X' is the correct option you think from A,B,C,D. Here is the question you need to answer:\n{question}\nLet's think step by step:",
    'single_choice_en': 'Here is a multiple-choice question about mathematics. Please provide the correct answer option directly.\nHere is the question you need to answer:\n{question}\nAnswer option:',
}

cloze_prompts = {
    'cloze_cn': [
        dict(role='HUMAN', prompt='Q: 林中有15棵树。林务工人员今天将在林中种植树木。完成后,将有21棵树。林务工人员今天种植了多少棵树?'),
        dict(role='BOT', prompt='A: 我们从15棵树开始。后来有21棵树。差值必定是他们种植的树木数量。所以,他们必须种植了21 - 15 = 6棵树。答案是 6\n'),
        dict(role='HUMAN', prompt='Q: 如果停车场有3辆车,又有2辆车进来,停车场里有多少辆车?'),
        dict(role='BOT', prompt='A: 停车场已经有3辆车。又进来了2辆车。现在有3 + 2 = 5辆车。答案是 5\n'),
        dict(role='HUMAN', prompt='Q: 黎恩有32块巧克力,她的妹妹有42块。如果他们吃了35块,他们总共剩下多少块?'),
        dict(role='BOT', prompt='A: 黎恩有32块巧克力,Leah的妹妹有42块。这意味着原本有32 + 42 = 74块巧克力。被吃掉了35块。所以他们总共还剩下74 - 35 = 39块巧克力。答案是 39\n'),
        dict(role='HUMAN', prompt='Q: 杰森有20个棒棒糖。他给丹妮一些棒棒糖。现在Jason只剩下12个棒棒糖。杰森给丹妮多少个棒棒糖?'),
        dict(role='BOT', prompt='A: 杰森有20个棒棒糖。因为他现在只剩下12个,所以他必须把剩下的都给了丹妮。他给丹妮的棒棒糖数量必定是20 - 12 = 8个。答案是 8\n'),
        dict(role='HUMAN', prompt='Q: 莎莎有五个玩具。在圣诞节,他从他的爸爸和妈妈那里各得到了两个玩具。现在他有多少个玩具?'),
        dict(role='BOT', prompt='A: 她有5个玩具。他从妈妈那里得到了2个,所以之后他有5 + 2 = 7个玩具。然后他从爸爸那里得到了2个,所以总共他有7 + 2 = 9个玩具。答案是 9\n'),
        dict(role='HUMAN', prompt='Q: 服务器房间里有九台电脑。从周一到周四每天增加五台电脑。现在服务器房里有多少台电脑?'),
        dict(role='BOT', prompt='A: 从周一到周四有4天。每天增加5台电脑。这意味着总共增加了4 * 5 = 20台电脑。一开始有9台电脑,所以现在有9 + 20 = 29台电脑。答案是 29\n'),
        dict(role='HUMAN', prompt='Q: 迈克尔有58个高尔夫球。星期二,他丢失了23个高尔夫球。星期三,他又丢失了2个。星期三结束时他还剩下多少个高尔夫球?'),
        dict(role='BOT', prompt='A: 迈克尔一开始有58个球。星期二他丢失了23个,所以之后他还剩下58 - 23 = 35个球。星期三他又丢失了2个,所以现在他还剩下35 - 2 = 33个球。答案是 33\n'),
        dict(role='HUMAN', prompt='Q: 奥利弗有23美元。她用每个3美元的价格买了五个百吉饼。她还剩下多少钱?'),
        dict(role='BOT', prompt='A: 她以每个3美元的价格买了5个百吉饼。这意味着她在百吉饼上花费了5 * 3 = 15美元。她一开始有23美元,所以现在她还剩下23 - 15 = 8美元。答案是 8\n'),
        dict(role='HUMAN', prompt='Q: {question}'),
        dict(role='BOT', prompt='A: {answer}'),
    ],
    'cloze_en': [
        dict(role='HUMAN', prompt='Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?'),
        dict(role='BOT', prompt='A: We start with 15 trees. Later we have 21 trees. The difference must be the number of trees they planted. So, they must have planted 21 - 15 = 6 trees. The answer is 6.\n'),
        dict(role='HUMAN', prompt='Q: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?'),
        dict(role='BOT', prompt='A: There are 3 cars in the parking lot already. 2 more arrive. Now there are 3 + 2 = 5 cars. The answer is 5.\n'),
        dict(role='HUMAN', prompt='Q: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?'),
        dict(role='BOT', prompt="A: Leah had 32 chocolates and Leah's sister had 42. That means there were originally 32 + 42 = 74 chocolates. 35 have been eaten. So in total they still have 74 - 35 = 39 chocolates. The answer is 39.\n"),
        dict(role='HUMAN', prompt='Q: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?'),
        dict(role='BOT', prompt='A: Jason had 20 lollipops. Since he only has 12 now, he must have given the rest to Denny. The number of lollipops he has given to Denny must have been 20 - 12 = 8 lollipops. The answer is 8.\n'),
        dict(role='HUMAN', prompt='Q: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?'),
        dict(role='BOT', prompt='A: He has 5 toys. He got 2 from mom, so after that he has 5 + 2 = 7 toys. Then he got 2 more from dad, so in total he has 7 + 2 = 9 toys. The answer is 9.\n'),
        dict(role='HUMAN', prompt='Q: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?'),
        dict(role='BOT', prompt='A: There are 4 days from monday to thursday. 5 computers were added each day. That means in total 4 * 5 = 20 computers were added. There were 9 computers in the beginning, so now there are 9 + 20 = 29 computers. The answer is 29.\n'),
        dict(role='HUMAN', prompt='Q: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?'),
        dict(role='BOT', prompt='A: Michael initially had 58 balls. He lost 23 on Tuesday, so after that he has 58 - 23 = 35 balls. On Wednesday he lost 2 more so now he has 35 - 2 = 33 balls. The answer is 33.\n'),
        dict(role='HUMAN', prompt='Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?'),
        dict(role='BOT', prompt='A: She bought 5 bagels for $3 each. This means she spent 5 * $3 = $15 on the bagels. She had $23 in beginning, so now she has $23 - $15 = $8. The answer is 8.\n'),
        dict(role='HUMAN', prompt='Q: {question}'),
        dict(role='BOT', prompt='A: {answer}\n'),
    ]}

mathbench_sets = {
    'college': ['single_choice_cn', 'single_choice_en'],
    'high': ['single_choice_cn', 'single_choice_en'],
    'middle': ['single_choice_cn', 'single_choice_en'],
    'primary': ['cloze_cn', 'cloze_en'],
    'calculate': ['cloze_en'],
}

# Generate reasoning path or not, only for single choice
with_reasoning = True

# Use circular evaluation or not
with_circular_eval = True

mathbench_datasets = []

for _split in list(mathbench_sets.keys()):
    for _name in mathbench_sets[_split]:
        mathbench_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(
                            role='HUMAN',
                            prompt=single_choice_prompts[_name + '_with_reasoning'] if with_reasoning else single_choice_prompts[_name],
                        ),
                        dict(role='BOT', prompt='{answer}')] if 'choice' in _name else cloze_prompts[_name],
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_out_len=512),
        )

        mathbench_eval_cfg = dict(
            evaluator=dict(type=CircularEvaluator if 'choice' in _name and with_circular_eval else AccEvaluator),
            pred_postprocessor=dict(type=first_option_postprocess, options='ABCD') if 'single_choice' in _name else dict(type=mathbench_postprocess, name=_name))

        mathbench_datasets.append(
            dict(
                abbr='mathbench-' + _split + '-' + _name,
                type=MathBenchDataset,
                path=f'./data/mathbench/{_split}',
                name=_name,
                with_circular=with_circular_eval,
                reader_cfg=dict(
                    input_columns=['question'],
                    output_column='answer'
                ),
                infer_cfg=mathbench_infer_cfg,
                eval_cfg=mathbench_eval_cfg,
            ))
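For the 'choice' subsets above, CircularEvaluator asks each question once per rotation of its options. A toy sketch of the rotation bookkeeping (illustrative; that the strictest reported metric counts a question as correct only when every rotation is answered correctly is an assumption about the evaluator's behavior, not taken from this diff):

def rotations(options):
    # e.g. four option contents -> 4 rotated orderings
    return [options[i:] + options[:i] for i in range(len(options))]

def strictly_correct(per_rotation_results):
    # per_rotation_results: list of booleans, one per rotated ordering
    return all(per_rotation_results)

print(rotations(['w', 'x', 'y', 'z']))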
opencompass/configs/datasets/MathBench/deprecated_mathbench_agent_gen_48ec47.py
ADDED
@@ -0,0 +1,128 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import AgentInferencer
from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator
from opencompass.datasets import MathBenchDataset, mathbench_postprocess
from opencompass.utils.text_postprocessors import first_option_postprocess


PROMPT_EN = {
    'FEWSHOT_INSTRUCTION_CLOZE' : [
        dict(role='HUMAN', prompt='Mark\'s basketball team scores 25 2 pointers, 8 3 pointers and 10 free throws. Their opponents score double the 2 pointers but half the 3 pointers and free throws. What\'s the total number of points scored by both teams added together?'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:```python\ndef solution():\n    mark_pointers_2 = 25 * 2\n    mark_pointers_3 = 8 * 3\n    mark_free_throws = 10 * 1\n    mark_points_scored = mark_pointers_2 + mark_pointers_3 + mark_free_throws\n    opponents_pointers_2 = mark_pointers_2 * 2\n    opponents_pointers_3 = mark_pointers_3 / 2\n    opponents_free_throws = mark_free_throws / 2\n    opponents_points_scored = opponents_pointers_2 + opponents_pointers_3 + opponents_free_throws\n    total_points_scored = mark_points_scored + opponents_points_scored\n    result = total_points_scored\n    return result\n```'),
        dict(role='SYSTEM', prompt='Response:210'),
        dict(role='BOT', prompt='Thought: According to the response, I got the answer\nFinalAnswer: 210'),

        dict(role='HUMAN', prompt='Bella has two times as many marbles as frisbees. She also has 20 more frisbees than deck cards. If she buys 2/5 times more of each item, what would be the total number of the items she will have if she currently has 60 marbles?'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:```python\ndef solution():\n    marbles = 60\n    num_increased_marbles = marbles * 2 / 5\n    num_total_marbles = marbles + num_increased_marbles\n    frisbees = marbles / 2\n    num_increased_frisbees = frisbees * 2 / 5\n    num_total_frisbees = frisbees + num_increased_frisbees\n    deck_cards = frisbees - 20\n    num_increased_deck_cards = deck_cards * 2 / 5\n    num_total_deck_cards = deck_cards + num_increased_deck_cards\n    num_total = num_total_marbles + num_total_frisbees + num_total_deck_cards\n    result = num_total\n    return result\n```'),
        dict(role='SYSTEM', prompt='Response:140'),
        dict(role='BOT', prompt='Thought: According to the response, I got the answer\nFinalAnswer: 140'),

        dict(role='HUMAN', prompt='A group of 4 fruit baskets contains 9 apples, 15 oranges, and 14 bananas in the first three baskets and 2 less of each fruit in the fourth basket. How many fruits are there?'),
        dict(role='BOT', prompt="""Tool:PythonInterpreter\nTool Input:```python\ndef solution():\n    num_fruits_per_first_three_basket = 9 + 15 + 14\n    num_fruits_first_three_basket = num_fruits_per_first_three_basket * 3\n    num_apple_fourth_basket = 9 - 2\n    num_orange_fourth_basket = 15 - 2\n    num_banana_fourth_basket = 14 - 2\n    num_fruits_fourth_basket = num_apple_fourth_basket + num_orange_fourth_basket + num_banana_fourth_basket\n    num_fruits_total = num_fruits_first_three_basket + num_fruits_fourth_basket\n    result = num_fruits_total\n    return result\n```"""),
        dict(role='SYSTEM', prompt='Response:146'),
        dict(role='BOT', prompt='Thought: According to the response, I got the answer\nFinalAnswer: 146'),

        dict(role='HUMAN', prompt='{question}'),
    ],
    'FEWSHOT_INSTRUCTION_CHOICE' : [
        dict(role='HUMAN', prompt='Given point P(-1,4) lies on the graph of the inverse proportionality function $y=\\frac{{k}}{{x}}$ (k≠0), what is the value of k? A. $-\\frac{{1}}{{4}}$ B. $\\frac{{1}}{{4}}$ C. $4$ D. $-4$'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:```python\nfrom sympy import solve, symbols, Ne, Eq\ndef solution():\n    k = symbols(\'k\')\n    result = solve([Eq(4, k / (-1)), Ne(k, 0)], k, dict=True)\n    return result\n```'),
        dict(role='SYSTEM', prompt='Response:{{Eq(k, -4)}}'),
        dict(role='BOT', prompt='Thought: Substituting point P into the function yields the value of k as $-4$\nFinalAnswer: D'),

        dict(role='HUMAN', prompt='The graph of the power function $y=(x)$ passes through the point$ (2, \\dfrac {{1}}{{4}}) $, what is the value of $f(-3)$? A. $\\frac{{1}}{{9}}$ B. $\\frac{{1}}{{8}})=196-x$ C. $\\frac{{2}}{{9}}$ D. $\\frac{{1}}{{4}}$'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:```python\nfrom sympy import *\ndef solution():\n    x, y, k = symbols(\'x y k\')\n    eq1 = Eq(2**k, Rational(1, 4))\n    k_value = solve(eq1, k)[0]\n    y = x**k_value\n    result = y.subs(x, -3)\n    return result\n```'),
        dict(role='SYSTEM', prompt='Response:1/9'),
        dict(role='BOT', prompt='Thought: The functional expression of the power function is $y=x^{{-2}}$. Substituting $x=-3$ yields $y=$\\frac{{1}}{{9}}$\nFinalAnswer: A'),

        dict(role='HUMAN', prompt='If $3 x-y=12$, what is the value of $\\frac{8^{x}}{2^{y}} ?$\nA. The value cannot be determined from the information given.\nB. $2^{12}$\nC. 4\nD. $8^{2}$'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:```python\nfrom sympy import symbols, Eq, solve\n\ndef solution():\n    x, y = symbols(\'x y\')\n    equation = Eq(3*x - y, 12)\n\n    y_in_terms_of_x = solve(equation, y)[0]\n    expression = 8**x / 2**y_in_terms_of_x\n    result = expression.simplify()\n    return result\n```'),
        dict(role='SYSTEM', prompt='Response:2**12'),
        dict(role='BOT', prompt='Thought: The value of $\\frac{8^{x}}{2^{y}}$ is $2^{12}$\nFinalAnswer: B'),

        dict(role='HUMAN', prompt='{question}'),
    ]
}

PROMPT_CN = {
    'FEWSHOT_INSTRUCTION_CLOZE' : [
        dict(role='HUMAN', prompt='Mark的篮球队得到25个2分球,8个3分球和10个罚球。他们的对手得到2分球的两倍,但3分球和罚球的一半。两队得分的总和是多少?'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:```python\ndef solution():\n    mark_pointers_2 = 25 * 2\n    mark_pointers_3 = 8 * 3\n    mark_free_throws = 10 * 1\n    mark_points_scored = mark_pointers_2 + mark_pointers_3 + mark_free_throws\n    opponents_pointers_2 = mark_pointers_2 * 2\n    opponents_pointers_3 = mark_pointers_3 / 2\n    opponents_free_throws = mark_free_throws / 2\n    opponents_points_scored = opponents_pointers_2 + opponents_pointers_3 + opponents_free_throws\n    total_points_scored = mark_points_scored + opponents_points_scored\n    result = total_points_scored\n    return result\n```'),
        dict(role='SYSTEM', prompt='Response:210'),
        dict(role='BOT', prompt='Thought: 根据回答,我得到了答案\nFinalAnswer: 210'),

        dict(role='HUMAN', prompt='Bella有两倍于飞盘的弹珠。她还比卡片多20个飞盘。如果她买每种物品多2/5,她会有多少总数的物品,如果她现在有60颗弹珠?'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:```python\ndef solution():\n    marbles = 60\n    num_increased_marbles = marbles * 2 / 5\n    num_total_marbles = marbles + num_increased_marbles\n    frisbees = marbles / 2\n    num_increased_frisbees = frisbees * 2 / 5\n    num_total_frisbees = frisbees + num_increased_frisbees\n    deck_cards = frisbees - 20\n    num_increased_deck_cards = deck_cards * 2 / 5\n    num_total_deck_cards = deck_cards + num_increased_deck_cards\n    num_total = num_total_marbles + num_total_frisbees + num_total_deck_cards\n    result = num_total\n    return result\n```'),
        dict(role='SYSTEM', prompt='Response:140'),
        dict(role='BOT', prompt='Thought: 根据回答,我得到了答案\nFinalAnswer: 140'),

        dict(role='HUMAN', prompt='一个有4个水果篮子,前三个篮子里有9个苹果、15个橙子和14个香蕉,第四个篮子里每种水果都少2个。总共有多少水果?'),
        dict(role='BOT', prompt="""Tool:PythonInterpreter\nTool Input:```python\ndef solution():\n    num_fruits_per_first_three_basket = 9 + 15 + 14\n    num_fruits_first_three_basket = num_fruits_per_first_three_basket * 3\n    num_apple_fourth_basket = 9 - 2\n    num_orange_fourth_basket = 15 - 2\n    num_banana_fourth_basket = 14 - 2\n    num_fruits_fourth_basket = num_apple_fourth_basket + num_orange_fourth_basket + num_banana_fourth_basket\n    num_fruits_total = num_fruits_first_three_basket + num_fruits_fourth_basket\n    result = num_fruits_total\n    return result\n```"""),
        dict(role='SYSTEM', prompt='Response:146'),
        dict(role='BOT', prompt='Thought: 根据回答,我得到了答案\nFinalAnswer: 146'),

        dict(role='HUMAN', prompt='{question}'),
    ],
    'FEWSHOT_INSTRUCTION_CHOICE' : [
        dict(role='HUMAN', prompt='已知点P(-1,4)在反比例函数$y=\\frac{{k}}{{x}}$ (k≠0)的图象上,则k的值是____'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:```python\nfrom sympy import solve, symbols, Ne, Eq\ndef solution():\n    k = symbols(\'k\')\n    result = solve([Eq(4, k / (-1)), Ne(k, 0)], k, dict=True)\n    return result\n```'),
        dict(role='SYSTEM', prompt='Response:{{Eq(k, -4)}}'),
        dict(role='BOT', prompt='Thought: 将点 P 带入函数解出 k 的值为 $-4$\nFinalAnswer: D'),

        dict(role='HUMAN', prompt='幂函数$ y=(x) $的图象经过点$ (2, \\dfrac {{1}}{{4}}) $,则$ f(-3) $的值为 ______ .'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:```python\nfrom sympy import *\ndef solution():\n    x, y, k = symbols(\'x y k\')\n    eq1 = Eq(2**k, Rational(1, 4))\n    k_value = solve(eq1, k)[0]\n    y = x**k_value\n    result = y.subs(x, -3)\n    return result\n```'),
        dict(role='SYSTEM', prompt='Response:1/9'),
        dict(role='BOT', prompt='Thought: 求出幂函数的函数表达式为 $y=x^{{-2}}$,代入 $x=-3$ 得到 $y=$\\frac{{1}}{{9}}$\nFinalAnswer: A'),

        dict(role='HUMAN', prompt='如果$3 x-y=12$,则$\\frac{8^{x}}{2^{y}}$的值是多少?\nA. 无法从给定的信息中确定值。\nB. $2^{12}$\nC. 4\nD. $8^{2}$'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:```python\nfrom sympy import symbols, Eq, solve\n\ndef solution():\n    x, y = symbols(\'x y\')\n    equation = Eq(3*x - y, 12)\n\n    y_in_terms_of_x = solve(equation, y)[0]\n    expression = 8**x / 2**y_in_terms_of_x\n    result = expression.simplify()\n    return result\n```'),
        dict(role='SYSTEM', prompt='Response:2**12'),
        dict(role='BOT', prompt='Thought: $\\frac{8^{x}}{2^{y}}$的值是$2^{12}$\nFinalAnswer: B'),

        dict(role='HUMAN', prompt='{question}'),
    ]
}

mathbench_sets = {
    'college': ['single_choice_cn', 'cloze_en'],
    'high': ['single_choice_cn', 'single_choice_en'],
    'middle': ['single_choice_cn'],
    'primary': ['cloze_cn'],
    'primary_refine': ['refine_cloze_cn']
}

# Use circular evaluation or not
with_circular_eval = True

mathbench_agent_datasets = []

for _split in list(mathbench_sets.keys()):
    for _name in mathbench_sets[_split]:
        prompt_example = PROMPT_CN if '_cn' in _name else PROMPT_EN
        mathbench_infer_cfg = dict(
            prompt_template=dict(type=PromptTemplate,
                                 template=dict(
                                     round=prompt_example['FEWSHOT_INSTRUCTION_CLOZE'] if 'cloze' in _name else prompt_example['FEWSHOT_INSTRUCTION_CHOICE'])),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=AgentInferencer)
        )

        mathbench_eval_cfg = dict(
            evaluator=dict(type=CircularEvaluator if 'choice' in _name and with_circular_eval else AccEvaluator),
            pred_postprocessor=dict(type=first_option_postprocess, options='ABCD') if 'single_choice' in _name else dict(type=mathbench_postprocess, name=_name))

        mathbench_agent_datasets.append(
            dict(
                abbr='mathbench-' + _split + '-' + _name + '-agent',
                type=MathBenchDataset,
                path=f'./data/mathbench/{_split}',
                name=_name,
                with_circular=with_circular_eval,
                reader_cfg=dict(
                    input_columns=['question'],
                    output_column='answer'
                ),
                infer_cfg=mathbench_infer_cfg,
                eval_cfg=mathbench_eval_cfg,
            ))
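The agent few-shot examples above all follow the same three-turn protocol, which the AgentInferencer replays at inference time. Schematically (a sketch of the turn structure taught by the prompts, not an executable agent):

# Turn structure encoded in the few-shot demonstrations:
#   BOT    -> emits a PythonInterpreter tool call with code in its Tool Input
#   SYSTEM -> returns 'Response:<execution result>'
#   BOT    -> closes with 'Thought: ...' and a 'FinalAnswer: <answer>' line
protocol = [
    ('BOT', 'Tool:PythonInterpreter\nTool Input:<code>'),
    ('SYSTEM', 'Response:<execution result>'),
    ('BOT', 'Thought: <reasoning>\nFinalAnswer: <answer>'),
]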
opencompass/configs/datasets/MathBench/deprecated_mathbench_agent_gen_fbe13b.py
ADDED
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# GONNA BE DEPRECATED, DON'T USE IT
|
2 |
+
|
3 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
4 |
+
from opencompass.openicl.icl_retriever import ZeroRetriever
|
5 |
+
from opencompass.openicl.icl_inferencer import AgentInferencer
|
6 |
+
from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator
|
7 |
+
from opencompass.datasets import MathBenchDataset, mathbench_postprocess
|
8 |
+
from opencompass.utils.text_postprocessors import first_option_postprocess
|
9 |
+
|
10 |
+
|
11 |
+
PROMPT_EN = {
|
12 |
+
'FEWSHOT_INSTRUCTION_CLOZE' : [
|
13 |
+
dict(role='HUMAN', prompt='Mark\'s basketball team scores 25 2 pointers, 8 3 pointers and 10 free throws. Their opponents score double the 2 pointers but half the 3 pointers and free throws. What\'s the total number of points scored by both teams added together?'),
|
14 |
+
dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:def solution():\n mark_pointers_2 = 25 * 2\n mark_pointers_3 = 8 * 3\n mark_free_throws = 10 * 1\n mark_points_scored = mark_pointers_2 + mark_pointers_3 + mark_free_throws\n opponents_pointers_2 = mark_pointers_2 * 2\n opponents_pointers_3 = mark_pointers_3 / 2\n opponents_free_throws = mark_free_throws / 2\n opponents_points_scored = opponents_pointers_2 + opponents_pointers_3 + opponents_free_throws\n total_points_scored = mark_points_scored + opponents_points_scored\n result = total_points_scored\n return result'),
|
15 |
+
dict(role='SYSTEM', prompt='Response:210'),
|
16 |
+
dict(role='BOT', prompt='Thought: According to the response, I got the answer\nFinalAnswer: 210'),
|
17 |
+
|
18 |
+
dict(role='HUMAN', prompt='Bella has two times as many marbles as frisbees. She also has 20 more frisbees than deck cards. If she buys 2/5 times more of each item, what would be the total number of the items she will have if she currently has 60 marbles?'),
|
19 |
+
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:def solution():\n    marbles = 60\n    num_increased_marbles = marbles * 2 / 5\n    num_total_marbles = marbles + num_increased_marbles\n    frisbees = marbles / 2\n    num_increased_frisbees = frisbees * 2 / 5\n    num_total_frisbees = frisbees + num_increased_frisbees\n    deck_cards = frisbees - 20\n    num_increased_deck_cards = deck_cards * 2 / 5\n    num_total_deck_cards = deck_cards + num_increased_deck_cards\n    num_total = num_total_marbles + num_total_frisbees + num_total_deck_cards\n    result = num_total\n    return result'),
        dict(role='SYSTEM', prompt='Response:140'),
        dict(role='BOT', prompt='Thought: According to the response, I got the answer\nFinalAnswer: 140'),

        dict(role='HUMAN', prompt='A group of 4 fruit baskets contains 9 apples, 15 oranges, and 14 bananas in the first three baskets and 2 less of each fruit in the fourth basket. How many fruits are there?'),
        dict(role='BOT', prompt="""Tool:PythonInterpreter\nTool Input:def solution():\n    num_fruits_per_first_three_basket = 9 + 15 + 14\n    num_fruits_first_three_basket = num_fruits_per_first_three_basket * 3\n    num_apple_fourth_basket = 9 - 2\n    num_orange_fourth_basket = 15 - 2\n    num_banana_fourth_basket = 14 - 2\n    num_fruits_fourth_basket = num_apple_fourth_basket + num_orange_fourth_basket + num_banana_fourth_basket\n    num_fruits_total = num_fruits_first_three_basket + num_fruits_fourth_basket\n    result = num_fruits_total\n    return result"""),
        dict(role='SYSTEM', prompt='Response:146'),
        dict(role='BOT', prompt='Thought: According to the response, I got the answer\nFinalAnswer: 146'),

        dict(role='HUMAN', prompt='{question}'),
    ],
    'FEWSHOT_INSTRUCTION_CHOICE' : [
        dict(role='HUMAN', prompt='Given point P(-1,4) lies on the graph of the inverse proportionality function $y=\\frac{{k}}{{x}}$ (k≠0), what is the value of k? A. $-\\frac{{1}}{{4}}$ B. $\\frac{{1}}{{4}}$ C. $4$ D. $-4$'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:from sympy import solve, symbols, Ne, Eq\ndef solution():\n    k = symbols(\'k\')\n    result = solve([Eq(4, k / (-1)), Ne(k, 0)], k, dict=True)\n    return result'),
        dict(role='SYSTEM', prompt='Response:{{Eq(k, -4)}}'),
        dict(role='BOT', prompt='Thought: Substituting point P into the function yields the value of k as $-4$\nFinalAnswer: D'),

        dict(role='HUMAN', prompt='The graph of the power function $y=f(x)$ passes through the point $(2, \\dfrac{{1}}{{4}})$, what is the value of $f(-3)$? A. $\\frac{{1}}{{9}}$ B. $\\frac{{1}}{{8}}$ C. $\\frac{{2}}{{9}}$ D. $\\frac{{1}}{{4}}$'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:from sympy import *\ndef solution():\n    x, y, k = symbols(\'x y k\')\n    eq1 = Eq(2**k, Rational(1, 4))\n    k_value = solve(eq1, k)[0]\n    y = x**k_value\n    result = y.subs(x, -3)\n    return result'),
        dict(role='SYSTEM', prompt='Response:1/9'),
        dict(role='BOT', prompt='Thought: The functional expression of the power function is $y=x^{{-2}}$. Substituting $x=-3$ yields $y=\\frac{{1}}{{9}}$\nFinalAnswer: A'),

        dict(role='HUMAN', prompt='If $3 x-y=12$, what is the value of $\\frac{8^{x}}{2^{y}}?$\nA. The value cannot be determined from the information given.\nB. $2^{12}$\nC. 4\nD. $8^{2}$'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:from sympy import symbols, Eq, solve\n\ndef solution():\n    x, y = symbols(\'x y\')\n    equation = Eq(3*x - y, 12)\n\n    y_in_terms_of_x = solve(equation, y)[0]\n    expression = 8**x / 2**y_in_terms_of_x\n    result = expression.simplify()\n    return result'),
        dict(role='SYSTEM', prompt='Response:2**12'),
        dict(role='BOT', prompt='Thought: The value of $\\frac{8^{x}}{2^{y}}$ is $2^{12}$\nFinalAnswer: B'),

        dict(role='HUMAN', prompt='{question}'),
    ]
}

PROMPT_CN = {
    'FEWSHOT_INSTRUCTION_CLOZE' : [
        dict(role='HUMAN', prompt='Mark的篮球队得到25个2分球,8个3分球和10个罚球。他们的对手得到2分球的两倍,但3分球和罚球的一半。两队得分的总和是多少?'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:def solution():\n    mark_pointers_2 = 25 * 2\n    mark_pointers_3 = 8 * 3\n    mark_free_throws = 10 * 1\n    mark_points_scored = mark_pointers_2 + mark_pointers_3 + mark_free_throws\n    opponents_pointers_2 = mark_pointers_2 * 2\n    opponents_pointers_3 = mark_pointers_3 / 2\n    opponents_free_throws = mark_free_throws / 2\n    opponents_points_scored = opponents_pointers_2 + opponents_pointers_3 + opponents_free_throws\n    total_points_scored = mark_points_scored + opponents_points_scored\n    result = total_points_scored\n    return result'),
        dict(role='SYSTEM', prompt='Response:210'),
        dict(role='BOT', prompt='Thought: 根据回答,我得到了答案\nFinalAnswer: 210'),

        dict(role='HUMAN', prompt='Bella有两倍于飞盘的弹珠。她还比卡片多20个飞盘。如果她买每种物品多2/5,她会有多少总数的物品,如果她现在有60颗弹珠?'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:def solution():\n    marbles = 60\n    num_increased_marbles = marbles * 2 / 5\n    num_total_marbles = marbles + num_increased_marbles\n    frisbees = marbles / 2\n    num_increased_frisbees = frisbees * 2 / 5\n    num_total_frisbees = frisbees + num_increased_frisbees\n    deck_cards = frisbees - 20\n    num_increased_deck_cards = deck_cards * 2 / 5\n    num_total_deck_cards = deck_cards + num_increased_deck_cards\n    num_total = num_total_marbles + num_total_frisbees + num_total_deck_cards\n    result = num_total\n    return result'),
        dict(role='SYSTEM', prompt='Response:140'),
        dict(role='BOT', prompt='Thought: 根据回答,我得到了答案\nFinalAnswer: 140'),

        dict(role='HUMAN', prompt='一个有4个水果篮子,前三个篮子里有9个苹果、15个橙子和14个香蕉,第四个篮子里每种水果都少2个。总共有多少水果?'),
        dict(role='BOT', prompt="""Tool:PythonInterpreter\nTool Input:def solution():\n    num_fruits_per_first_three_basket = 9 + 15 + 14\n    num_fruits_first_three_basket = num_fruits_per_first_three_basket * 3\n    num_apple_fourth_basket = 9 - 2\n    num_orange_fourth_basket = 15 - 2\n    num_banana_fourth_basket = 14 - 2\n    num_fruits_fourth_basket = num_apple_fourth_basket + num_orange_fourth_basket + num_banana_fourth_basket\n    num_fruits_total = num_fruits_first_three_basket + num_fruits_fourth_basket\n    result = num_fruits_total\n    return result"""),
        dict(role='SYSTEM', prompt='Response:146'),
        dict(role='BOT', prompt='Thought: 根据回答,我得到了答案\nFinalAnswer: 146'),

        dict(role='HUMAN', prompt='{question}'),
    ],
    'FEWSHOT_INSTRUCTION_CHOICE' : [
        dict(role='HUMAN', prompt='已知点P(-1,4)在反比例函数$y=\\frac{{k}}{{x}}$ (k≠0)的图象上,则k的值是____'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:from sympy import solve, symbols, Ne, Eq\ndef solution():\n    k = symbols(\'k\')\n    result = solve([Eq(4, k / (-1)), Ne(k, 0)], k, dict=True)\n    return result'),
        dict(role='SYSTEM', prompt='Response:{{Eq(k, -4)}}'),
        dict(role='BOT', prompt='Thought: 将点 P 带入函数解出 k 的值为 $-4$\nFinalAnswer: D'),

        dict(role='HUMAN', prompt='幂函数$ y=f(x) $的图象经过点$ (2, \\dfrac {{1}}{{4}}) $,则$ f(-3) $的值为 ______ .'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:from sympy import *\ndef solution():\n    x, y, k = symbols(\'x y k\')\n    eq1 = Eq(2**k, Rational(1, 4))\n    k_value = solve(eq1, k)[0]\n    y = x**k_value\n    result = y.subs(x, -3)\n    return result'),
        dict(role='SYSTEM', prompt='Response:1/9'),
        dict(role='BOT', prompt='Thought: 求出幂函数的函数表达式为 $y=x^{{-2}}$,代入 $x=-3$ 得到 $y=\\frac{{1}}{{9}}$\nFinalAnswer: A'),

        dict(role='HUMAN', prompt='如果$3 x-y=12$,则$\\frac{8^{x}}{2^{y}}$的值是多少?\nA. 无法从给定的信息中确定值。\nB. $2^{12}$\nC. 4\nD. $8^{2}$'),
        dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:from sympy import symbols, Eq, solve\n\ndef solution():\n    x, y = symbols(\'x y\')\n    equation = Eq(3*x - y, 12)\n\n    y_in_terms_of_x = solve(equation, y)[0]\n    expression = 8**x / 2**y_in_terms_of_x\n    result = expression.simplify()\n    return result'),
        dict(role='SYSTEM', prompt='Response:2**12'),
        dict(role='BOT', prompt='Thought: $\\frac{8^{x}}{2^{y}}$的值是$2^{12}$\nFinalAnswer: B'),

        dict(role='HUMAN', prompt='{question}'),
    ]
}

mathbench_sets = {
    'college': ['single_choice_cn', 'cloze_en'],
    'high': ['single_choice_cn', 'single_choice_en'],
    'middle': ['single_choice_cn'],
    'primary': ['cloze_cn'],
    'primary_refine': ['refine_cloze_cn']
}

# Use circular evaluation or not
with_circular_eval = True

mathbench_agent_datasets = []

for _split in list(mathbench_sets.keys()):
    for _name in mathbench_sets[_split]:
        prompt_example = PROMPT_CN if '_cn' in _name else PROMPT_EN
        mathbench_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(round=prompt_example['FEWSHOT_INSTRUCTION_CLOZE'] if 'cloze' in _name else prompt_example['FEWSHOT_INSTRUCTION_CHOICE'])),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=AgentInferencer)
        )

        mathbench_eval_cfg = dict(
            evaluator=dict(type=CircularEvaluator if 'choice' in _name and with_circular_eval else AccEvaluator),
            pred_postprocessor=dict(type=first_option_postprocess, options='ABCD') if 'single_choice' in _name else dict(type=mathbench_postprocess, name=_name))

        mathbench_agent_datasets.append(
            dict(
                abbr='mathbench-' + _split + '-' + _name + '-agent',
                type=MathBenchDataset,
                path=f'./data/mathbench/{_split}',
                name=_name,
                with_circular=with_circular_eval,
                reader_cfg=dict(
                    input_columns=['question'],
                    output_column='answer'
                ),
                infer_cfg=mathbench_infer_cfg,
                eval_cfg=mathbench_eval_cfg,
            ))
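For reference, agent configs like this are pulled into a top-level evaluation file via `read_base`, the same pattern the `*_gen.py` stub files below use. A minimal sketch (the top-level file itself is hypothetical; only the imported names come from this config):

```python
from mmengine.config import read_base

with read_base():
    # Hypothetical top-level config; the relative import mirrors the *_gen.py stubs in this commit.
    from .deprecated_mathbench_agent_gen_fbe13b import mathbench_agent_datasets

datasets = [*mathbench_agent_datasets]
```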
opencompass/configs/datasets/MathBench/deprecated_mathbench_arith_gen_ccd638.py
ADDED
@@ -0,0 +1,58 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import MathBenchDataset, mathbench_postprocess

cloze_prompts = {
    'cloze_arith_en': [
        dict(role='HUMAN', prompt='Q: Calculate (341/11)/(9/(-6)*(-2)/3).'),
        dict(role='BOT', prompt='A: First, (9/(-6)*(-2)/3) can be simplified by : 9/(-6) = -1.5, -1.5 * (-2) = 3, 3 / 3 = 1. So, (9/(-6)*(-2)/3) is equal to 1. Now, we have `(341/11)/1` equals `341/11`. Finally, calculate `341/11 = 31`. The answer is 31.\n'),
        dict(role='HUMAN', prompt='Q: In base 14, what is 5 - 638d8d?'),
        dict(role='BOT', prompt='A: 5 - 638d8d = -638d88. The answer is -638d88.\n'),
        dict(role='HUMAN', prompt='Q: What is -491354 times -0.34?'),
        dict(role='BOT', prompt='A: The product of -491354 and -0.34 is 167060.36. The answer is 167060.36.\n'),
        dict(role='HUMAN', prompt='Q: What is the value of (-55)/(6930/(-382)) + (0 - 3)?.'),
        dict(role='BOT', prompt='A: First, (-55)/(6930/(-382)) = (-55)/(-(6930/382)) = 55*382/6930 = 21010/6930 = 2101/693. Then, 2101/693 + (0 - 3) = 2101/693 - 3 = 2101/693 - 3*693/693 = (2101-2079)/693 = 22/693 = 2/63. The answer is 2/63.\n'),
        dict(role='HUMAN', prompt='Q: {question}'),
        dict(role='BOT', prompt='A: {answer}\n'),
    ]
}

mathbench_sets = {
    'arithmetic': ['cloze_arith_en'],
}

mathbench_datasets = []

for _split in list(mathbench_sets.keys()):
    for _name in mathbench_sets[_split]:
        mathbench_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=cloze_prompts[_name],
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_out_len=512),
        )

        mathbench_eval_cfg = dict(
            evaluator=dict(type=AccEvaluator),
            pred_postprocessor=dict(type=mathbench_postprocess, name=_name))

        mathbench_datasets.append(
            dict(
                type=MathBenchDataset,
                path=f'./data/mathbench/{_split}',
                name=_name,
                with_circular=False,
                abbr='mathbench-arithmetic' + _split + '-' + _name,
                reader_cfg=dict(
                    input_columns=['question'],
                    output_column='answer'
                ),
                infer_cfg=mathbench_infer_cfg,
                eval_cfg=mathbench_eval_cfg,
            ))
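Every BOT exemplar above ends with 'The answer is ...', so the cloze postprocessor only needs to pull the trailing value out of that sentence. A rough, hypothetical sketch of such an extraction (not the actual `mathbench_postprocess`):

```python
import re

def extract_cloze_answer(text: str) -> str:
    """Hypothetical sketch: grab the value after the last 'The answer is'."""
    matches = re.findall(r'The answer is\s*(.+)', text)
    if not matches:
        return text.strip()
    # Strip a trailing period but keep decimal points inside numbers.
    return matches[-1].strip().rstrip('.')
```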
opencompass/configs/datasets/MathBench/deprecated_mathbench_cot_gen_66f329.py
ADDED
@@ -0,0 +1,110 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator
from opencompass.datasets import MathBenchDataset, mathbench_postprocess
from opencompass.utils.text_postprocessors import first_option_postprocess


single_choice_prompts = {
    'single_choice_cn_with_reasoning': '以下是一道关于数学的单项选择题,请你一步一步推理,并在最后用“所以答案为选项X”给出答案,其中“X”为选项A,B,C,D中你认为正确的选项。下面是你要回答的问题\n{question}\n让我们一步一步思考:\n',
    'single_choice_cn': '以下是一道关于数学的单项选择题,请你直接回答正确答案的选项序号。\n下面是你要回答的题目:\n{question}\n答案选项:',
    'single_choice_en_with_reasoning': "Here is a multiple-choice question about mathematics. Please reason through it step by step, and at the end, provide your answer option with 'Therefore, the correct answer is option X', where 'X' is the correct option you think from A,B,C,D. Here is the question you need to answer:\n{question}\nLet's think step by step:",
    'single_choice_en': 'Here is a multiple-choice question about mathematics. Please provide the correct answer option directly.\nHere is the question you need to answer:\n{question}\nAnswer option:',
}

cloze_prompts = {
    'cloze_cn': [
        dict(role='HUMAN', prompt='Q: 林中有15棵树。林务工人员今天将在林中种植树木。完成后,将有21棵树。林务工人员今天种植了多少棵树?'),
        dict(role='BOT', prompt='A: 我们从15棵树开始。后来有21棵树。差值必定是他们种植的树木数量。所以,他们必须种植了21 - 15 = 6棵树。答案是 6\n'),
        dict(role='HUMAN', prompt='Q: 如果停车场有3辆车,又有2辆车进来,停车场里有多少辆车?'),
        dict(role='BOT', prompt='A: 停车场已经有3辆车。又进来了2辆车。现在有3 + 2 = 5辆车。答案是 5\n'),
        dict(role='HUMAN', prompt='Q: 黎恩有32块巧克力,她的妹妹有42块。如果他们吃了35块,他们总共剩下多少块?'),
        dict(role='BOT', prompt='A: 黎恩有32块巧克力,黎恩的妹妹有42块。这意味着原本有32 + 42 = 74块巧克力。被吃掉了35块。所以他们总共还剩下74 - 35 = 39块巧克力。答案是 39\n'),
        dict(role='HUMAN', prompt='Q: 杰森有20个棒棒糖。他给丹妮一些棒棒糖。现在杰森只剩下12个棒棒糖。杰森给丹妮多少个棒棒糖?'),
        dict(role='BOT', prompt='A: 杰森有20个棒棒糖。因为他现在只剩下12个,所以他必须把剩下的都给了丹妮。他给丹妮的棒棒糖数量必定是20 - 12 = 8个。答案是 8\n'),
        dict(role='HUMAN', prompt='Q: 莎莎有五个玩具。在圣诞节,他从他的爸爸和妈妈那里各得到了两个玩具。现在他有多少个玩具?'),
        dict(role='BOT', prompt='A: 她有5个玩具。他从妈妈那里得到了2个,所以之后他有5 + 2 = 7个玩具。然后他从爸爸那里得到了2个,所以总共他有7 + 2 = 9个玩具。答案是 9\n'),
        dict(role='HUMAN', prompt='Q: 服务器房间里有九台电脑。从周一到周四每天增加五台电脑。现在服务器房里有多少台电脑?'),
        dict(role='BOT', prompt='A: 从周一到周四有4天。每天增加5台电脑。这意味着总共增加了4 * 5 = 20台电脑。一开始有9台电脑,所以现在有9 + 20 = 29台电脑。答案是 29\n'),
        dict(role='HUMAN', prompt='Q: 迈克尔有58个高尔夫球。星期二,他丢失了23个高尔夫球。星期三,他又丢失了2个。星期三结束时他还剩下多少个高尔夫球?'),
        dict(role='BOT', prompt='A: 迈克尔一开始有58个球。星期二他丢失了23个,所以之后他还剩下58 - 23 = 35个球。星期三他又丢失了2个,所以现在他还剩下35 - 2 = 33个球。答案是 33\n'),
        dict(role='HUMAN', prompt='Q: 奥利弗有23美元。她用每个3美元的价格买了五个百吉饼。她还剩下多少钱?'),
        dict(role='BOT', prompt='A: 她以每个3美元的价格买了5个百吉饼。这意味着她在百吉饼上花费了5 * 3 = 15美元。她一开始有23美元,所以现在她还剩下23 - 15 = 8美元。答案是 8\n'),
        dict(role='HUMAN', prompt='Q: {question}'),
        dict(role='BOT', prompt='A: {answer}'),
    ],
    'cloze_en': [
        dict(role='HUMAN', prompt='Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?'),
        dict(role='BOT', prompt='A: We start with 15 trees. Later we have 21 trees. The difference must be the number of trees they planted. So, they must have planted 21 - 15 = 6 trees. The answer is 6.\n'),
        dict(role='HUMAN', prompt='Q: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?'),
        dict(role='BOT', prompt='A: There are 3 cars in the parking lot already. 2 more arrive. Now there are 3 + 2 = 5 cars. The answer is 5.\n'),
        dict(role='HUMAN', prompt='Q: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?'),
        dict(role='BOT', prompt="A: Leah had 32 chocolates and Leah's sister had 42. That means there were originally 32 + 42 = 74 chocolates. 35 have been eaten. So in total they still have 74 - 35 = 39 chocolates. The answer is 39.\n"),
        dict(role='HUMAN', prompt='Q: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?'),
        dict(role='BOT', prompt='A: Jason had 20 lollipops. Since he only has 12 now, he must have given the rest to Denny. The number of lollipops he has given to Denny must have been 20 - 12 = 8 lollipops. The answer is 8.\n'),
        dict(role='HUMAN', prompt='Q: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?'),
        dict(role='BOT', prompt='A: He has 5 toys. He got 2 from mom, so after that he has 5 + 2 = 7 toys. Then he got 2 more from dad, so in total he has 7 + 2 = 9 toys. The answer is 9.\n'),
        dict(role='HUMAN', prompt='Q: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?'),
        dict(role='BOT', prompt='A: There are 4 days from monday to thursday. 5 computers were added each day. That means in total 4 * 5 = 20 computers were added. There were 9 computers in the beginning, so now there are 9 + 20 = 29 computers. The answer is 29.\n'),
        dict(role='HUMAN', prompt='Q: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?'),
        dict(role='BOT', prompt='A: Michael initially had 58 balls. He lost 23 on Tuesday, so after that he has 58 - 23 = 35 balls. On Wednesday he lost 2 more so now he has 35 - 2 = 33 balls. The answer is 33.\n'),
        dict(role='HUMAN', prompt='Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?'),
        dict(role='BOT', prompt='A: She bought 5 bagels for $3 each. This means she spent 5 * $3 = $15 on the bagels. She had $23 in beginning, so now she has $23 - $15 = $8. The answer is 8.\n'),
        dict(role='HUMAN', prompt='Q: {question}'),
        dict(role='BOT', prompt='A: {answer}\n'),
    ]
}
cloze_prompts['refine_cloze_cn'] = cloze_prompts['cloze_cn']

mathbench_sets = {
    'college': ['single_choice_cn', 'cloze_en'],
    'high': ['single_choice_cn', 'single_choice_en'],
    'middle': ['single_choice_cn'],
    'primary': ['cloze_cn'],
    'primary_refine': ['refine_cloze_cn']
}

# Generate reasoning path or not, only for single choice
with_reasoning = True

# Use circular evaluation or not
with_circular_eval = True

mathbench_datasets = []

for _split in list(mathbench_sets.keys()):
    for _name in mathbench_sets[_split]:
        mathbench_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(
                            role='HUMAN',
                            prompt=single_choice_prompts[_name + '_with_reasoning'] if with_reasoning else single_choice_prompts[_name],
                        ),
                        dict(role='BOT', prompt='{answer}')] if 'choice' in _name else cloze_prompts[_name],
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_out_len=512),
        )

        mathbench_eval_cfg = dict(
            evaluator=dict(type=CircularEvaluator if 'choice' in _name and with_circular_eval else AccEvaluator),
            pred_postprocessor=dict(type=first_option_postprocess, options='ABCD') if 'single_choice' in _name else dict(type=mathbench_postprocess, name=_name))

        mathbench_datasets.append(
            dict(
                abbr='mathbench-' + _split + '-' + _name,
                type=MathBenchDataset,
                path=f'./data/mathbench/{_split}',
                name=_name,
                with_circular=with_circular_eval,
                reader_cfg=dict(
                    input_columns=['question'],
                    output_column='answer'
                ),
                infer_cfg=mathbench_infer_cfg,
                eval_cfg=mathbench_eval_cfg,
            ))
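With `with_circular_eval = True`, CircularEvaluator only credits a multiple-choice item if the model stays correct while the options are rotated through every position. A minimal sketch of the idea (the `ask` callable is hypothetical; this is not OpenCompass's actual implementation):

```python
def circular_correct(ask, question, options, answer_idx):
    """Credit the item only if `ask` picks the right option under all rotations.

    `ask(question, options)` is assumed to return the predicted option index.
    """
    n = len(options)
    for shift in range(n):
        rotated = options[shift:] + options[:shift]
        # The original correct option moves from answer_idx to (answer_idx - shift) % n.
        if ask(question, rotated) != (answer_idx - shift) % n:
            return False
    return True
```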
opencompass/configs/datasets/MathBench/mathbench_2024_gen_fc2a24.py
ADDED
@@ -0,0 +1,81 @@
from mmengine.config import read_base
from copy import deepcopy
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer, PPLInferencer
from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator
from opencompass.datasets import MathBenchDataset, mathbench_postprocess
from opencompass.utils.text_postprocessors import first_option_postprocess

with read_base():
    from .mathbench_prompt import zero_shot_prompts, few_shot_prompts, mathbench_sets

# Max for this dataset is 4
num_shot = 4
# Generate reasoning path or not, only for single choice
with_reasoning = True
# Use circular evaluation or not
with_circular_eval = True
# Use PPL mode in single choice test or not
use_ppl_single_choice = False

assert 0 <= num_shot <= 4
if num_shot == 0:
    prompts = zero_shot_prompts
else:
    prompts = {name: p[- 2 * num_shot - 2:] for name, p in few_shot_prompts.items()}

mathbench_datasets = []
for _split in mathbench_sets:
    for _name in mathbench_sets[_split]:
        if 'single_choice' in _name:
            if with_reasoning:
                template_round = prompts[_name + '_with_reasoning']
            else:
                template_round = prompts[_name]
        else:
            template_round = prompts[_name]

        if 'single_choice' in _name:
            pred_postprocessor = dict(type=first_option_postprocess, options='ABCD')
        else:
            pred_postprocessor = dict(type=mathbench_postprocess, name=_name)

        if 'single_choice' in _name and with_circular_eval:
            evaluator = dict(type=CircularEvaluator)
        else:
            evaluator = dict(type=AccEvaluator)

        # assemble the final config
        mathbench_reader_cfg = dict(input_columns=['question'], output_column='answer')
        if use_ppl_single_choice and 'single_choice' in _name and not with_reasoning:
            template = {}
            for answer in ['A', 'B', 'C', 'D']:
                one_template_round = deepcopy(template_round)
                one_template_round[-1]['prompt'] = one_template_round[-1]['prompt'].format(answer=answer)
                template[answer] = dict(round=one_template_round)
            mathbench_infer_cfg = dict(
                prompt_template=dict(type=PromptTemplate, template=template),
                retriever=dict(type=ZeroRetriever),
                inferencer=dict(type=PPLInferencer),
            )
        else:
            mathbench_infer_cfg = dict(
                prompt_template=dict(type=PromptTemplate, template=dict(round=template_round)),
                retriever=dict(type=ZeroRetriever),
                inferencer=dict(type=GenInferencer, max_out_len=2048),
            )
        mathbench_eval_cfg = dict(evaluator=evaluator, pred_postprocessor=pred_postprocessor)

        mathbench_datasets.append(
            dict(
                abbr='mathbench-' + _split + '-' + _name,
                type=MathBenchDataset,
                path=f'data/mathbench_v1/{_split}',
                name=_name,
                with_circular=with_circular_eval,
                reader_cfg=mathbench_reader_cfg,
                infer_cfg=mathbench_infer_cfg,
                eval_cfg=mathbench_eval_cfg,
            )
        )
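The slice `p[- 2 * num_shot - 2:]` works because each entry of `few_shot_prompts` is a flat list of alternating HUMAN/BOT rounds ending with the `{question}`/`{answer}` query pair, so it keeps the last `num_shot` example pairs plus that final pair. A quick illustration under that assumption:

```python
# Illustration of the round-slicing logic (round contents are made up;
# the real rounds come from mathbench_prompt.py).
rounds = [dict(role=r, prompt=f'example {i}') for i in range(4) for r in ('HUMAN', 'BOT')]
rounds += [dict(role='HUMAN', prompt='{question}'), dict(role='BOT', prompt='{answer}')]

num_shot = 2
kept = rounds[- 2 * num_shot - 2:]  # 2 example pairs + the query pair
assert len(kept) == 2 * num_shot + 2
```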
opencompass/configs/datasets/MathBench/mathbench_2024_wocircular_gen_1dc21d.py
ADDED
@@ -0,0 +1,81 @@
from mmengine.config import read_base
from copy import deepcopy
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer, PPLInferencer
from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator
from opencompass.datasets import MathBenchDataset, mathbench_postprocess
from opencompass.utils.text_postprocessors import first_option_postprocess

with read_base():
    from .mathbench_prompt import zero_shot_prompts, few_shot_prompts, mathbench_sets

# Max for this dataset is 4
num_shot = 4
# Generate reasoning path or not, only for single choice
with_reasoning = False
# Use circular evaluation or not
with_circular_eval = False
# Use PPL mode in single choice test or not
use_ppl_single_choice = False

assert 0 <= num_shot <= 4
if num_shot == 0:
    prompts = zero_shot_prompts
else:
    prompts = {name: p[- 2 * num_shot - 2:] for name, p in few_shot_prompts.items()}

mathbench_datasets = []
for _split in mathbench_sets:
    for _name in mathbench_sets[_split]:
        if 'single_choice' in _name:
            if with_reasoning:
                template_round = prompts[_name + '_with_reasoning']
            else:
                template_round = prompts[_name]
        else:
            template_round = prompts[_name]

        if 'single_choice' in _name:
            pred_postprocessor = dict(type=first_option_postprocess, options='ABCD')
        else:
            pred_postprocessor = dict(type=mathbench_postprocess, name=_name)

        if 'single_choice' in _name and with_circular_eval:
            evaluator = dict(type=CircularEvaluator)
        else:
            evaluator = dict(type=AccEvaluator)

        # assemble the final config
        mathbench_reader_cfg = dict(input_columns=['question'], output_column='answer')
        if use_ppl_single_choice and 'single_choice' in _name and not with_reasoning:
            template = {}
            for answer in ['A', 'B', 'C', 'D']:
                one_template_round = deepcopy(template_round)
                one_template_round[-1]['prompt'] = one_template_round[-1]['prompt'].format(answer=answer)
                template[answer] = dict(round=one_template_round)
            mathbench_infer_cfg = dict(
                prompt_template=dict(type=PromptTemplate, template=template),
                retriever=dict(type=ZeroRetriever),
                inferencer=dict(type=PPLInferencer),
            )
        else:
            mathbench_infer_cfg = dict(
                prompt_template=dict(type=PromptTemplate, template=dict(round=template_round)),
                retriever=dict(type=ZeroRetriever),
                inferencer=dict(type=GenInferencer, max_out_len=2048),
            )
        mathbench_eval_cfg = dict(evaluator=evaluator, pred_postprocessor=pred_postprocessor)

        mathbench_datasets.append(
            dict(
                abbr='mathbench-wocircular-' + _split + '-' + _name,
                type=MathBenchDataset,
                path=f'data/mathbench_v1/{_split}',
                name=_name,
                with_circular=with_circular_eval,
                reader_cfg=mathbench_reader_cfg,
                infer_cfg=mathbench_infer_cfg,
                eval_cfg=mathbench_eval_cfg,
            )
        )
opencompass/configs/datasets/MathBench/mathbench_2024_wocircular_mixed_8eb12b.py
ADDED
@@ -0,0 +1,81 @@
from mmengine.config import read_base
from copy import deepcopy
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer, PPLInferencer
from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator
from opencompass.datasets import MathBenchDataset, mathbench_postprocess
from opencompass.utils.text_postprocessors import first_option_postprocess

with read_base():
    from .mathbench_prompt import zero_shot_prompts, few_shot_prompts, mathbench_sets

# Max for this dataset is 4
num_shot = 4
# Generate reasoning path or not, only for single choice
with_reasoning = False
# Use circular evaluation or not
with_circular_eval = False
# Use PPL mode in single choice test or not
use_ppl_single_choice = True

assert 0 <= num_shot <= 4
if num_shot == 0:
    prompts = zero_shot_prompts
else:
    prompts = {name: p[- 2 * num_shot - 2:] for name, p in few_shot_prompts.items()}

mathbench_datasets = []
for _split in mathbench_sets:
    for _name in mathbench_sets[_split]:
        if 'single_choice' in _name:
            if with_reasoning:
                template_round = prompts[_name + '_with_reasoning']
            else:
                template_round = prompts[_name]
        else:
            template_round = prompts[_name]

        if 'single_choice' in _name:
            pred_postprocessor = dict(type=first_option_postprocess, options='ABCD')
        else:
            pred_postprocessor = dict(type=mathbench_postprocess, name=_name)

        if 'single_choice' in _name and with_circular_eval:
            evaluator = dict(type=CircularEvaluator)
        else:
            evaluator = dict(type=AccEvaluator)

        # assemble the final config
        mathbench_reader_cfg = dict(input_columns=['question'], output_column='answer')
        if use_ppl_single_choice and 'single_choice' in _name and not with_reasoning:
            template = {}
            for answer in ['A', 'B', 'C', 'D']:
                one_template_round = deepcopy(template_round)
                one_template_round[-1]['prompt'] = one_template_round[-1]['prompt'].format(answer=answer)
                template[answer] = dict(round=one_template_round)
            mathbench_infer_cfg = dict(
                prompt_template=dict(type=PromptTemplate, template=template),
                retriever=dict(type=ZeroRetriever),
                inferencer=dict(type=PPLInferencer),
            )
        else:
            mathbench_infer_cfg = dict(
                prompt_template=dict(type=PromptTemplate, template=dict(round=template_round)),
                retriever=dict(type=ZeroRetriever),
                inferencer=dict(type=GenInferencer, max_out_len=2048),
            )
        mathbench_eval_cfg = dict(evaluator=evaluator, pred_postprocessor=pred_postprocessor)

        mathbench_datasets.append(
            dict(
                abbr='mathbench-wocircular-' + _split + '-' + _name,
                type=MathBenchDataset,
                path=f'data/mathbench_v1/{_split}',
                name=_name,
                with_circular=with_circular_eval,
                reader_cfg=mathbench_reader_cfg,
                infer_cfg=mathbench_infer_cfg,
                eval_cfg=mathbench_eval_cfg,
            )
        )
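After the per-answer loop above, `template` maps each option letter to a complete dialogue whose final BOT turn is that letter, and PPLInferencer scores all four renderings per question. A rough illustration of the resulting structure (round contents are made up):

```python
# Shape of the template dict built for PPL scoring; the real rounds come from
# mathbench_prompt.py, and only the '{answer}' slot differs per key.
template = {
    'A': dict(round=[dict(role='HUMAN', prompt='{question}'), dict(role='BOT', prompt='A')]),
    'B': dict(round=[dict(role='HUMAN', prompt='{question}'), dict(role='BOT', prompt='B')]),
    # 'C' and 'D' follow the same pattern.
}
```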
opencompass/configs/datasets/MathBench/mathbench_gen.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .mathbench_2024_gen_19e486 import mathbench_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_gen.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .SuperGLUE_AX_g_gen_68aac7 import AX_g_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_gen_68aac7.py
ADDED
@@ -0,0 +1,43 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import AXDatasetV2
from opencompass.utils.text_postprocessors import first_option_postprocess

AX_g_reader_cfg = dict(
    input_columns=['hypothesis', 'premise'],
    output_column='label',
)

AX_g_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?\nA. Yes\nB. No\nAnswer:'
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

AX_g_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

AX_g_datasets = [
    dict(
        abbr='AX_g',
        type=AXDatasetV2,
        path='./data/SuperGLUE/AX-g/AX-g.jsonl',
        reader_cfg=AX_g_reader_cfg,
        infer_cfg=AX_g_infer_cfg,
        eval_cfg=AX_g_eval_cfg,
    )
]
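`first_option_postprocess` reduces the generated text to one of the allowed option letters before accuracy is computed. A rough simplification of that behavior (the real implementation handles many more answer phrasings):

```python
import re

def first_option(text: str, options: str = 'AB') -> str:
    """Rough sketch: return the first standalone option letter in the output."""
    match = re.search(rf'\b([{options}])\b', text)
    return match.group(1) if match else ''
```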
opencompass/configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_ppl.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .SuperGLUE_AX_g_ppl_66caf3 import AX_g_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_ppl_50f8f6.py
ADDED
@@ -0,0 +1,34 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

AX_g_reader_cfg = dict(
    input_columns=['hypothesis', 'premise'],
    output_column='label',
    test_split='train')

AX_g_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'entailment': '{premise}?entailment, {hypothesis}',
            'not_entailment': '{premise}?not_entailment, {hypothesis}'
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

AX_g_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

AX_g_datasets = [
    dict(
        type=HFDataset,
        abbr='AX_g',
        path='json',
        data_files='./data/SuperGLUE/AX-g/AX-g.jsonl',
        split='train',
        reader_cfg=AX_g_reader_cfg,
        infer_cfg=AX_g_infer_cfg,
        eval_cfg=AX_g_eval_cfg)
]
opencompass/configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_ppl_66caf3.py
ADDED
@@ -0,0 +1,53 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

AX_g_reader_cfg = dict(
    input_columns=['hypothesis', 'premise'],
    output_column='label',
    test_split='train')

AX_g_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'entailment': dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?'
                ),
                dict(role='BOT', prompt='Yes'),
            ]),
            'not_entailment': dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?'
                ),
                dict(role='BOT', prompt='No'),
            ])
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer),
)

AX_g_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

AX_g_datasets = [
    dict(
        type=HFDataset,
        abbr='AX_g',
        path='json',
        data_files='./data/SuperGLUE/AX-g/AX-g.jsonl',
        split='train',
        reader_cfg=AX_g_reader_cfg,
        infer_cfg=AX_g_infer_cfg,
        eval_cfg=AX_g_eval_cfg,
    )
]
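Unlike the `_gen_` configs, the `_ppl_` variants never generate text: PPLInferencer scores the fully rendered dialogue for each candidate label and keeps the one the model finds most likely. A minimal sketch of that selection rule (the `score` callable is hypothetical, standing in for a per-prompt perplexity; this is not the inferencer's actual API):

```python
def pick_label(score, prompts_by_label):
    # Hypothetical `score`: maps a rendered prompt to its perplexity.
    # The label whose verbalization is most likely (lowest perplexity) wins.
    return min(prompts_by_label, key=lambda label: score(prompts_by_label[label]))
```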
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .SuperGLUE_RTE_gen_68aac7 import RTE_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen_68aac7.py
ADDED
@@ -0,0 +1,43 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import AXDatasetV2
from opencompass.utils.text_postprocessors import first_option_postprocess

RTE_reader_cfg = dict(
    input_columns=['hypothesis', 'premise'],
    output_column='label',
)

RTE_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?\nA. Yes\nB. No\nAnswer:'
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

RTE_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

RTE_datasets = [
    dict(
        abbr='RTE',
        type=AXDatasetV2,  # RTE shares the same format as AX
        path='./data/SuperGLUE/RTE/val.jsonl',
        reader_cfg=RTE_reader_cfg,
        infer_cfg=RTE_infer_cfg,
        eval_cfg=RTE_eval_cfg,
    )
]
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .SuperGLUE_RTE_ppl_66caf3 import RTE_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_50f8f6.py
ADDED
@@ -0,0 +1,34 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

RTE_reader_cfg = dict(
    input_columns=['hypothesis', 'premise'],
    output_column='label',
    test_split='train')

RTE_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'entailment': '{premise}?entailment, {hypothesis}',
            'not_entailment': '{premise}?not_entailment, {hypothesis}'
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

RTE_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

RTE_datasets = [
    dict(
        type=HFDataset,
        abbr='RTE',
        path='json',
        data_files='./data/SuperGLUE/RTE/val.jsonl',
        split='train',
        reader_cfg=RTE_reader_cfg,
        infer_cfg=RTE_infer_cfg,
        eval_cfg=RTE_eval_cfg)
]
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_66caf3.py
ADDED
@@ -0,0 +1,53 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

RTE_reader_cfg = dict(
    input_columns=['hypothesis', 'premise'],
    output_column='label',
    test_split='train')

RTE_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'entailment': dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?'
                ),
                dict(role='BOT', prompt='Yes'),
            ]),
            'not_entailment': dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?'
                ),
                dict(role='BOT', prompt='No'),
            ])
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer),
)

RTE_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

RTE_datasets = [
    dict(
        type=HFDataset,
        abbr='RTE',
        path='json',
        data_files='./data/SuperGLUE/RTE/val.jsonl',
        split='train',
        reader_cfg=RTE_reader_cfg,
        infer_cfg=RTE_infer_cfg,
        eval_cfg=RTE_eval_cfg,
    )
]
opencompass/configs/datasets/TheoremQA/README.md
ADDED
@@ -0,0 +1,69 @@
# TheoremQA

```bash
python3 run.py --models hf_internlm2_7b --datasets TheoremQA_5shot_gen_6f0af8 --debug
python3 run.py --models hf_internlm2_chat_7b --datasets TheoremQA_5shot_gen_6f0af8 --debug
```

## Base Models

| model | TheoremQA |
|:------------------------:|------------:|
| llama-7b-turbomind | 10.25 |
| llama-13b-turbomind | 11.25 |
| llama-30b-turbomind | 14.25 |
| llama-65b-turbomind | 15.62 |
| llama-2-7b-turbomind | 12.62 |
| llama-2-13b-turbomind | 11.88 |
| llama-2-70b-turbomind | 15.62 |
| llama-3-8b-turbomind | 20.25 |
| llama-3-70b-turbomind | 33.62 |
| internlm2-1.8b-turbomind | 10.50 |
| internlm2-7b-turbomind | 21.88 |
| internlm2-20b-turbomind | 26.00 |
| qwen-1.8b-turbomind | 9.38 |
| qwen-7b-turbomind | 15.00 |
| qwen-14b-turbomind | 21.62 |
| qwen-72b-turbomind | 27.12 |
| qwen1.5-0.5b-hf | 5.88 |
| qwen1.5-1.8b-hf | 12.00 |
| qwen1.5-4b-hf | 13.75 |
| qwen1.5-7b-hf | 4.25 |
| qwen1.5-14b-hf | 12.62 |
| qwen1.5-32b-hf | 26.62 |
| qwen1.5-72b-hf | 26.62 |
| qwen1.5-moe-a2-7b-hf | 7.50 |
| mistral-7b-v0.1-hf | 17.00 |
| mistral-7b-v0.2-hf | 16.25 |
| mixtral-8x7b-v0.1-hf | 24.12 |
| mixtral-8x22b-v0.1-hf | 36.75 |
| yi-6b-hf | 13.88 |
| yi-34b-hf | 24.75 |
| deepseek-7b-base-hf | 12.38 |
| deepseek-67b-base-hf | 21.25 |

## Chat Models

| model | TheoremQA |
|:-----------------------------:|------------:|
| qwen1.5-0.5b-chat-hf | 9.00 |
| qwen1.5-1.8b-chat-hf | 9.25 |
| qwen1.5-4b-chat-hf | 13.88 |
| qwen1.5-7b-chat-hf | 12.25 |
| qwen1.5-14b-chat-hf | 13.63 |
| qwen1.5-32b-chat-hf | 19.25 |
| qwen1.5-72b-chat-hf | 22.75 |
| qwen1.5-110b-chat-hf | 17.50 |
| internlm2-chat-1.8b-hf | 13.63 |
| internlm2-chat-1.8b-sft-hf | 12.88 |
| internlm2-chat-7b-hf | 18.50 |
| internlm2-chat-7b-sft-hf | 18.75 |
| internlm2-chat-20b-hf | 23.00 |
| internlm2-chat-20b-sft-hf | 25.12 |
| llama-3-8b-instruct-hf | 19.38 |
| llama-3-70b-instruct-hf | 36.25 |
| llama-3-8b-instruct-lmdeploy | 19.62 |
| llama-3-70b-instruct-lmdeploy | 34.50 |
| mistral-7b-instruct-v0.1-hf | 12.62 |
| mistral-7b-instruct-v0.2-hf | 11.38 |
| mixtral-8x7b-instruct-v0.1-hf | 26.00 |
opencompass/configs/datasets/TheoremQA/TheoremQA_5shot_gen_6f0af8.py
ADDED
@@ -0,0 +1,45 @@
from mmengine.config import read_base
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import TheoremQADatasetV3, TheoremQA_postprocess_v3, TheoremQAEvaluatorV3

with read_base():
    from .TheoremQA_few_shot_examples import examples

num_shot = 5
rounds = []
for index, (query, response) in enumerate(examples[:num_shot]):
    if index == 0:
        desc = 'You are supposed to provide a solution to a given problem.\n\n'
    else:
        desc = ''
    rounds += [
        dict(role='HUMAN', prompt=f'{desc}Problem:\n{query}\nSolution:'),
        dict(role='BOT', prompt=f'{response}')
    ]
rounds += [dict(role='HUMAN', prompt='Problem:\n{Question}\nSolution:')]

TheoremQA_reader_cfg = dict(input_columns=['Question', 'Answer_type'], output_column='Answer', train_split='test', test_split='test')

TheoremQA_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template=dict(round=rounds)),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024, stopping_criteria=['Problem:', 'Problem']),
)

TheoremQA_eval_cfg = dict(
    evaluator=dict(type=TheoremQAEvaluatorV3),
    pred_postprocessor=dict(type=TheoremQA_postprocess_v3)
)

TheoremQA_datasets = [
    dict(
        abbr='TheoremQA',
        type=TheoremQADatasetV3,
        path='data/TheoremQA/theoremqa_test.json',
        reader_cfg=TheoremQA_reader_cfg,
        infer_cfg=TheoremQA_infer_cfg,
        eval_cfg=TheoremQA_eval_cfg,
    )
]
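The `stopping_criteria` entries cut generation off as soon as the model starts a new 'Problem:' block, the usual runaway mode for this few-shot format. As a sanity check on the dialogue assembled above (using the `rounds` and `num_shot` defined in this config):

```python
# Five HUMAN/BOT example pairs plus the final query round.
assert len(rounds) == 2 * num_shot + 1
assert rounds[-1] == dict(role='HUMAN', prompt='Problem:\n{Question}\nSolution:')
```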
opencompass/configs/datasets/TheoremQA/TheoremQA_few_shot_examples.py
ADDED
@@ -0,0 +1,22 @@
examples = [
    (
        'In a 10 Gigabit Ethernet network, the average size of a frame is 1500 bytes. If a burst of noise lasting 1ms interrupts the network, how many frames are lost?',
        'First, calculate the data rate in bytes/s:\n$$10 Gigabit/s * (1 Byte / 8 bits) = 1.25 * 10^9 Bytes/s$$\nNext, calculate the data loss in bytes due to the noise:\n$$1 ms * 1.25 * 10^9 Bytes/s = 1.25 * 10^6 Bytes$$\nFinally, divide the data loss by the average frame size to get the number of frames lost:\n$$1.25 * 10^6 Bytes / 1500 Bytes/frame \\approx 833.33 frames$$\nThe answer is 833.33',
    ),
    (
        'Given x = 0.157, what is the value of $x \\times \\frac{\\prod_{n=1}^\\infty (1 - \\frac{x^2}{n^2 \\pi^2})}{\\sin(x)}$?',
        "To evaluate the expression $x \\times \\frac{\\prod_{n=1}^{\\infty} (1 - \\frac{x^2}{n^2 \\pi^2})}{\\sin(x)}$ given x = 0.157, we first recognize that the product in the numerator is related to the sine function through the Euler's reflection formula for the sine function, which can be expressed as:\n$$\\sin(x) = x \\prod_{n=1}^{\\infty} \\left(1 - \\frac{x^2}{n^2 \\pi^2}\\right)$$\nTherefore, the given expression simplifies to: $x \\times \\frac{\\sin(x)}{\\sin(x)}$\nBecause sin(x) in the numerator and denominator cancels out, the expression simplifies further to just x.\nSo, given x = 0.157, the value of the expression is 0.157. This result is derived from the properties of the sine function and does not require computational evaluation.\nThe answer is 0.157",
    ),
    (
        'Consider the basis C of $\\mathbb{R}^2$ consisting of vectors $u_1 = [2, 4]$ and $u_2 = [1, -1]$. If $y = [8, 12]$, find the C-coordinate vector of y.',
        "The goal is to express y as a linear combination of the basis vectors of C, i.e., $y = a\\cdot u_1 + b\\cdot u_2$, where a and b are the scalar coefficients that we want to find. These coefficients will form the C-coordinate vector of y, which we'll denote as $[a, b]_C$.\nGiven:\n- $u_1 = [2, 4]$,\n- $u_2 = [1, -1]$,\n- $y = [8, 12]$.\nWe need to solve the system of linear equations:\n2a + 1b = 8\n4a - 1b = 12\nLet's solve this system of equations to find a and b.\nThe solution to the system of equations is $a = \\frac{10}{3} and b = \\frac{4}{3}$. Therefore, the C-coordinate vector of y in the basis consisting of vectors $u_1 = [2, 4]$ and $u_2 = [1, -1]$ is $\\left[\\frac{10}{3}, \\frac{4}{3}\\right]_C$.\nLet's calculate the numerical value of $\\left[\\frac{10}{3}, \\frac{4}{3}\\right]_C$ as [3.33, 1.33].\nThe answer is [3.33, 1.33]",
    ),
    (
        'One can draw a simple, connected planar graph with 200 vertices and 397 edges. Is this statement True or False?',
        "To determine the answer, we can use Euler's formula for planar graphs, which states that for any finite, connected, planar graph, $V - E + F = 2$, where V is the number of vertices, E is the number of edges, and F is the number of faces.\nGiven the modified question, we have V = 200 vertices and E = 397 edges. We want to find if we can have a graph that satisfies these conditions, adhering to Euler's formula.\nFirst, let's rearrange Euler's formula to solve for F: F = E - V + 2\nSubstituting the given values: F = 397 - 200 + 2, F = 199\nThis means a graph with 200 vertices and 397 edges would have 199 faces. However, to determine the truth of this possibility, we should check if this graph doesn't violate any other planar graph constraints, particularly regarding the number of edges.\nFor a simple, connected planar graph, there's also a relationship between vertices, edges, and faces given by the inequality: $E \\leq 3V - 6$\nSubstituting V = 200 gives: $E \\leq 3*200 - 6 = 594$\nWith E = 397, the condition $E \\leq 594$ is satisfied, meaning it's theoretically possible in terms of the edge condition for a planar graph.\nTherefore, one can draw a simple, connected planar graph with 200 vertices and 397 edges, resulting in 199 faces, without violating the conditions for it to be planar according to both Euler's formula and the constraint on the maximum number of edges.\nThe answer is True",
    ),
    (
        'Given a finite group G, and a collection of permutations H on a set. Then (a) there always exists H such that G is isomorphic to H; (b) for any H, G is isomorphic to H; (c) G can never be isomorphic to H; (d) none of the above. Which option is correct?',
        "This is based on Cayley's theorem, which states that every group G is isomorphic to a subgroup of the symmetric group acting on G.\nIn other words, for every finite group G, there exists a collection of permutations H (which in this context, can be thought of as the set of permutations representing the action of G on itself) such that G is isomorphic to H.\nTherefore, there always exists H such that G is isomorphic to H.\nThe answer is (a)",
    ),
]
opencompass/configs/datasets/TheoremQA/TheoremQA_few_shot_examples_official.py
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
examples = [
|
2 |
+
(
|
3 |
+
'In a 10 Gigabit Ethernet network, the average size of a frame is 1500 bytes. If a burst of noise lasting 1ms interrupts the network, how many frames are lost?',
|
4 |
+
'First, calculate the data rate in bytes/s:\n\n10 Gigabit/s * (1 Byte / 8 bits) = 1.25 * 10^9 Bytes/s\n\nNext, calculate the data loss in bytes due to the noise:\n\n1 ms * 1.25 * 10^9 Bytes/s = 1.25 * 10^6 Bytes\n\nFinally, divide the data loss by the average frame size to get the number of frames lost:\n\n1.25 * 10^6 Bytes / 1500 Bytes/frame ≈ 833.33 frames\nThe answer is 833.33'
|
5 |
+
),
|
6 |
+
(
|
7 |
+
'Given x = 0.157, what is the value of x \\times \\frac{\\prod_{n=1}^\\infty (1 - \\frac{x^2}{n^2 \\pi^2})}{\\sin(x)}?',
|
8 |
+
"To evaluate the expression $x \\times \\frac{\\prod_{n=1}^{\\infty} (1 - \\frac{x^2}{n^2 \\pi^2})}{\\sin(x)}$ given x = 0.157, we first recognize that the product in the numerator is related to the sine function through the Euler's reflection formula for the sine function, which can be expressed as:\n\n$$\\sin(x) = x \\prod_{n=1}^{\\infty} \\left(1 - \\frac{x^2}{n^2 \\pi^2}\\right)$$\n\nTherefore, the given expression simplifies to: $x \\times \\frac{\\sin(x)}{\\sin(x)}$\n\nBecause sin(x) in the numerator and denominator cancels out, the expression simplifies further to just x.\n\nSo, given x = 0.157, the value of the expression is 0.157. This result is derived from the properties of the sine function and does not require computational evaluation.\nThe answer is 0.157"
|
9 |
+
),
|
10 |
+
(
|
11 |
+
'Consider the basis C of \\mathbb{R}^2 consisting of vectors u_1 = [2, 4] and u_2 = [1, -1]. If y = [8, 12], find the C-coordinate vector of y.',
|
12 |
+
"The goal is to express y as a linear combination of the basis vectors of C, i.e., $y = a\\cdot u_1 + b\\cdot u_2$, where a and b are the scalar coefficients that we want to find. These coefficients will form the C-coordinate vector of y, which we'll denote as $[a, b]_C$.\n\nGiven:\n- $u_1 = [2, 4]$,\n- $u_2 = [1, -1]$,\n- $y = [8, 12]$.\n\nWe need to solve the system of linear equations:\n2a + 1b = 8\n4a - 1b = 12\n\nLet's solve this system of equations to find a and b.\n\nThe solution to the system of equations is $a = \\frac{10}{3} and b = \\frac{4}{3}$. Therefore, the C-coordinate vector of y in the basis consisting of vectors u_1 = [2, 4] and u_2 = [1, -1] is $\\left[\\frac{10}{3}, \\frac{4}{3}\\right]_C$. \nLet's calculate the numerical value of $\\left[\x0crac{10}{3}, \x0crac{4}{3}\right]_C$ as [3.33, 1.33].\nThe answer is [3.33, 1.33]"
|
13 |
+
),
|
14 |
+
(
|
15 |
+
'One can draw a simple, connected planar graph with 200 vertices and 397 edges. Is this statement Trur or False?',
|
16 |
+
"To determine the answer, we can use Euler's formula for planar graphs, which states that for any finite, connected, planar graph, $V - E + F = 2$, where V is the number of vertices, E is the number of edges, and F is the number of faces.\n\nGiven the modified question, we have V = 200 vertices and E = 397 edges. We want to find if we can have a graph that satisfies these conditions, adhering to Euler's formula.\n\nFirst, let's rearrange Euler's formula to solve for F: F = E - V + 2\n\nSubstituting the given values: F = 397 - 200 + 2, F = 199\n\nThis means a graph with 200 vertices and 397 edges would have 199 faces. However, to determine the truth of this possibility, we should check if this graph doesn't violate any other planar graph constraints, particularly regarding the number of edges.\n\nFor a simple, connected planar graph, there's also a relationship between vertices, edges, and faces given by the inequality: $E \\leq 3V - 6$\n\nSubstituting V = 200 gives: $E \\leq 3*200 - 6 = 594$\n\nWith E = 397, the condition $E \\leq 594$ is satisfied, meaning it's theoretically possible in terms of the edge condition for a planar graph.\n\nTherefore, one can draw a simple, connected planar graph with 200 vertices and 397 edges, resulting in 199 faces, without violating the conditions for it to be planar according to both Euler's formula and the constraint on the maximum number of edges.\nThe answer is True"
|
17 |
+
),
|
18 |
+
(
|
19 |
+
'Given a finite group G, and a collection of permutations H on a set. Then (a) there always exists H such that G is isomorphic to H; (b) for any H, G is isomorphic to H; (c) G can never be isomorphic to H; (d) none of the above. Which option is correct?',
|
20 |
+
"This is based on Cayley's theorem, which states that every group G is isomorphic to a subgroup of the symmetric group acting on G. \nIn other words, for every finite group G, there exists a collection of permutations H (which in this context, can be thought of as the set of permutations representing the action of G on itself) such that G is isomorphic to H.\n\nTherefore, there always exists H such that G is isomorphic to H.\nThe answer is (a)"
|
21 |
+
)
|
22 |
+
]
|
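These (question, solution) tuples are consumed as few-shot exemplars. As a rough illustration of how such pairs can be stitched into a single prompt, here is a minimal sketch; the Problem/Solution labels and the build_prompt helper are assumptions for this sketch, not the exact template the 5-shot config renders:

def build_prompt(examples, question):
    # Concatenate each worked example, then append the target question.
    parts = [f'Problem:\n{q}\nSolution:\n{a}\n' for q, a in examples]
    parts.append(f'Problem:\n{question}\nSolution:\n')
    return '\n'.join(parts)

# Example usage with a toy pair in the same shape as the tuples above:
demo = [('Compute 2 + 2.', 'Adding the two values gives 4.\nThe answer is 4')]
print(build_prompt(demo, 'Compute 3 + 5.'))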
opencompass/configs/datasets/TheoremQA/TheoremQA_gen.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .TheoremQA_5shot_gen_6f0af8 import TheoremQA_datasets  # noqa: F401, F403
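This four-line config only aliases the current default variant. A quick way to confirm what it resolves to is to load it with mmengine; the sketch below assumes the repository root as the working directory:

from mmengine.config import Config

# read_base() merges TheoremQA_datasets from the 5-shot variant into cfg.
cfg = Config.fromfile(
    'opencompass/configs/datasets/TheoremQA/TheoremQA_gen.py')
for ds in cfg['TheoremQA_datasets']:
    print(ds['abbr'])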
opencompass/configs/datasets/TheoremQA/deprecated_TheoremQA_gen_424e0a.py
ADDED
@@ -0,0 +1,39 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import TheoremQADataset, TheoremQA_postprocess

TheoremQA_reader_cfg = dict(input_columns=['Question', 'Answer_type'], output_column='Answer', train_split='test')

TheoremQA_prompt1 = (
    'Please read a math problem, and then think step by step to derive the answer. The answer is decided by Answer Type. '
    'If the Answer type is in [bool], the answer needs to be True or False. '
    'Else if the Answer type is in [integer, float], the answer needs to be in numerical form. '
    'Else if the Answer type is in [list of integer, list of float], the answer needs to be a list of numbers like [2, 3, 4]. '
    'Else if the Answer type is in [option], the answer needs to be an option like (a), (b), (c), (d). '
    "You need to output the answer in your final sentence like 'Therefore, the answer is ...'."
)
TheoremQA_prompt2 = (
    f'Below is an instruction that describes a task, paired with an input that provides further context. '
    f'Write a response that appropriately completes the request.\n\n### Instruction:\n{TheoremQA_prompt1}\n\n### Input:\n{{Question}}\nAnswer_type:{{Answer_type}}\n### Response:\n'
)

TheoremQA_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template=TheoremQA_prompt2),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

TheoremQA_eval_cfg = dict(evaluator=dict(type=AccEvaluator), pred_postprocessor=dict(type=TheoremQA_postprocess))

TheoremQA_datasets = [
    dict(
        abbr='TheoremQA',
        type=TheoremQADataset,
        path='./data/TheoremQA/test.csv',
        reader_cfg=TheoremQA_reader_cfg,
        infer_cfg=TheoremQA_infer_cfg,
        eval_cfg=TheoremQA_eval_cfg,
    )
]
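TheoremQA_postprocess is imported from opencompass.datasets and its implementation is not shown in this diff. Since the prompt instructs models to end with "Therefore, the answer is ...", a plausible minimal stand-in could look like the following; this is a hypothetical sketch, not opencompass's actual postprocessor:

import re

def extract_theoremqa_answer(prediction: str) -> str:
    # Hypothetical stand-in: take the text after the last
    # "the answer is" marker, falling back to the raw prediction.
    matches = re.findall(r'(?i)the answer is\s*(.+)', prediction)
    return matches[-1].strip().rstrip('.') if matches else prediction.strip()

print(extract_theoremqa_answer('... Therefore, the answer is 0.157.'))  # 0.157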
opencompass/configs/datasets/TheoremQA/deprecated_TheoremQA_gen_7009de.py
ADDED
@@ -0,0 +1,44 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import TheoremQADataset, TheoremQA_postprocess

TheoremQA_reader_cfg = dict(input_columns=['Question', 'Answer_type'], output_column='Answer', train_split='test')

TheoremQA_prompt1 = """You are a mathematician, you are supposed to answer the given question. You need to output the answer in your final sentence like "Therefore, the answer is ...". The answer can only be one of the following forms:
1. a numerical value like 0.1, no symbol and no unit at all.
2. a list of number like [2, 3, 4].
3. True/False.
4. an option like (a), (b), (c), (d)
"""
TheoremQA_prompt2 = "Question: {Question}\nLet's think step by step."

TheoremQA_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            begin=[
                dict(role='SYSTEM', fallback_role='HUMAN', prompt=TheoremQA_prompt1),
            ],
            round=[
                dict(role='HUMAN', prompt=TheoremQA_prompt2),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

TheoremQA_eval_cfg = dict(evaluator=dict(type=AccEvaluator), pred_postprocessor=dict(type=TheoremQA_postprocess))

TheoremQA_datasets = [
    dict(
        abbr='TheoremQA',
        type=TheoremQADataset,
        path='./data/TheoremQA/test.csv',
        reader_cfg=TheoremQA_reader_cfg,
        infer_cfg=TheoremQA_infer_cfg,
        eval_cfg=TheoremQA_eval_cfg,
    )
]
opencompass/configs/datasets/TheoremQA/deprecated_TheoremQA_gen_ef26ca.py
ADDED
@@ -0,0 +1,44 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import TheoremQADataset, TheoremQA_postprocess

TheoremQA_reader_cfg = dict(input_columns=['Question', 'Answer_type'], output_column='Answer', train_split='test')

TheoremQA_prompt1 = """You are a mathematician, you are supposed to answer the given question. You need to output the answer in your final sentence like "Therefore, the answer is ...". The answer can only be one of the following forms:
1. a numerical value like 0.1, no symbol and no unit at all.
2. a list of number like [2, 3, 4].
3. True/False.
4. an option like (a), (b), (c), (d)
"""
TheoremQA_prompt2 = "Question: {Question}\nLet's think step by step."

TheoremQA_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    prompt=TheoremQA_prompt1 + TheoremQA_prompt2,
                ),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

TheoremQA_eval_cfg = dict(evaluator=dict(type=AccEvaluator), pred_postprocessor=dict(type=TheoremQA_postprocess))

TheoremQA_datasets = [
    dict(
        abbr='TheoremQA',
        type=TheoremQADataset,
        path='./data/TheoremQA/test.csv',
        reader_cfg=TheoremQA_reader_cfg,
        infer_cfg=TheoremQA_infer_cfg,
        eval_cfg=TheoremQA_eval_cfg,
    )
]
opencompass/configs/datasets/TheoremQA/deprecated_TheoremQA_post_v2_gen_2c2583.py
ADDED
@@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import TheoremQADataset, TheoremQA_postprocess_v2

TheoremQA_reader_cfg = dict(input_columns=['Question', 'Answer_type'], output_column='Answer', train_split='test')

TheoremQA_prompt1 = """You are a mathematician, you are supposed to answer the given question. You need to output the answer in your final sentence like "Therefore, the answer is ...". The answer can only be one of the following forms:
1. a numerical value like 0.1, no symbol and no unit at all.
2. a list of number like [2, 3, 4].
3. True/False.
4. an option like (a), (b), (c), (d)
"""
TheoremQA_prompt2 = "Question: {Question}\nLet's think step by step."

TheoremQA_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=TheoremQA_prompt1 + TheoremQA_prompt2,
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

# A proper evaluator would need an LLM to extract the answer; this
# evaluation logic also produces a fair number of false negatives (FN).
TheoremQA_eval_cfg = dict(evaluator=dict(type=AccEvaluator), pred_postprocessor=dict(type=TheoremQA_postprocess_v2))

TheoremQA_datasets = [
    dict(
        abbr='TheoremQA',
        type=TheoremQADataset,
        path='./data/TheoremQA/test.csv',
        reader_cfg=TheoremQA_reader_cfg,
        infer_cfg=TheoremQA_infer_cfg,
        eval_cfg=TheoremQA_eval_cfg,
    )
]
opencompass/configs/datasets/TheoremQA/deprecated_TheoremQA_post_v2_gen_ef26ca.py
ADDED
@@ -0,0 +1,45 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import TheoremQADataset, TheoremQA_postprocess_v2

TheoremQA_reader_cfg = dict(input_columns=['Question', 'Answer_type'], output_column='Answer', train_split='test')

TheoremQA_prompt1 = """You are a mathematician, you are supposed to answer the given question. You need to output the answer in your final sentence like "Therefore, the answer is ...". The answer can only be one of the following forms:
1. a numerical value like 0.1, no symbol and no unit at all.
2. a list of number like [2, 3, 4].
3. True/False.
4. an option like (a), (b), (c), (d)
"""
TheoremQA_prompt2 = "Question: {Question}\nLet's think step by step."

TheoremQA_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    prompt=TheoremQA_prompt1 + TheoremQA_prompt2,
                ),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

# A proper evaluator would need an LLM to extract the answer; this
# evaluation logic also produces a fair number of false negatives (FN).
TheoremQA_eval_cfg = dict(evaluator=dict(type=AccEvaluator), pred_postprocessor=dict(type=TheoremQA_postprocess_v2))

TheoremQA_datasets = [
    dict(
        abbr='TheoremQA',
        type=TheoremQADataset,
        path='./data/TheoremQA/test.csv',
        reader_cfg=TheoremQA_reader_cfg,
        infer_cfg=TheoremQA_infer_cfg,
        eval_cfg=TheoremQA_eval_cfg,
    )
]
opencompass/configs/datasets/XCOPA/XCOPA_ppl.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .XCOPA_ppl_54058d import XCOPA_datasets  # noqa: F401, F403
opencompass/configs/datasets/XCOPA/XCOPA_ppl_54058d.py
ADDED
@@ -0,0 +1,31 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import XCOPADataset

XCOPA_reader_cfg = dict(
    input_columns=['question', 'premise', 'choice1', 'choice2'],
    output_column='label',
    test_split='train')

XCOPA_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: 'Premise:{premise}。\nQuestion:{question}。\nAnswer: {choice1}.',
            1: 'Premise:{premise}。\nQuestion:{question}。\nAnswer: {choice2}.',
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

XCOPA_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

XCOPA_datasets = [
    dict(
        type=XCOPADataset,
        path='xcopa',
        reader_cfg=XCOPA_reader_cfg,
        infer_cfg=XCOPA_infer_cfg,
        eval_cfg=XCOPA_eval_cfg)
]
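With a PPL inferencer, the integer template keys correspond to values of the label column: each candidate template is filled in and scored by the model, and the label whose completion scores as most likely is predicted. A minimal sketch of that selection rule, with the perplexity scores stubbed out as inputs rather than computed by a real model:

templates = {
    0: 'Premise:{premise}。\nQuestion:{question}。\nAnswer: {choice1}.',
    1: 'Premise:{premise}。\nQuestion:{question}。\nAnswer: {choice2}.',
}

def render(label: int, row: dict) -> str:
    # Fill one candidate template with a dataset row.
    return templates[label].format(**row)

def pick_label(ppl_by_label: dict) -> int:
    # Lower perplexity means the model prefers that completion.
    return min(ppl_by_label, key=ppl_by_label.get)

print(pick_label({0: 12.4, 1: 9.8}))  # -> 1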
opencompass/configs/datasets/commonsenseqa/commonsenseqa_7shot_cot_gen_734a22.py
ADDED
@@ -0,0 +1,115 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import commonsenseqaDataset
from opencompass.utils.text_postprocessors import (
    match_answer_pattern,
)

commonsenseqa_reader_cfg = dict(
    input_columns=['question', 'A', 'B', 'C', 'D', 'E'],
    output_column='answerKey',
    test_split='validation',
)

_ice_template = dict(
    type=PromptTemplate,
    template=dict(
        begin='</E>',
        round=[
            dict(
                role='HUMAN',
                prompt='Q: What do people use to absorb extra ink from a fountain pen? Answer Choices: A.shirt pocket B.calligrapher’s hand C.inkwell D.desk drawer E.blotter',
            ),
            dict(
                role='BOT',
                prompt='A: The answer must be an item that can absorb ink. Of the above choices, only blotters are used to absorb ink. So the answer is E.',
            ),
            dict(
                role='HUMAN',
                prompt='Q: What home entertainment equipment requires cable? Answer Choices: A.radio shack B.substation C.television D.cabinet',
            ),
            dict(
                role='BOT',
                prompt='A: The answer must require cable. Of the above choices, only television requires cable. So the answer is C.',
            ),
            dict(
                role='HUMAN',
                prompt='Q: The fox walked from the city into the forest, what was it looking for? Answer Choices: A.pretty flowers B.hen house C.natural habitat D.storybook',
            ),
            dict(
                role='BOT',
                prompt='A: The answer must be something in the forest. Of the above choices, only natural habitat is in the forest. So the answer is C.',
            ),
            dict(
                role='HUMAN',
                prompt='Q: Sammy wanted to go to where the people were. Where might he go? Answer Choices: A.populated areas B.race track C.desert D.apartment E.roadblock',
            ),
            dict(
                role='BOT',
                prompt='A: The answer must be a place with a lot of people. Of the above choices, only populated areas have a lot of people. So the answer is A.',
            ),
            dict(
                role='HUMAN',
                prompt='Q: Where do you put your grapes just before checking out? Answer Choices: A.mouth B.grocery cart C.super market D.fruit basket E.fruit market',
            ),
            dict(
                role='BOT',
                prompt='A: The answer should be the place where grocery items are placed before checking out. Of the above choices, grocery cart makes the most sense for holding grocery items. So the answer is B.',
            ),
            dict(
                role='HUMAN',
                prompt='Q: Google Maps and other highway and street GPS services have replaced what? Answer Choices: A.united states B.mexico C.countryside D.atlas',
            ),
            dict(
                role='BOT',
                prompt='A: The answer must be something that used to do what Google Maps and GPS services do, which is to give directions. Of the above choices, only atlases are used to give directions. So the answer is D.',
            ),
            dict(
                role='HUMAN',
                prompt='Q: Before getting a divorce, what did the wife feel who was doing all the work? Answer Choices: A.harder B.anguish C.bitterness D.tears E.sadness',
            ),
            dict(
                role='BOT',
                prompt='A: The answer should be the feeling of someone getting divorced who was doing all the work. Of the above choices, the closest feeling is bitterness. So the answer is C.',
            ),
            dict(
                role='HUMAN',
                prompt='Q: {question} Answer Choices: A. {A}\nB. {B}\nC. {C}\nD. {D}\nE. {E}\nA:',
            ),
            dict(
                role='BOT',
                prompt='{answerKey}',
            ),
        ],
    ),
    ice_token='</E>',
)

commonsenseqa_infer_cfg = dict(
    ice_template=_ice_template,
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

commonsenseqa_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_postprocessor=dict(
        type=match_answer_pattern, answer_pattern=r'(?i)so the answer is\s*([A-P])'
    ),
)


commonsenseqa_datasets = [
    dict(
        abbr='commonsense_qa',
        type=commonsenseqaDataset,
        path='opencompass/commonsense_qa',
        reader_cfg=commonsenseqa_reader_cfg,
        infer_cfg=commonsenseqa_infer_cfg,
        eval_cfg=commonsenseqa_eval_cfg,
    )
]

del _ice_template
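The configured answer_pattern drives the accuracy computation: only an option letter captured after the chain-of-thought sign-off counts as a prediction. A functionally similar extractor is sketched below; it is illustrative and not opencompass's actual match_answer_pattern implementation:

import re

ANSWER_PATTERN = r'(?i)so the answer is\s*([A-P])'

def extract_option(text: str) -> str:
    # Return the captured option letter, or '' when the sign-off is missing.
    m = re.search(ANSWER_PATTERN, text)
    return m.group(1).upper() if m else ''

print(extract_option('Only blotters absorb ink. So the answer is E.'))  # E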