Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- opencompass/configs/datasets/adv_glue/adv_glue_mnli/adv_glue_mnli_gen.py +4 -0
- opencompass/configs/datasets/adv_glue/adv_glue_mnli/adv_glue_mnli_gen_bd8ef0.py +42 -0
- opencompass/configs/datasets/adv_glue/adv_glue_mnli_mm/adv_glue_mnli_mm_gen.py +4 -0
- opencompass/configs/datasets/adv_glue/adv_glue_mnli_mm/adv_glue_mnli_mm_gen_bd8ef0.py +42 -0
- opencompass/configs/datasets/adv_glue/adv_glue_qnli/adv_glue_qnli_gen.py +4 -0
- opencompass/configs/datasets/adv_glue/adv_glue_qnli/adv_glue_qnli_gen_0b7326.py +42 -0
- opencompass/configs/datasets/adv_glue/adv_glue_qqp/adv_glue_qqp_gen.py +4 -0
- opencompass/configs/datasets/adv_glue/adv_glue_qqp/adv_glue_qqp_gen_cdc277.py +42 -0
- opencompass/configs/datasets/adv_glue/adv_glue_rte/adv_glue_rte_gen.py +4 -0
- opencompass/configs/datasets/adv_glue/adv_glue_rte/adv_glue_rte_gen_8cc547.py +42 -0
- opencompass/configs/datasets/adv_glue/adv_glue_sst2/adv_glue_sst2_gen.py +4 -0
- opencompass/configs/datasets/adv_glue/adv_glue_sst2/adv_glue_sst2_gen_ee8d3b.py +41 -0
- opencompass/configs/datasets/collections/leaderboard/qwen.py +51 -0
- opencompass/configs/datasets/collections/leaderboard/qwen_chat.py +51 -0
- opencompass/configs/datasets/compassbench_20_v1_1_public/agent/cibench_template_gen_e6b12a.py +57 -0
- opencompass/configs/datasets/compassbench_20_v1_1_public/agent/mus_teval_gen_105c48.py +56 -0
- opencompass/configs/datasets/compassbench_20_v1_1_public/code/compassbench_v1_1_code_gen_986f01.py +291 -0
- opencompass/configs/datasets/compassbench_20_v1_1_public/knowledge/compassbench_v1_knowledge_gen_bd74e0.py +133 -0
- opencompass/configs/datasets/compassbench_20_v1_1_public/language/compassbench_v1_language_gen_7aa06d.py +46 -0
- opencompass/configs/datasets/compassbench_20_v1_1_public/math/compassbench_v1_1_math_gen_1dc21d.py +81 -0
- opencompass/configs/datasets/compassbench_20_v1_1_public/math/mathbench_prompt.py +103 -0
- opencompass/configs/datasets/compassbench_20_v1_1_public/reason/compassbench_v1_reason_gen_d26d08.py +28 -0
- opencompass/configs/datasets/leval/levalcoursera/leval_coursera_gen.py +4 -0
- opencompass/configs/datasets/leval/levalcoursera/leval_coursera_gen_36a006.py +45 -0
- opencompass/configs/datasets/leval/levalfinancialqa/leval_financialqa_gen.py +4 -0
- opencompass/configs/datasets/leval/levalfinancialqa/leval_financialqa_gen_b03798.py +43 -0
- opencompass/configs/datasets/leval/levalgovreportsumm/leval_gov_report_summ_gen.py +4 -0
- opencompass/configs/datasets/leval/levalgovreportsumm/leval_gov_report_summ_gen_b03798.py +43 -0
- opencompass/configs/datasets/leval/levalgsm100/leval_gsm100_gen.py +4 -0
- opencompass/configs/datasets/leval/levalgsm100/leval_gsm100_gen_77dd94.py +46 -0
- opencompass/configs/datasets/leval/levallegalcontractqa/leval_legalcontractqa_gen.py +4 -0
- opencompass/configs/datasets/leval/levallegalcontractqa/leval_legalcontractqa_gen_68a2ac.py +43 -0
- opencompass/configs/datasets/leval/levalmeetingsumm/leval_meetingsumm_gen.py +4 -0
- opencompass/configs/datasets/leval/levalmeetingsumm/leval_meetingsumm_gen_b03798.py +43 -0
- opencompass/configs/datasets/leval/levalmultidocqa/leval_multidocqa_gen.py +4 -0
- opencompass/configs/datasets/leval/levalmultidocqa/leval_multidocqa_gen_96bf3f.py +43 -0
- opencompass/configs/datasets/leval/levalnarrativeqa/leval_narrativeqa_gen.py +4 -0
- opencompass/configs/datasets/leval/levalnarrativeqa/leval_narrativeqa_gen_766dd0.py +43 -0
- opencompass/configs/datasets/leval/levalnaturalquestion/leval_naturalquestion_gen.py +4 -0
- opencompass/configs/datasets/leval/levalnaturalquestion/leval_naturalquestion_gen_52c33f.py +43 -0
- opencompass/configs/datasets/leval/levalnewssumm/leval_newssumm_gen.py +4 -0
- opencompass/configs/datasets/leval/levalnewssumm/leval_newssumm_gen_b03798.py +43 -0
- opencompass/configs/datasets/leval/levalpaperassistant/leval_paper_assistant_gen.py +4 -0
- opencompass/configs/datasets/leval/levalpaperassistant/leval_paper_assistant_gen_b03798.py +43 -0
- opencompass/configs/datasets/leval/levalpatentsumm/leval_patent_summ_gen.py +4 -0
- opencompass/configs/datasets/leval/levalpatentsumm/leval_patent_summ_gen_b03798.py +43 -0
- opencompass/configs/datasets/leval/levalquality/leval_quality_gen.py +4 -0
- opencompass/configs/datasets/leval/levalquality/leval_quality_gen_36a006.py +45 -0
- opencompass/configs/datasets/leval/levalreviewsumm/leval_review_summ_gen.py +4 -0
- opencompass/configs/datasets/leval/levalreviewsumm/leval_review_summ_gen_b03798.py +43 -0
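
Each dataset added in this commit follows the same OpenCompass convention: a short `*_gen.py` stub that pulls in the fingerprinted config through `read_base()`, plus the full config that defines a `*_reader_cfg`, `*_infer_cfg`, `*_eval_cfg`, and a `*_datasets` list. A minimal sketch of a user-side evaluation config that combines two of the added dataset lists; the file name `my_eval.py` and the exact import prefix are illustrative assumptions, not part of this commit:

# my_eval.py -- illustrative sketch only, not part of this commit
from mmengine.config import read_base

with read_base():
    # each import brings in a list of dataset dicts defined by the configs below
    from opencompass.configs.datasets.adv_glue.adv_glue_mnli.adv_glue_mnli_gen import adv_mnli_datasets
    from opencompass.configs.datasets.adv_glue.adv_glue_sst2.adv_glue_sst2_gen import adv_sst2_datasets

# same aggregation idiom used by the leaderboard collection configs in this commit
datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])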
opencompass/configs/datasets/adv_glue/adv_glue_mnli/adv_glue_mnli_gen.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .adv_glue_mnli_gen_bd8ef0 import adv_mnli_datasets  # noqa: F401, F403
opencompass/configs/datasets/adv_glue/adv_glue_mnli/adv_glue_mnli_gen_bd8ef0.py
ADDED
@@ -0,0 +1,42 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvMnliDataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_mnli_reader_cfg = dict(
    input_columns=['premise', 'hypothesis'], output_column='label_option')

adv_mnli_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """Please identify whether the premise entails the hypothesis. The answer should be exactly 'A. yes', 'B. maybe' or 'C. no'.
premise: {premise}
hypothesis: {hypothesis}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_mnli_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='ABC'),
)

adv_mnli_datasets = [
    dict(
        abbr='adv_mnli',
        type=AdvMnliDataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_mnli_reader_cfg,
        infer_cfg=adv_mnli_infer_cfg,
        eval_cfg=adv_mnli_eval_cfg,
    )
]
opencompass/configs/datasets/adv_glue/adv_glue_mnli_mm/adv_glue_mnli_mm_gen.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .adv_glue_mnli_mm_gen_bd8ef0 import adv_mnli_mm_datasets  # noqa: F401, F403
opencompass/configs/datasets/adv_glue/adv_glue_mnli_mm/adv_glue_mnli_mm_gen_bd8ef0.py
ADDED
@@ -0,0 +1,42 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvMnliMMDataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_mnli_mm_reader_cfg = dict(
    input_columns=['premise', 'hypothesis'], output_column='label_option')

adv_mnli_mm_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """Please identify whether the premise entails the hypothesis. The answer should be exactly 'A. yes', 'B. maybe' or 'C. no'.
premise: {premise}
hypothesis: {hypothesis}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_mnli_mm_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='ABC'),
)

adv_mnli_mm_datasets = [
    dict(
        abbr='adv_mnli_mm',
        type=AdvMnliMMDataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_mnli_mm_reader_cfg,
        infer_cfg=adv_mnli_mm_infer_cfg,
        eval_cfg=adv_mnli_mm_eval_cfg,
    )
]
opencompass/configs/datasets/adv_glue/adv_glue_qnli/adv_glue_qnli_gen.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .adv_glue_qnli_gen_0b7326 import adv_qnli_datasets  # noqa: F401, F403
opencompass/configs/datasets/adv_glue/adv_glue_qnli/adv_glue_qnli_gen_0b7326.py
ADDED
@@ -0,0 +1,42 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvQnliDataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_qnli_reader_cfg = dict(
    input_columns=['question', 'sentence'], output_column='label_option')

adv_qnli_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """Please identify whether the sentence answers the question. The answer should be exactly 'A. yes' or 'B. no'.
question: {question}
sentence: {sentence}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_qnli_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

adv_qnli_datasets = [
    dict(
        abbr='adv_qnli',
        type=AdvQnliDataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_qnli_reader_cfg,
        infer_cfg=adv_qnli_infer_cfg,
        eval_cfg=adv_qnli_eval_cfg,
    )
]
opencompass/configs/datasets/adv_glue/adv_glue_qqp/adv_glue_qqp_gen.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .adv_glue_qqp_gen_cdc277 import adv_qqp_datasets  # noqa: F401, F403
opencompass/configs/datasets/adv_glue/adv_glue_qqp/adv_glue_qqp_gen_cdc277.py
ADDED
@@ -0,0 +1,42 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvQqpDataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_qqp_reader_cfg = dict(
    input_columns=['question1', 'question2'], output_column='label_option')

adv_qqp_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """Please identify whether Question 1 has the same meaning as Question 2. The answer should be exactly 'A. no' or 'B. yes'.
Question 1: {question1}
Question 2: {question2}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_qqp_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

adv_qqp_datasets = [
    dict(
        abbr='adv_qqp',
        type=AdvQqpDataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_qqp_reader_cfg,
        infer_cfg=adv_qqp_infer_cfg,
        eval_cfg=adv_qqp_eval_cfg,
    )
]
opencompass/configs/datasets/adv_glue/adv_glue_rte/adv_glue_rte_gen.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .adv_glue_rte_gen_8cc547 import adv_rte_datasets  # noqa: F401, F403
opencompass/configs/datasets/adv_glue/adv_glue_rte/adv_glue_rte_gen_8cc547.py
ADDED
@@ -0,0 +1,42 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvRteDataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_rte_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'], output_column='label_option')

adv_rte_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """Please identify whether the premise entails the hypothesis. The answer should be exactly 'A. yes' or 'B. no'.
hypothesis: {sentence1}
premise: {sentence2}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_rte_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

adv_rte_datasets = [
    dict(
        abbr='adv_rte',
        type=AdvRteDataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_rte_reader_cfg,
        infer_cfg=adv_rte_infer_cfg,
        eval_cfg=adv_rte_eval_cfg,
    )
]
opencompass/configs/datasets/adv_glue/adv_glue_sst2/adv_glue_sst2_gen.py
ADDED
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .adv_glue_sst2_gen_ee8d3b import adv_sst2_datasets  # noqa: F401, F403
opencompass/configs/datasets/adv_glue/adv_glue_sst2/adv_glue_sst2_gen_ee8d3b.py
ADDED
@@ -0,0 +1,41 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvSst2Dataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_sst2_reader_cfg = dict(
    input_columns=['sentence'], output_column='label_option')

adv_sst2_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """For the given sentence, label the sentiment of the sentence as positive or negative. The answer should be exactly 'A. negative' or 'B. positive'.
sentence: {sentence}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_sst2_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

adv_sst2_datasets = [
    dict(
        abbr='adv_sst2',
        type=AdvSst2Dataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_sst2_reader_cfg,
        infer_cfg=adv_sst2_infer_cfg,
        eval_cfg=adv_sst2_eval_cfg,
    )
]
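
All of the AdvGLUE configs above score generations with AccDropEvaluator after reducing the model's free-form answer to a single option letter via `first_option_postprocess`. A rough sketch of what such a postprocessor does, purely for illustration (this is not the actual OpenCompass implementation):

import re

def first_option_postprocess_sketch(text: str, options: str = 'ABC') -> str:
    # Return the first standalone option letter found in the model output,
    # so it can be compared against the dataset's label_option column.
    match = re.search(rf'\b([{options}])\b', text)
    return match.group(1) if match else ''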
opencompass/configs/datasets/collections/leaderboard/qwen.py
ADDED
@@ -0,0 +1,51 @@
from mmengine.config import read_base

with read_base():
    from ...ceval.ceval_ppl_578f8d import ceval_datasets
    from ...agieval.agieval_mixed_713d14 import agieval_datasets
    from ...mmlu.mmlu_ppl_ac766d import mmlu_datasets
    from ...cmmlu.cmmlu_ppl_8b9c76 import cmmlu_datasets
    from ...GaokaoBench.GaokaoBench_mixed_9af5ee import GaokaoBench_datasets
    from ...ARC_c.ARC_c_gen_1e0de5 import ARC_c_datasets
    from ...ARC_e.ARC_e_gen_1e0de5 import ARC_e_datasets

    from ...SuperGLUE_WiC.SuperGLUE_WiC_ppl_312de9 import WiC_datasets
    from ...FewCLUE_chid.FewCLUE_chid_ppl_8f2872 import chid_datasets
    from ...CLUE_afqmc.CLUE_afqmc_ppl_6507d7 import afqmc_datasets
    from ...SuperGLUE_WSC.SuperGLUE_WSC_ppl_003529 import WSC_datasets
    from ...tydiqa.tydiqa_gen_978d2a import tydiqa_datasets
    from ...flores.flores_gen_806ede import flores_datasets

    from ...SuperGLUE_BoolQ.SuperGLUE_BoolQ_ppl_314797 import BoolQ_datasets
    from ...commonsenseqa.commonsenseqa_ppl_5545e2 import commonsenseqa_datasets
    from ...triviaqa.triviaqa_gen_0356ec import triviaqa_datasets
    from ...nq.nq_gen_0356ec import nq_datasets

    from ...CLUE_C3.CLUE_C3_gen_8c358f import C3_datasets
    from ...race.race_ppl_5831a0 import race_datasets
    from ...obqa.obqa_gen_9069e4 import obqa_datasets
    from ...FewCLUE_csl.FewCLUE_csl_ppl_841b62 import csl_datasets
    from ...lcsts.lcsts_gen_8ee1fe import lcsts_datasets
    from ...Xsum.Xsum_gen_31397e import Xsum_datasets
    from ...FewCLUE_eprstmt.FewCLUE_eprstmt_gen_740ea0 import eprstmt_datasets
    from ...lambada.lambada_gen_217e11 import lambada_datasets

    from ...CLUE_cmnli.CLUE_cmnli_ppl_fdc6de import cmnli_datasets
    from ...CLUE_ocnli.CLUE_ocnli_gen_c4cb6c import ocnli_datasets
    from ...SuperGLUE_AX_b.SuperGLUE_AX_b_gen_4dfefa import AX_b_datasets
    from ...SuperGLUE_AX_g.SuperGLUE_AX_g_gen_68aac7 import AX_g_datasets
    from ...SuperGLUE_RTE.SuperGLUE_RTE_gen_68aac7 import RTE_datasets
    from ...SuperGLUE_COPA.SuperGLUE_COPA_gen_91ca53 import COPA_datasets
    from ...SuperGLUE_ReCoRD.SuperGLUE_ReCoRD_gen_a69961 import ReCoRD_datasets
    from ...hellaswag.hellaswag_gen_6faab5 import hellaswag_datasets
    from ...piqa.piqa_gen_1194eb import piqa_datasets
    from ...siqa.siqa_ppl_e8d8c5 import siqa_datasets
    from ...math.math_gen_265cce import math_datasets
    from ...gsm8k.gsm8k_gen_1d7fe4 import gsm8k_datasets
    from ...drop.deprecated_drop_gen_8a9ed9 import drop_datasets
    from ...humaneval.deprecated_humaneval_gen_a82cae import humaneval_datasets
    from ...mbpp.deprecated_mbpp_gen_1e1056 import mbpp_datasets
    from ...bbh.bbh_gen_5bf00b import bbh_datasets


datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
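
The final line above is the aggregation idiom shared by both leaderboard collections: every local variable whose name ends in `_datasets` is a list, and `sum(..., [])` concatenates them into one flat `datasets` list. A toy, self-contained illustration (the variable names here are made up):

ceval_datasets = [{'abbr': 'ceval'}]
mmlu_datasets = [{'abbr': 'mmlu'}]
# gather every *_datasets list defined so far into a single flat list
datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
print([d['abbr'] for d in datasets])  # ['ceval', 'mmlu']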
opencompass/configs/datasets/collections/leaderboard/qwen_chat.py
ADDED
@@ -0,0 +1,51 @@
from mmengine.config import read_base

with read_base():
    from ...ceval.ceval_gen_5f30c7 import ceval_datasets
    from ...agieval.agieval_mixed_713d14 import agieval_datasets
    from ...mmlu.mmlu_gen_4d595a import mmlu_datasets
    from ...cmmlu.cmmlu_gen_c13365 import cmmlu_datasets
    from ...GaokaoBench.GaokaoBench_gen_5cfe9e import GaokaoBench_datasets
    from ...ARC_c.ARC_c_ppl_2ef631 import ARC_c_datasets
    from ...ARC_e.ARC_e_ppl_2ef631 import ARC_e_datasets

    from ...SuperGLUE_WiC.SuperGLUE_WiC_gen_d06864 import WiC_datasets
    from ...FewCLUE_chid.FewCLUE_chid_ppl_8f2872 import chid_datasets
    from ...CLUE_afqmc.CLUE_afqmc_gen_901306 import afqmc_datasets
    from ...SuperGLUE_WSC.SuperGLUE_WSC_ppl_003529 import WSC_datasets
    from ...tydiqa.tydiqa_gen_978d2a import tydiqa_datasets
    from ...flores.flores_gen_806ede import flores_datasets

    from ...SuperGLUE_BoolQ.SuperGLUE_BoolQ_ppl_314797 import BoolQ_datasets
    from ...commonsenseqa.commonsenseqa_gen_c946f2 import commonsenseqa_datasets
    from ...triviaqa.triviaqa_gen_2121ce import triviaqa_datasets
    from ...nq.nq_gen_c788f6 import nq_datasets

    from ...CLUE_C3.CLUE_C3_gen_8c358f import C3_datasets
    from ...race.race_gen_69ee4f import race_datasets
    from ...obqa.obqa_ppl_6aac9e import obqa_datasets
    from ...FewCLUE_csl.FewCLUE_csl_ppl_841b62 import csl_datasets
    from ...lcsts.lcsts_gen_8ee1fe import lcsts_datasets
    from ...Xsum.Xsum_gen_31397e import Xsum_datasets
    from ...FewCLUE_eprstmt.FewCLUE_eprstmt_ppl_f1e631 import eprstmt_datasets
    from ...lambada.lambada_gen_217e11 import lambada_datasets

    from ...CLUE_cmnli.CLUE_cmnli_ppl_fdc6de import cmnli_datasets
    from ...CLUE_ocnli.CLUE_ocnli_ppl_fdc6de import ocnli_datasets
    from ...SuperGLUE_AX_b.SuperGLUE_AX_b_ppl_6db806 import AX_b_datasets
    from ...SuperGLUE_AX_g.SuperGLUE_AX_g_ppl_66caf3 import AX_g_datasets
    from ...SuperGLUE_RTE.SuperGLUE_RTE_ppl_66caf3 import RTE_datasets
    from ...SuperGLUE_COPA.SuperGLUE_COPA_gen_91ca53 import COPA_datasets
    from ...SuperGLUE_ReCoRD.SuperGLUE_ReCoRD_gen_30dea0 import ReCoRD_datasets
    from ...hellaswag.hellaswag_ppl_a6e128 import hellaswag_datasets
    from ...piqa.piqa_ppl_0cfff2 import piqa_datasets
    from ...siqa.siqa_ppl_e8d8c5 import siqa_datasets
    from ...math.math_gen_265cce import math_datasets
    from ...gsm8k.gsm8k_gen_1d7fe4 import gsm8k_datasets
    from ...drop.deprecated_drop_gen_8a9ed9 import drop_datasets
    from ...humaneval.deprecated_humaneval_gen_a82cae import humaneval_datasets
    from ...mbpp.deprecated_mbpp_gen_1e1056 import mbpp_datasets
    from ...bbh.bbh_gen_5b92b0 import bbh_datasets


datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
opencompass/configs/datasets/compassbench_20_v1_1_public/agent/cibench_template_gen_e6b12a.py
ADDED
@@ -0,0 +1,57 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import AgentInferencer
from opencompass.datasets import CIBenchDataset, CIBenchEvaluator

libs = [
    '/lightgbm',
    '/matplotlib',
    '/nltk',
    '/opencv',
    '/pandas',
    '/pytorch',
    '/scipy',
    '/seaborn',
    '/sklearn',
    '/tensorflow',
    '_chinese/lightgbm',
    '_chinese/matplotlib',
    '_chinese/nltk',
    '_chinese/opencv',
    '_chinese/pandas',
    '_chinese/pytorch',
    '_chinese/scipy',
    '_chinese/seaborn',
    '_chinese/sklearn',
    '_chinese/tensorflow',
]


cibench_datasets = []
for lib in libs:
    cibench_reader_cfg = dict(
        input_columns=['questions'], output_column='references', train_split='test', test_split='test'
    )

    cibench_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template='{questions}',
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=AgentInferencer, infer_mode='every'),
    )

    cibench_eval_cfg = dict(evaluator=dict(type=CIBenchEvaluator), pred_role='BOT')

    cibench_datasets.append(
        dict(
            abbr=f'cibench_template{lib}_public',
            type=CIBenchDataset,
            path=f'data/compassbench_v1.1.public/agent-cibench/cibench_template{lib}',
            internet_check=False,
            reader_cfg=cibench_reader_cfg,
            infer_cfg=cibench_infer_cfg,
            eval_cfg=cibench_eval_cfg,
        )
    )
opencompass/configs/datasets/compassbench_20_v1_1_public/agent/mus_teval_gen_105c48.py
ADDED
@@ -0,0 +1,56 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import ChatInferencer
from opencompass.openicl.icl_evaluator import TEvalEvaluator
from opencompass.datasets import teval_postprocess, TEvalDataset

plugin_eval_subject_mapping = {
    'instruct': ['instruct_v1'],
    'instruct_zh': ['instruct_v1_zh'],
    'plan': ['plan_json_v1', 'plan_str_v1'],
    'plan_zh': ['plan_json_v1_zh', 'plan_str_v1_zh'],
    'review': ['review_str_v1'],
    'review_zh': ['review_str_v1_zh'],
    'reason_retrieve_understand': ['reason_retrieve_understand_json_v1'],
    'reason_retrieve_understand_zh': ['reason_retrieve_understand_json_v1_zh'],
    'reason': ['reason_str_v1'],
    'reason_zh': ['reason_str_v1_zh'],
    'retrieve': ['retrieve_str_v1'],
    'retrieve_zh': ['retrieve_str_v1_zh'],
    'understand': ['understand_str_v1'],
    'understand_zh': ['understand_str_v1_zh'],
}

plugin_eval_datasets = []
for _name in plugin_eval_subject_mapping:
    plugin_eval_reader_cfg = dict(input_columns=['prompt'], output_column='ground_truth')
    plugin_eval_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(role='HUMAN', prompt='{prompt}'),
                ],
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=ChatInferencer),
    )
    plugin_eval_eval_cfg = dict(
        evaluator=dict(type=TEvalEvaluator, subset=_name),
        pred_postprocessor=dict(type=teval_postprocess),
        num_gpus=1,
    )

    for subset in plugin_eval_subject_mapping[_name]:
        plugin_eval_datasets.append(
            dict(
                abbr='plugin_eval-mus-p10-' + subset + '_public',
                type=TEvalDataset,
                path='data/compassbench_v1.1.public/agent-teval-p10',
                name=subset,
                reader_cfg=plugin_eval_reader_cfg,
                infer_cfg=plugin_eval_infer_cfg,
                eval_cfg=plugin_eval_eval_cfg,
            )
        )
opencompass/configs/datasets/compassbench_20_v1_1_public/code/compassbench_v1_1_code_gen_986f01.py
ADDED
@@ -0,0 +1,291 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, HumanEvalPlusEvaluator, humaneval_postprocess_v2
from opencompass.datasets import MBPPDataset, SanitizedMBPPDataset, MBPPEvaluator
from opencompass.datasets import HumanevalXDataset, HumanevalXEvaluator
from opencompass.datasets import LCDataset, LCPassKEvaluator
from opencompass.datasets import TACODataset, TACOEvaluator

compassbench_v1_1_code_datasets = []

# --------------------------------------------------------------- HumanEval CN ---------------------------------------------------------------
humaneval_reader_cfg = dict(input_columns=['prompt'], output_column='task_id', train_split='test')

humaneval_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='完成以下Python代码任务:\n{prompt}'),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvalEvaluator),
    pred_postprocessor=dict(type=humaneval_postprocess_v2),
)

compassbench_v1_1_code_datasets.append(
    dict(
        abbr='openai_humaneval_cn_public',
        type=HumanevalDataset,
        path='data/compassbench_v1.1.public/code/humaneval_cn/human-eval-cn-v2-20210705.jsonl',
        reader_cfg=humaneval_reader_cfg,
        infer_cfg=humaneval_infer_cfg,
        eval_cfg=humaneval_eval_cfg,
    )
)

# --------------------------------------------------------------- HumanEval Plus ---------------------------------------------------------------
humaneval_plus_reader_cfg = dict(input_columns=['prompt'], output_column='task_id', train_split='test')

# TODO: allow empty output-column
humaneval_plus_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Complete the following python code:\n{prompt}'),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

humaneval_plus_eval_cfg = dict(
    evaluator=dict(type=HumanEvalPlusEvaluator),
    pred_postprocessor=dict(type=humaneval_postprocess_v2),
)

compassbench_v1_1_code_datasets.append(
    dict(
        abbr='humaneval_plus_public',
        type=HumanevalDataset,
        path='data/compassbench_v1.1.public/code/humaneval/human-eval-v2-20210705.jsonl',
        reader_cfg=humaneval_plus_reader_cfg,
        infer_cfg=humaneval_plus_infer_cfg,
        eval_cfg=humaneval_plus_eval_cfg,
    )
)

# --------------------------------------------------------------- MBPP CN ---------------------------------------------------------------
mbpp_reader_cfg = dict(input_columns=['text', 'test_list'], output_column='test_list_2', test_range='[0:5]')

mbpp_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,你的任务是:编写一个函数,从给定的两个元组列表中查找相似的元素。 你的代码应该通过这些测试:\n\n assert similar_elements((3, 4, 5, 6),(5, 7, 4, 10)) == (4, 5)\n assert similar_elements((1, 2, 3, 4),(5, 4, 3, 7)) == (3, 4) \n assert similar_elements((11, 12, 14, 13),(17, 15, 14, 13)) == (13, 14) \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'def similar_elements(test_tup1, test_tup2):\r\n res = tuple(set(test_tup1) & set(test_tup2))\r\n return (res)' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,你的任务是:编写一个 Python 函数来识别一个整数是否不是素数。 你的代码应该通过这些测试:\n\n assert is_not_prime(2) == False \n assert is_not_prime(10) == True \n assert is_not_prime(35) == True \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'import math\r\ndef is_not_prime(n):\r\n result = False\r\n for i in range(2,int(math.sqrt(n)) + 1):\r\n if n % i == 0:\r\n result = True\r\n return result' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,你的任务是:编写一个函数,使用堆队列算法从给定的数字列表中查找最大整数。 你的代码应该通过这些测试:\n\n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],3)==[85, 75, 65] \n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],2)==[85, 75] \n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],5)==[85, 75, 65, 58, 35] \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'import heapq as hq\r\ndef heap_queue_largest(nums,n):\r\n largest_nums = hq.nlargest(n, nums)\r\n return largest_nums' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,你的任务是: {text} 你的代码应该通过这些测试:\n\n {test_list} \n'),
                dict(role='BOT', prompt='[BEGIN]\n'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

mbpp_eval_cfg = dict(evaluator=dict(type=MBPPEvaluator), pred_role='BOT')

compassbench_v1_1_code_datasets.append(
    dict(
        type=MBPPDataset,
        abbr='mbpp_cn_public',
        path='data/compassbench_v1.1.public/code/mbpp_cn/mbpp_cn.jsonl',
        reader_cfg=mbpp_reader_cfg,
        infer_cfg=mbpp_infer_cfg,
        eval_cfg=mbpp_eval_cfg,
    )
)

# --------------------------------------------------------------- Sanitized MBPP ---------------------------------------------------------------
sanitized_mbpp_reader_cfg = dict(input_columns=['text', 'test_list'], output_column='test_list_2', test_range='[0:5]')

sanitized_mbpp_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: Write a function to find the similar elements from the given two tuple lists. Your code should pass these tests:\n\n assert similar_elements((3, 4, 5, 6),(5, 7, 4, 10)) == (4, 5)\n assert similar_elements((1, 2, 3, 4),(5, 4, 3, 7)) == (3, 4) \n assert similar_elements((11, 12, 14, 13),(17, 15, 14, 13)) == (13, 14) \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'def similar_elements(test_tup1, test_tup2):\r\n res = tuple(set(test_tup1) & set(test_tup2))\r\n return (res)' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: Write a python function to identify non-prime numbers. Your code should pass these tests:\n\n assert is_not_prime(2) == False \n assert is_not_prime(10) == True \n assert is_not_prime(35) == True \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'import math\r\ndef is_not_prime(n):\r\n result = False\r\n for i in range(2,int(math.sqrt(n)) + 1):\r\n if n % i == 0:\r\n result = True\r\n return result' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: Write a function to find the largest integers from a given list of numbers using heap queue algorithm. Your code should pass these tests:\n\n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],3)==[85, 75, 65] \n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],2)==[85, 75] \n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],5)==[85, 75, 65, 58, 35] \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'import heapq as hq\r\ndef heap_queue_largest(nums,n):\r\n largest_nums = hq.nlargest(n, nums)\r\n return largest_nums' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: {text} Your code should pass these tests:\n\n {test_list} \n'),
                dict(role='BOT', prompt='[BEGIN]\n'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

sanitized_mbpp_eval_cfg = dict(evaluator=dict(type=MBPPEvaluator), pred_role='BOT')

compassbench_v1_1_code_datasets.append(
    dict(
        type=SanitizedMBPPDataset,
        abbr='sanitized_mbpp_public',
        path='data/compassbench_v1.1.public/code/mbpp/sanitized-mbpp.jsonl',
        reader_cfg=sanitized_mbpp_reader_cfg,
        infer_cfg=sanitized_mbpp_infer_cfg,
        eval_cfg=sanitized_mbpp_eval_cfg,
    )
)

# --------------------------------------------------------------- HumanevalX ---------------------------------------------------------------
humanevalx_reader_cfg = dict(input_columns=['prompt'], output_column='declaration', train_split='test')

humanevalx_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template='{prompt}'),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024),
)

humanevalx_eval_cfg_dict = {
    lang: dict(
        evaluator=dict(
            type=HumanevalXEvaluator,
            language=lang,
            ip_address=
            'localhost',  # replace to your code_eval_server ip_address, port
            port=5001,
        ),  # refer to https://opencompass.readthedocs.io/en/latest/advanced_guides/code_eval_service.html to launch a server
    )
    for lang in ['python', 'cpp', 'go', 'java', 'js']  # do not support rust now
}

# Please download the needed `xx.jsonl.gz` from
# https://github.com/THUDM/CodeGeeX2/tree/main/benchmark/humanevalx
# and move them into `data/humanevalx/` folder
for lang in ['python', 'cpp', 'go', 'java', 'js']:
    compassbench_v1_1_code_datasets.append(
        dict(
            type=HumanevalXDataset,
            abbr=f'humanevalx-{lang}_public',
            language=lang,
            path='data/compassbench_v1.1.public/code/humanevalx',
            reader_cfg=humanevalx_reader_cfg,
            infer_cfg=humanevalx_infer_cfg,
            eval_cfg=humanevalx_eval_cfg_dict[lang],
        )
    )

# --------------------------------------------------------------- LCBench ---------------------------------------------------------------
LC_difficulties_list = ['EASY', 'MEDIUM', 'HARD']
LC_reader_cfg = dict(input_columns=['text', 'test_list'], output_column='test_column')


LC_en_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: You are given three positive integers n, x, and y.\nIn a city, there exist houses numbered 1 to n connected by n streets. There is a street connecting the house numbered i with the house numbered i + 1 for all 1 <= i <= n - 1 . An additional street connects the house numbered x with the house numbered y.\nFor each k, such that 1 <= k <= n, you need to find the number of pairs of houses (house1, house2) such that the minimum number of streets that need to be traveled to reach house2 from house1 is k.\nReturn a 1-indexed array result of length n where result[k] represents the total number of pairs of houses such that the minimum streets required to reach one house from the other is k.\nNote that x and y can be equal. Your code should pass these tests:\n\n assert countOfPairs(n = 3, x = 1, y = 3) == [6,0,0]\n assert countOfPairs(n = 5, x = 2, y = 4) == [10,8,2,0,0] \n assert countOfPairs(n = 4, x = 1, y = 1) == [6,4,2,0] \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'from itertools import accumulate\ndef countOfPairs(n, x, y):\n x, y = min(x, y), max(x, y)\n A = [0] * n\n for i in range(1, n + 1):\n A[0] += 2 \n A[min(i - 1, abs(i - y) + x)] -= 1 \n A[min(n - i, abs(i - x) + 1 + n - y)] -= 1 \n A[min(abs(i - x), abs(y - i) + 1)] += 1 \n A[min(abs(i - x) + 1, abs(y - i))] += 1 \n r = max(x - i, 0) + max(i - y, 0)\n A[r + (y - x + 0) // 2] -= 1 \n A[r + (y - x + 1) // 2] -= 1 \n return list(accumulate(A))' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: You are given a string word containing lowercase English letters.\nTelephone keypads have keys mapped with distinct collections of lowercase English letters, which can be used to form words by pushing them. For example, the key 2 is mapped with ["a","b","c"], we need to push the key one time to type "a", two times to type "b", and three times to type "c" .\nIt is allowed to remap the keys numbered 2 to 9 to distinct collections of letters. The keys can be remapped to any amount of letters, but each letter must be mapped to exactly one key. You need to find the minimum number of times the keys will be pushed to type the string word.\nReturn the minimum number of pushes needed to type word after remapping the keys.\nAn example mapping of letters to keys on a telephone keypad is given below. Note that 1, *, #, and 0 do not map to any letters. Your code should pass these tests:\n\n assert minimumPushes("abcde") == 5 \n assert minimumPushes("xyzxyzxyzxyz") == 12 \n assert minimumPushes("aabbccddeeffgghhiiiiii") == 24 \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'def minimumPushes(word):\n letter_counts = {}\n for c in word:\n letter_counts[c] = letter_counts.get(c, 0) + 1\n counts = list(letter_counts.values())\n counts.sort(reverse=True)\n ans, row = 0, 1\n for i in range(len(counts)):\n if i > 7 and i % 8 == 0:\n row += 1\n ans += row * counts[i]\n return ans' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt="You are an expert Python programmer, and here is your task: You are given an integer k and an integer x.\nConsider s is the 1-indexed binary representation of an integer num. The price of a number num is the number of i's such that i % x == 0 and s[i] is a set bit.\nReturn the greatest integer num such that the sum of prices of all numbers from 1 to num is less than or equal to k.\nNote:\nIn the binary representation of a number set bit is a bit of value 1.\nThe binary representation of a number will be indexed from right to left. For example, if s == 11100, s[4] == 1 and s[2] == 0. Your code should pass these tests:\n\n assert findMaximumNumber(k = 9, x = 1) == 6 \n assert findMaximumNumber(k = 7, x = 2) == 9 \n"),
                dict(role='BOT', prompt="[BEGIN]\n 'def findMaximumNumber(k, x):\n def check(v):\n A = list(map(int, bin(v)[2:]))\n n = len(A)\n res = p = 0\n for i,v in enumerate(A):\n if v == 1:\n l = n - i - 1\n res += (p << l) + ((l // x) << (l - 1) if l else 0)\n if (n - i) % x == 0:\n p += v\n return res + p\n l, r = 1, 10 ** 15\n while l < r:\n mid = (l + r + 1) // 2\n if check(mid) <= k:\n l = mid\n else:\n r = mid - 1\n return l' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: {text} Your code should pass these tests:\n\n {test_list} \n'),
                dict(role='BOT', prompt='[BEGIN]\n'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)


LC_cn_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,下面是你的任务: 给你三个 正整数 n 、x 和 y 。\n在城市中,存在编号从 1 到 n 的房屋,由 n 条街道相连。对所有 1 <= i < n ,都存在一条街道连接编号为 i 的房屋与编号为 i + 1 的房屋。另存在一条街道连接编号为 x 的房屋与编号为 y 的房屋。\n对于每个 k(1 <= k <= n),你需要找出所有满足要求的 房屋对 [house1, house2] ,即从 house1 到 house2 需要经过的 最少 街道数为 k 。\n返回一个下标从 1 开始且长度为 n 的数组 result ,其中 result[k] 表示所有满足要求的房屋对的数量,即从一个房屋到另一个房屋需要经过的 最少 街道数为 k 。\n注意,x 与 y 可以 相等。你的代码需要通过以下测试:\n\n assert countOfPairs(n = 3, x = 1, y = 3) == [6,0,0]\n assert countOfPairs(n = 5, x = 2, y = 4) == [10,8,2,0,0] \n assert countOfPairs(n = 4, x = 1, y = 1) == [6,4,2,0] \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'from itertools import accumulate\ndef countOfPairs(n, x, y):\n x, y = min(x, y), max(x, y)\n A = [0] * n\n for i in range(1, n + 1):\n A[0] += 2 \n A[min(i - 1, abs(i - y) + x)] -= 1 \n A[min(n - i, abs(i - x) + 1 + n - y)] -= 1 \n A[min(abs(i - x), abs(y - i) + 1)] += 1 \n A[min(abs(i - x) + 1, abs(y - i))] += 1 \n r = max(x - i, 0) + max(i - y, 0)\n A[r + (y - x + 0) // 2] -= 1 \n A[r + (y - x + 1) // 2] -= 1 \n return list(accumulate(A))' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,下面是你的任务: 给你一个字符串 word,由 不同 小写英文字母组成。\n电话键盘上的按键与 不同 小写英文字母集合相映射,可以通过按压按键来组成单词。例如,按键 2 对应 ["a","b","c"],我们需要按一次键来输入 "a",按两次键来输入 "b",按三次键来输入 "c"。\n现在允许你将编号为 2 到 9 的按键重新映射到 不同 字母集合。每个按键可以映射到 任意数量 的字母,但每个字母 必须 恰好 映射到 一个 按键上。你需要找到输入字符串 word 所需的 最少 按键次数。\n返回重新映射按键后输入 word 所需的 最少 按键次数。\n下面给出了一种电话键盘上字母到按键的映射作为示例。注意 1,*,# 和 0 不 对应任何字母。你的代码需要通过以下测试:\n\n assert minimumPushes("abcde") == 5 \n assert minimumPushes("xyzxyzxyzxyz") == 12 \n assert minimumPushes("aabbccddeeffgghhiiiiii") == 24 \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'def minimumPushes(word):\n letter_counts = {}\n for c in word:\n letter_counts[c] = letter_counts.get(c, 0) + 1\n counts = list(letter_counts.values())\n counts.sort(reverse=True)\n ans, row = 0, 1\n for i in range(len(counts)):\n if i > 7 and i % 8 == 0:\n row += 1\n ans += row * counts[i]\n return ans' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,下面是你的任务: 给你一个整数 k 和一个整数 x 。\n令 s 为整数 num 的下标从 1 开始的二进制表示。我们说一个整数 num 的 价值 是满足 i % x == 0 且 s[i] 是 设置位 的 i 的数目。\n请你返回 最大 整数 num ,满足从 1 到 num 的所有整数的 价值 和小于等于 k 。\n注意:\n一个整数二进制表示下 设置位 是值为 1 的数位。\n一个整数的二进制表示下标从右到左编号,比方说如果 s == 11100 ,那么 s[4] == 1 且 s[2] == 0。你的代码需要通过以下测试:\n\n assert findMaximumNumber(k = 9, x = 1) == 6 \n assert findMaximumNumber(k = 7, x = 2) == 9 \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'def findMaximumNumber(k, x):\n def check(v):\n A = list(map(int, bin(v)[2:]))\n n = len(A)\n res = p = 0\n for i,v in enumerate(A):\n if v == 1:\n l = n - i - 1\n res += (p << l) + ((l // x) << (l - 1) if l else 0)\n if (n - i) % x == 0:\n p += v\n return res + p\n l, r = 1, 10 ** 15\n while l < r:\n mid = (l + r + 1) // 2\n if check(mid) <= k:\n l = mid\n else:\n r = mid - 1\n return l' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,下面是你的任务: {text} 你的代码需要通过以下测试:\n\n {test_list} \n'),
                dict(role='BOT', prompt='[BEGIN]\n'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)


LC_eval_cfg = dict(evaluator=dict(type=LCPassKEvaluator), pred_role='BOT')

for difficulty in LC_difficulties_list:
    compassbench_v1_1_code_datasets.append(
        dict(
            type=LCDataset,
            abbr='lcbench_en-' + difficulty + '_public',
            path='data/compassbench_v1.1.public/code/LCBench2023/LCBench2023.jsonl',
            difficulty=difficulty,
            reader_cfg=LC_reader_cfg,
            infer_cfg=LC_en_infer_cfg,
            eval_cfg=LC_eval_cfg,
        )
    )
    compassbench_v1_1_code_datasets.append(
        dict(
            type=LCDataset,
            abbr='lcbench_cn-' + difficulty + '_public',
            path='data/compassbench_v1.1.public/code/LCBench2023/LCBench2023_cn.jsonl',
            difficulty=difficulty,
            reader_cfg=LC_reader_cfg,
            infer_cfg=LC_cn_infer_cfg,
            eval_cfg=LC_eval_cfg,
        )
    )


# --------------------------------------------------------------- TACO ---------------------------------------------------------------
TACO_difficulties_list = ['EASY', 'MEDIUM', 'MEDIUM_HARD', 'HARD', 'VERY_HARD']
TACO_reader_cfg = dict(input_columns=['question', 'starter'], output_column='problem_id', train_split='test', test_range='[0:5]')

TACO_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Please write a python program to address the following QUESTION. Your ANSWER should be in a code block format like this: ```python # Write your code here ```. \nQUESTION:\n{question} {starter}\nANSWER:\n'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024),
)

TACO_eval_cfg = dict(evaluator=dict(type=TACOEvaluator), pred_role='BOT')

for difficulty in TACO_difficulties_list:
    compassbench_v1_1_code_datasets.append(
        dict(
            type=TACODataset,
            abbr='TACO-' + difficulty + '_public',
            path='data/compassbench_v1.1.public/code/BAAI-TACO',
            difficulty=difficulty,
            reader_cfg=TACO_reader_cfg,
            infer_cfg=TACO_infer_cfg,
            eval_cfg=TACO_eval_cfg,
        )
    )
opencompass/configs/datasets/compassbench_20_v1_1_public/knowledge/compassbench_v1_knowledge_gen_bd74e0.py
ADDED
@@ -0,0 +1,133 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator
from opencompass.datasets import WikiBenchDataset
from opencompass.utils.text_postprocessors import first_option_postprocess

# ----------------------- Prompt Content----------------------- #
zero_shot_prompts = {
    'single_choice_prompts': [
        dict(role='HUMAN', prompt='以下是一道单项选择题,请你根据你了解的知识给出正确的答案选项。\n下面是你要回答的题目::\n{question}\n答案选项:'),
        dict(role='BOT', prompt='{answer}')
    ]
}
few_shot_prompts = {
    'single_choice_prompts': {
        'single_choice_cn': [
            dict(role='HUMAN', prompt='题目:“一丝不苟”中的“丝”的本意是( )。\nA. 计量单位\nB. 丝线\nC. 丝绸\nD. 发丝'),
            dict(role='BOT', prompt='答案:A'),
            dict(role='HUMAN', prompt='题目:五华县体育场位于哪个省?\nA. 湖南省\nB. 浙江省\nC. 广东省\nD. 江苏省'),
            dict(role='BOT', prompt='答案:C'),
            dict(role='HUMAN', prompt='题目:“西施犬的原产地是哪里?\nA. 印度\nB. 中国\nC. 西藏\nD. 台湾'),
            dict(role='BOT', prompt='答案:C'),
            dict(role='HUMAN', prompt='题目:四库全书的四库是指什么?\nA. 易、书、诗、礼\nB. 经、史、子、音\nC. 诗、书、音、律\nD. 经、史、子、集'),
            dict(role='BOT', prompt='答案:D'),
            dict(role='HUMAN', prompt='题目:{question}'),
        ]}
}


# ----------------------- Prompt Template----------------------- #

# Use Zero-Shot or not
with_few_shot = True

# Max for this dataset is 4, should be set with `with_few_shot`
few_shot_samples = 4

# Use circular evaluation or not
with_circular_eval = True

single_choice_prompts = zero_shot_prompts['single_choice_prompts'] if not with_few_shot else few_shot_prompts['single_choice_prompts']

# Set few shot prompt number
if with_few_shot:
    assert few_shot_samples > 0
    for _name in list(single_choice_prompts.keys()):
        single_choice_prompts[_name] = single_choice_prompts[_name][- few_shot_samples * 2 - 2:]

compassbench_v1_knowledge_sets = {
    'common_knowledge': ['single_choice_cn'],
    'humanity': ['single_choice_cn'],
    'natural_science': ['single_choice_cn'],
    'social_science': ['single_choice_cn'],
}


# ----------------------- Dataset Config----------------------- #
compassbench_v1_knowledge_datasets = []

for _split in list(compassbench_v1_knowledge_sets.keys()):
    for _name in compassbench_v1_knowledge_sets[_split]:
        compassbench_v1_knowledge_reader_cfg = dict(input_columns=['question'], output_column='answer')

        compassbench_v1_knowledge_infer_cfg = dict(
            ice_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin='</E>',
                    round=single_choice_prompts[_name]
                ),
                ice_token='</E>',
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        )
        compassbench_v1_knowledge_eval_cfg = dict(
            evaluator=dict(type=CircularEvaluator if with_circular_eval else AccEvaluator),
            pred_postprocessor=dict(type=first_option_postprocess, options='ABCD'),
        )

        compassbench_v1_knowledge_datasets.append(
            dict(
                type=WikiBenchDataset,
                abbr='compassbench_v1_knowledge-' + _split + '-' + _name + '_' + ('circular' if with_circular_eval else '') + '_public',
                path=f'data/compassbench_v1.1.public/knowledge/{_split}/{_name}.jsonl',
                name=_name + '_circular' if with_circular_eval else _name,
                reader_cfg=compassbench_v1_knowledge_reader_cfg,
                infer_cfg=compassbench_v1_knowledge_infer_cfg,
                eval_cfg=compassbench_v1_knowledge_eval_cfg,
            )
        )


from opencompass.datasets import TriviaQADatasetV3, TriviaQAEvaluator

triviaqa_and_nq_reader_cfg = dict(input_columns=['question'], output_column='answer')

triviaqa_and_nq_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Question: When do stores stop selling alcohol in indiana?'),
                dict(role='BOT', prompt='Answer: 3 a.m.'),
                dict(role='HUMAN', prompt='Question: Edinburgh of the Seven Seas is the capital of which group of islands?'),
                dict(role='BOT', prompt='Answer: Tristan da Cunha'),
                dict(role='HUMAN', prompt='Question: Which book of the Christian Bible\'s new testament comprises a letter from St Paul to members of a church that he had founded at Macedonia?'),
                dict(role='BOT', prompt='Answer: Philippians'),
                dict(role='HUMAN', prompt='Question: The Hindu deity Hanuman appears in the form of which animal?'),
                dict(role='BOT', prompt='Answer: A monkey'),
                dict(role='HUMAN', prompt='Question: Who hosts the ITV quiz show The Chase?'),
                dict(role='BOT', prompt='Answer: Bradley Walsh'),
                dict(role='HUMAN', prompt='Question: {question}'),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=50, stopping_criteria=['Question:']),
)

triviaqa_and_nq_eval_cfg = dict(evaluator=dict(type=TriviaQAEvaluator), pred_role='BOT')

compassbench_v1_knowledge_datasets.append(
    dict(
        type=TriviaQADatasetV3,
        abbr='compassbench_v1_knowledge-mixed-cloze_en_public',
        path='data/compassbench_v1.1.public/knowledge/mixed/cloze_en.jsonl',
        reader_cfg=triviaqa_and_nq_reader_cfg,
        infer_cfg=triviaqa_and_nq_infer_cfg,
        eval_cfg=triviaqa_and_nq_eval_cfg
    )
)
opencompass/configs/datasets/compassbench_20_v1_1_public/language/compassbench_v1_language_gen_7aa06d.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from opencompass.openicl.icl_evaluator import EMEvaluator, BleuFloresEvaluator, RougeEvaluator, JiebaRougeEvaluator
|
2 |
+
|
3 |
+
compassbench_v1_language_datasets = [
|
4 |
+
# dict(path='data/compassbench_v1.1.public/language/information_retrieval_en.jsonl',
|
5 |
+
# abbr='information_retrieval_en_public', data_type= 'qa', infer_method='gen', output_column='answers',
|
6 |
+
# human_prompt='{context}\n{origin_question}\nConcise and direct answer is',
|
7 |
+
# evaluator=EMEvaluator),
|
8 |
+
# dict(path='data/compassbench_v1.1.public/language/information_retrieval_zh.jsonl',
|
9 |
+
# abbr='information_retrieval_zh_public', data_type= 'qa', infer_method='gen', output_column='answers',
|
10 |
+
# human_prompt='{context}\n{origin_question}\n简单直接的回答是',
|
11 |
+
# evaluator=EMEvaluator),
|
12 |
+
|
13 |
+
dict(path='data/compassbench_v1.1.public/language/intention_recognition_en.jsonl',
|
14 |
+
abbr='intention_recognition_en_circular_public', data_type='circular-mcq', infer_method='gen'),
|
15 |
+
dict(path='data/compassbench_v1.1.public/language/intention_recognition_zh.jsonl',
|
16 |
+
abbr='intention_recognition_zh_circular_public', data_type='circular-mcq', infer_method='gen'),
|
17 |
+
|
18 |
+
dict(path='data/compassbench_v1.1.public/language/sentiment_analysis_en.jsonl',
|
19 |
+
abbr='sentiment_analysis_en_circular_public', data_type='circular-mcq', infer_method='gen'),
|
20 |
+
dict(path='data/compassbench_v1.1.public/language/sentiment_analysis_zh.jsonl',
|
21 |
+
abbr='sentiment_analysis_zh_circular_public', data_type='circular-mcq', infer_method='gen'),
|
22 |
+
|
23 |
+
dict(path='data/compassbench_v1.1.public/language/translation.jsonl',
|
24 |
+
abbr='translation_public', data_type= 'qa', infer_method='gen',
|
25 |
+
evaluator=BleuFloresEvaluator),
|
26 |
+
|
27 |
+
dict(path='data/compassbench_v1.1.public/language/content_critic_en.jsonl',
|
28 |
+
abbr='content_critic_en_circular_public', data_type='circular-mcq', infer_method='gen'),
|
29 |
+
dict(path='data/compassbench_v1.1.public/language/content_critic_zh.jsonl',
|
30 |
+
abbr='content_critic_zh_circular_public', data_type='circular-mcq', infer_method='gen'),
|
31 |
+
|
32 |
+
dict(path='data/compassbench_v1.1.public/language/content_summarization_en.jsonl',
|
33 |
+
abbr='content_summarization_en_public', data_type= 'qa', infer_method='gen', output_column='summary',
|
34 |
+
human_prompt='{article}\nSummary of the article is:\n',
|
35 |
+
evaluator=RougeEvaluator),
|
36 |
+
dict(path='data/compassbench_v1.1.public/language/content_summarization_zh.jsonl',
|
37 |
+
abbr='content_summarization_zh_public', data_type= 'qa', infer_method='gen', output_column='summary',
|
38 |
+
human_prompt='{article}\n上述内容摘要如下:\n',
|
39 |
+
evaluator=JiebaRougeEvaluator),
|
40 |
+
|
41 |
+
dict(path='data/compassbench_v1.1.public/language/traditional_cultural_understanding_zh.jsonl',
|
42 |
+
abbr='traditional_cultural_understanding_zh_circular_public', data_type='circular-mcq', infer_method='gen'),
|
43 |
+
|
44 |
+
dict(path='data/compassbench_v1.1.public/language/chinese_semantic_understanding_zh.jsonl',
|
45 |
+
abbr='chinese_semantic_understanding_zh_circular_public', data_type='circular-mcq', infer_method='gen'),
|
46 |
+
]
|
opencompass/configs/datasets/compassbench_20_v1_1_public/math/compassbench_v1_1_math_gen_1dc21d.py
ADDED
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from mmengine.config import read_base
|
2 |
+
from copy import deepcopy
|
3 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
4 |
+
from opencompass.openicl.icl_retriever import ZeroRetriever
|
5 |
+
from opencompass.openicl.icl_inferencer import GenInferencer, PPLInferencer
|
6 |
+
from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator
|
7 |
+
from opencompass.datasets import MathBenchDataset, mathbench_postprocess
|
8 |
+
from opencompass.utils.text_postprocessors import first_option_postprocess
|
9 |
+
|
10 |
+
with read_base():
|
11 |
+
from .mathbench_prompt import zero_shot_prompts, few_shot_prompts, mathbench_sets
|
12 |
+
|
13 |
+
# Max for this dataset is 4
|
14 |
+
num_shot = 4
|
15 |
+
# Generate reasoning path or not, only for single choice
|
16 |
+
with_reasoning = False
|
17 |
+
# Use circular evaluation or not
|
18 |
+
with_circular_eval = True
|
19 |
+
# Use PPL mode in single choice test or not
|
20 |
+
use_ppl_single_choice = False
|
21 |
+
|
22 |
+
assert 0 <= num_shot <= 4
|
23 |
+
if num_shot == 0:
|
24 |
+
prompts = zero_shot_prompts
|
25 |
+
else:
|
26 |
+
prompts = {name: p[- 2 * num_shot - 2:] for name, p in few_shot_prompts.items()}
|
27 |
+
|
28 |
+
compassbench_v1_1_math_datasets = []
|
29 |
+
for _split in mathbench_sets:
|
30 |
+
for _name in mathbench_sets[_split]:
|
31 |
+
if 'single_choice' in _name:
|
32 |
+
if with_reasoning:
|
33 |
+
template_round = prompts[_name + '_with_reasoning']
|
34 |
+
else:
|
35 |
+
template_round = prompts[_name]
|
36 |
+
else:
|
37 |
+
template_round = prompts[_name]
|
38 |
+
|
39 |
+
if 'single_choice' in _name:
|
40 |
+
pred_postprocessor = dict(type=first_option_postprocess, options='ABCD')
|
41 |
+
else:
|
42 |
+
pred_postprocessor = dict(type=mathbench_postprocess, name=_name)
|
43 |
+
|
44 |
+
if 'single_choice' in _name and with_circular_eval:
|
45 |
+
evaluator = dict(type=CircularEvaluator)
|
46 |
+
else:
|
47 |
+
evaluator = dict(type=AccEvaluator)
|
48 |
+
|
49 |
+
# assemble the final config
|
50 |
+
mathbench_reader_cfg = dict(input_columns=['question'], output_column='answer')
|
51 |
+
if use_ppl_single_choice and 'single_choice' in _name and not with_reasoning:
|
52 |
+
template = {}
|
53 |
+
for answer in ['A', 'B', 'C', 'D']:
|
54 |
+
one_template_round = deepcopy(template_round)
|
55 |
+
one_template_round['round'][-1]['prompt'] = one_template_round['round'][-1]['prompt'].format(answer=answer)
|
56 |
+
template[answer] = dict(round=one_template_round)
|
57 |
+
mathbench_infer_cfg = dict(
|
58 |
+
prompt_template=dict(type=PromptTemplate, template=template),
|
59 |
+
retriever=dict(type=ZeroRetriever),
|
60 |
+
inferencer=dict(type=PPLInferencer),
|
61 |
+
)
|
62 |
+
else:
|
63 |
+
mathbench_infer_cfg = dict(
|
64 |
+
prompt_template=dict(type=PromptTemplate, template=dict(round=template_round)),
|
65 |
+
retriever=dict(type=ZeroRetriever),
|
66 |
+
inferencer=dict(type=GenInferencer, max_out_len=2048),
|
67 |
+
)
|
68 |
+
mathbench_eval_cfg = dict(evaluator=evaluator, pred_postprocessor=pred_postprocessor)
|
69 |
+
|
70 |
+
compassbench_v1_1_math_datasets.append(
|
71 |
+
dict(
|
72 |
+
abbr='mathbench-' + _split + '-' + _name + '_public',
|
73 |
+
type=MathBenchDataset,
|
74 |
+
path=f'data/compassbench_v1.1.public/math/{_split}',
|
75 |
+
name=_name,
|
76 |
+
with_circular=with_circular_eval,
|
77 |
+
reader_cfg=mathbench_reader_cfg,
|
78 |
+
infer_cfg=mathbench_infer_cfg,
|
79 |
+
eval_cfg=mathbench_eval_cfg,
|
80 |
+
)
|
81 |
+
)
|
opencompass/configs/datasets/compassbench_20_v1_1_public/math/mathbench_prompt.py
ADDED
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
zero_shot_prompts = {
|
2 |
+
'single_choice_cn_with_reasoning': [
|
3 |
+
dict(role='HUMAN', prompt='问题: 以下是一道关于数学的单项选择题,请你一步一步推理,并在最后用“所以答案为选项X”给出答案,其中“X”为选项A,B,C,D中你认为正确的选项。下面是你要回答的问题\n{question}\n让我们一步一步思考:'),
|
4 |
+
],
|
5 |
+
'single_choice_cn': [
|
6 |
+
dict(role='HUMAN', prompt='问题: 以下是一道关于数学的单项选择题,请你直接回答正确答案的选项序号。\n下面是你要回答的题目:\n{question}\n所以答案是:'),
|
7 |
+
],
|
8 |
+
'single_choice_en_with_reasoning': [
|
9 |
+
dict(role='HUMAN', prompt='Question: Here is a multiple-choice question about mathematics. Please reason through it step by step, and at the end, provide your answer option with "Therefore, the correct answer is option X", Where "X" is the correct option you think from A,B,C,D. Here is the question you need to answer:\n{question}\nLet\'s think step by step:'),
|
10 |
+
],
|
11 |
+
'single_choice_en': [
|
12 |
+
dict(role='HUMAN', prompt='Question: Here is a multiple-choice question about mathematics. Please provide the correct answer option directly.\nHere is the question you need to answer:\n{question}\nThe answer is:'),
|
13 |
+
],
|
14 |
+
}
|
15 |
+
|
16 |
+
few_shot_prompts = {
|
17 |
+
'single_choice_cn': [
|
18 |
+
dict(role='HUMAN', prompt='问题: 已知i是虚数单位,z为复数,$2+\\frac{1}{i}=z(3+i)$,则在复平面内z对应的点位于____?\nA. 第一象限\nB. 第二象限\nC. 第三象限\nD. 第四象限'),
|
19 |
+
dict(role='BOT', prompt='回答: D'),
|
20 |
+
dict(role='HUMAN', prompt='问题: 将函数$y=\\tan(\\omega x-1)(\\omega>0)$的图像向左平移2个单位长度后,与函数$y=\\tan(\\omega x+3)$的图象重合,则的最小值等于____\nA. $2-\\frac{\\pi}{2}$\nB. 1\nC. $\\pi - 2$\nD. 2'),
|
21 |
+
dict(role='BOT', prompt='回答: D'),
|
22 |
+
dict(role='HUMAN', prompt='问题: 设$(1+2i)a+b=2i$,其中a,b为实数,则( )\nA. $a=1,b=-1$\nB. $a=1,b=1$\nC. $a=-1,b=1$\nD. $a=-1,b=-1$'),
|
23 |
+
dict(role='BOT', prompt='回答: A'),
|
24 |
+
dict(role='HUMAN', prompt='问题: 圆锥的底面半径为2,高为4.一个圆柱的下底面在圆锥的底面上,上底面的圆周在圆锥的侧面上,当圆柱侧面积为$4 \\pi$时,该圆柱的体积为____\nA. $\\pi$\nB. $2\\pi$\nC. $3\\pi$\nD. $4\\pi$'),
|
25 |
+
dict(role='BOT', prompt='回答: B'),
|
26 |
+
dict(role='HUMAN', prompt='问题: {question}'),
|
27 |
+
dict(role='BOT', prompt='回答: {answer}'),
|
28 |
+
],
|
29 |
+
'single_choice_cn_with_reasoning' : [
|
30 |
+
dict(role='HUMAN', prompt='问题: 已知i是虚数单位,z为复数,$2+\\frac{1}{i}=z(3+i)$,则在复平面内z对应的点位于____\nA. 第一象限\nB. 第二象限\nC. 第三象限\nD. 第四象限'),
|
31 |
+
dict(role='BOT', prompt='回答: 因为首先,我们将等式两边同时乘以$(3 + i)$的共轭复数$(3 - i)$,以便消去分母中的虚数部分:$z = \\frac{2 - i}{3 + i} \\cdot \\frac{3 - i}{3 - i}$,这样做的目的是利用复数乘法的性质,从而消去分母中的虚数部分。我们进行乘法运算:$z = \\frac{(2 - i)(3 - i)}{(3 + i)(3 - i)}$,$z = \\frac{6 - 2i - 3i + i^2}{9 - 3i + 3i - i^2}$,由于$i^2 = -1$,我们可以进一步简化:$z = \\frac{6 - 5i - 1}{9 + 1}$ $z = \\frac{{1}{2} - \\frac{1}{2}i$,在复平面上,这个点位于第四象限,因为它的实部是正的($\\frac{1}{2}$),而虚部是负的($-\\frac{1}{2}$)\n答案选项:D'),
|
32 |
+
dict(role='HUMAN', prompt='问题: 将函数$y=\\tan(\\omega x-1)(\\omega>0)$的图像向左平移2个单位长度后,与函数$y=\\tan(\\omega x+3)$的图象重合,则的最小值等于____\nA. $2-\\frac{\\pi}{2}$\nB. 1\nC. $\\pi - 2$\nD. 2'),
|
33 |
+
dict(role='BOT', prompt='回答: 将函数$y=\\tan(\\omega x-1)$(其中$\\omega>0$)的图像向左平移2个单位长度后,得到的新函数为$y=\\tan(\\omega(x+2)-1)$。为了使这个新函数的图像与$y=\\tan(\\omega x+3)$的图像重合,我们需要满足以下条件:\n$\\tan(\\omega(x+2)-1) = \\tan(\\omega x+3)$\n由于正切函数的周期性,我们知道$\\tan(\\theta) = \\tan(\\theta + k\\pi)$,其中$k$是任意整数。因此,我们可以写出:\n$\\omega(x+2)-1 = \\omega x+3 + k\\pi$\n解这个方程,我们得到:$\\omega x + 2\\omega - 1 = \\omega x + 3 + k\\pi$\n$2\\omega - 1 = 3 + k\\pi$\n$\\omega = 2 + \\frac{k\\pi}{2}$\n由于$\\omega>0$,我们需要找到满足条件的最小正整数$k$。当$k=0$时,我们得到$\\omega=2$,这是满足条件的最小正值。因此,最小值等于2。答案选项:D'),
|
34 |
+
dict(role='HUMAN', prompt='问题: 设$(1+2i)a+b=2i$,其中a,b为实数,则( )\nA. $a=1,b=-1$\nB. $a=1,b=1$\nC. $a=-1,b=1$\nD. $a=-1,b=-1$'),
|
35 |
+
dict(role='BOT', prompt='回答: 我们可以将这个方程写成:$a + 2ai + b = 2i$,然后将实部和虚部分开:\n$a + b = 0$\n$2a = 2$\n由于$a + b = 0$,我们可以得到$a = -b$。将这个���代入$2a = 2$,我们得到$2(-b) = 2$,因此$b = -1$。因此,$a = 1$。答案选项:A'),
|
36 |
+
dict(role='HUMAN', prompt='问题: 一直角三角形的两边长分别为3和4.则第三边的长为____\nA. 5\nB. $\\sqrt 7$\nC. $\\sqrt 5$\nD. 5或者$\\sqrt 7$'),
|
37 |
+
dict(role='BOT', prompt='回答: 我们可以使用勾股定理来解决这个问题。勾股定理告诉我们,如果一个三角形的两条边长分别为$a$和$b$,那么第三条边的长度为$c = \\sqrt{a^2 + b^2}$。在这个问题中,$a = 3$,$b = 4$,因此$c = \\sqrt{3^2 + 4^2} = \\sqrt{9 + 16} = \\sqrt{25} = 5$。答案选项:A'),
|
38 |
+
dict(role='HUMAN', prompt='问题: {question}'),
|
39 |
+
dict(role='BOT', prompt='回答: {answer}'),
|
40 |
+
],
|
41 |
+
'single_choice_en': [
|
42 |
+
dict(role='HUMAN', prompt='Question: Given that $i$ is the imaginary unit and $z$ is a complex number, $2 + \\frac{1}{i} = z(3 + i)$, the point corresponding to $z$ in the complex plane is located in the ______\nA. First quadrant\nB. Second quadrant\nC. Third quadrant\nD. Fourth quadrant'),
|
43 |
+
dict(role='BOT', prompt='Response: D'),
|
44 |
+
dict(role='HUMAN', prompt='Question: The graph of the function $y = \\tan(\\omega x - 1)$ is shifted 2 units to the left and coincides with the graph of the function $y = \\tan(\\omega x + 3)$. The minimum value of $\\omega$ is ______\nA. $2 - \\frac{\\pi}{2}$\nB. 1\nC. $\\pi - 2$\nD. 2'),
|
45 |
+
dict(role='BOT', prompt='Response: D'),
|
46 |
+
dict(role='HUMAN', prompt='Question: If $(1 + 2i)a + b = 2i$, where $a$ and $b$ are real numbers, then ______\nA. $a = 1, b = -1$\nB. $a = 1, b = 1$\nC. $a = -1, b = 1$\nD. $a = -1, b = -1$'),
|
47 |
+
dict(role='BOT', prompt='Response: A'),
|
48 |
+
dict(role='HUMAN', prompt='Question: The radius of a cone is 2 and its height is 4. A cylinder has its lower base on the base of the cone and its upper base on the lateral surface of the cone. When the lateral surface area of the cylinder is $4\\pi$, the volume of the cylinder is ______\nA. $\\pi$\nB. $2\\pi$\nC. $3\\pi$\nD. $4\\pi$'),
|
49 |
+
dict(role='BOT', prompt='Response: B'),
|
50 |
+
dict(role='HUMAN', prompt='Question: {question}'),
|
51 |
+
dict(role='BOT', prompt='Response: {answer}'),
|
52 |
+
],
|
53 |
+
'single_choice_en_with_reasoning': [
|
54 |
+
dict(role='HUMAN', prompt='Question: Given that $i$ is the imaginary unit and $z$ is a complex number, $2 + \\frac{1}{i} = z(3 + i)$, the point corresponding to $z$ in the complex plane is located in the ______\nA. First quadrant\nB. Second quadrant\nC. Third quadrant\nD. Fourth quadrant'),
|
55 |
+
dict(role='BOT', prompt='Response: First, we multiply both sides of the equation by the conjugate of $(3 + i)$: $z = \\frac{2 - i}{3 + i} \\cdot \\frac{3 - i}{3 - i}$. We perform the multiplication: $z = \\frac{(2 - i)(3 - i)}{(3 + i)(3 - i)}$, $z = \\frac{6 - 2i - 3i + i^2}{9 - 3i + 3i - i^2}$. Since $i^2 = -1$, we can simplify this further: $z = \\frac{6 - 5i - 1}{9 + 1}$ $z = \\frac{1}{2} - \\frac{1}{2}i$. In the complex plane, this point is located in the fourth quadrant, because its real part is positive ($\\frac{1}{2}$) and its imaginary part is negative ($-\\frac{1}{2}$)\nAnswer option: D'),
|
56 |
+
dict(role='HUMAN', prompt='Question: The graph of the function $y = \\tan(\\omega x - 1)$ is shifted 2 units to the left and coincides with the graph of the function $y = \\tan(\\omega x + 3)$. The minimum value of $\\omega$ is ______\nA. $2 - \\frac{\\pi}{2}$\nB. 1\nC. $\\pi - 2$\nD. 2'),
|
57 |
+
dict(role='BOT', prompt='Response: In order for the graph of this new function to coincide with the graph of $y = \\tan(\\omega x + 3)$, we need to satisfy the following condition: $\\tan(\\omega(x + 2) - 1) = \\tan(\\omega x + 3)$. Therefore, we can write: $\\omega(x + 2) - 1 = \\omega x + 3 + k\\pi$. Solving this equation, we get: $\\omega x + 2\\omega - 1 = \\omega x + 3 + k\\pi$. $2\\omega - 1 = 3 + k\\pi$. $\\omega = 2 + \\frac{k\\pi}{2}$. Since $\\omega > 0$, we need to find the smallest positive integer $k$ that satisfies the condition. When $k = 0$, we get $\\omega = 2$, which is the smallest positive value that satisfies the condition. Therefore, the minimum value is 2. Answer option: D'),
|
58 |
+
dict(role='HUMAN', prompt='Question: If $(1 + 2i)a + b = 2i$, where $a$ and $b$ are real numbers, then ______\nA. $a = 1, b = -1$\nB. $a = 1, b = 1$\nC. $a = -1, b = 1$\nD. $a = -1, b = -1$'),
|
59 |
+
dict(role='BOT', prompt='Response: We can write this equation as: $a + 2ai + b = 2i$, and then separate the real and imaginary parts: $a + b = 0$. $2a = 2$. Since $a + b = 0$, we can get $a = -b$. Substituting this value into $2a = 2$, we get $2(-b) = 2$, so $b = -1$. Therefore, $a = 1$. Answer option: A'),
|
60 |
+
dict(role='HUMAN', prompt='Question: The radius of a cone is 2 and its height is 4. A cylinder has its lower base on the base of the cone and its upper base on the lateral surface of the cone. When the lateral surface area of the cylinder is $4\\pi$, the volume of the cylinder is ______\nA. $\\pi$\nB. $2\\pi$\nC. $3\\pi$\nD. $4\\pi$'),
|
61 |
+
dict(role='BOT', prompt='Response: We can use the Pythagorean theorem to solve this problem. The Pythagorean theorem tells us that if the two sides of a triangle are $a$ and $b$, then the length of the third side is $c = \\sqrt{a^2 + b^2}$. In this problem, $a = 3$ and $b = 4$, so $c = \\sqrt{3^2 + 4^2} = \\sqrt{9 + 16} = \\sqrt{25} = 5$. Answer option: A'),
|
62 |
+
dict(role='HUMAN', prompt='Question: {question}'),
|
63 |
+
dict(role='BOT', prompt='Response: {answer}'),
|
64 |
+
],
|
65 |
+
'cloze_cn': [
|
66 |
+
dict(role='HUMAN', prompt='问题: 莎莎有五个玩具。在圣诞节,他从他的爸爸和妈妈那里各得到了两个玩具。现在他有多少个玩具?'),
|
67 |
+
dict(role='BOT', prompt='回答: 她有5个玩具。他从妈妈那里得到了2个,所以之后他有5 + 2 = 7个玩具。然后他从爸爸那里得到了2个,所以总共他有7 + 2 = 9个玩具。答案是 9'),
|
68 |
+
dict(role='HUMAN', prompt='问题: 服务器房间里有九台电脑。从周一到周四每天增加五台电脑。现在服务器房里有多少台电脑?'),
|
69 |
+
dict(role='BOT', prompt='回答: 从周一到周四有4天。每天增加5台电脑。这意味着总共增加了4 * 5 = 20台电脑。一开始有9台电脑,所以现在有9 + 20 = 29台电脑。答案是 29'),
|
70 |
+
dict(role='HUMAN', prompt='问题: 迈克尔有58个高尔夫球。星期二,他丢失了23个高尔夫球。星期三,他又丢失了2个。星期三结束时他还剩下多少个高尔夫球?'),
|
71 |
+
dict(role='BOT', prompt='回答: 迈克尔一开始有58个球。星期二他丢失了23个,所以之后他还剩下58 - 23 = 35个球。星期三他又丢失了2个,所以现在他还剩下35 - 2 = 33个球。答案是 33'),
|
72 |
+
dict(role='HUMAN', prompt='问题: 奥利弗有23美元。她用每个3美元的价格买了五个百吉饼。她还剩下多少钱?'),
|
73 |
+
dict(role='BOT', prompt='回答: 她以每个3美元的价格买了5个百吉饼。这意味着她在百吉饼上花费了5 * 3 = 15美元。她一开始有23美元,所以现在她还剩下23 - 15 = 8美元。答案是 8'),
|
74 |
+
dict(role='HUMAN', prompt='问题: {question}'),
|
75 |
+
dict(role='BOT', prompt='回答: {answer}'),
|
76 |
+
],
|
77 |
+
'cloze_en': [
|
78 |
+
dict(role='HUMAN', prompt='Question: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?'),
|
79 |
+
dict(role='BOT', prompt='Response: He has 5 toys. He got 2 from mom, so after that he has 5 + 2 = 7 toys. Then he got 2 more from dad, so in total he has 7 + 2 = 9 toys. The answer is 9.'),
|
80 |
+
dict(role='HUMAN', prompt='Question: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?'),
|
81 |
+
dict(role='BOT', prompt='Response: There are 4 days from monday to thursday. 5 computers were added each day. That means in total 4 * 5 = 20 computers were added. There were 9 computers in the beginning, so now there are 9 + 20 = 29 computers. The answer is 29.'),
|
82 |
+
dict(role='HUMAN', prompt='Question: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?'),
|
83 |
+
dict(role='BOT', prompt='Response: Michael initially had 58 balls. He lost 23 on Tuesday, so after that he has 58 - 23 = 35 balls. On Wednesday he lost 2 more so now he has 35 - 2 = 33 balls. The answer is 33.'),
|
84 |
+
dict(role='HUMAN', prompt='Question: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?'),
|
85 |
+
dict(role='BOT', prompt='Response: She bought 5 bagels for $3 each. This means she spent 5 * $3 = $15 on the bagels. She had $23 in beginning, so now she has $23 - $15 = $8. The answer is 8.'),
|
86 |
+
dict(role='HUMAN', prompt='Question: {question}'),
|
87 |
+
dict(role='BOT', prompt='Response: {answer}'),
|
88 |
+
],
|
89 |
+
}
|
90 |
+
|
91 |
+
mathbench_sets = {
|
92 |
+
# Practice Part
|
93 |
+
'college': ['single_choice_cn', 'single_choice_en'],
|
94 |
+
'high': ['single_choice_cn', 'single_choice_en'],
|
95 |
+
'middle': ['single_choice_cn', 'single_choice_en'],
|
96 |
+
'primary': ['cloze_cn', 'cloze_en'],
|
97 |
+
'arithmetic': ['cloze_en'],
|
98 |
+
# Theory part
|
99 |
+
'college_knowledge': ['single_choice_cn', 'single_choice_en'],
|
100 |
+
'high_knowledge': ['single_choice_cn', 'single_choice_en'],
|
101 |
+
'middle_knowledge': ['single_choice_cn', 'single_choice_en'],
|
102 |
+
'primary_knowledge': ['single_choice_cn', 'single_choice_en'],
|
103 |
+
}
|
opencompass/configs/datasets/compassbench_20_v1_1_public/reason/compassbench_v1_reason_gen_d26d08.py
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
compassbench_v1_reason_datasets = [
|
2 |
+
dict(path='data/compassbench_v1.1.public/reason/CN_Commonsense.jsonl',
|
3 |
+
abbr='reasonbench_cn_commonsense_circular_public', data_type='circular-mcq', infer_method='gen'),
|
4 |
+
dict(path='data/compassbench_v1.1.public/reason/CN_Abductive_Alphanlg_translated.jsonl',
|
5 |
+
abbr='reasonbench_cn_abductive_alphanlg_translated_circular_public', data_type='circular-mcq', infer_method='gen'),
|
6 |
+
dict(path='data/compassbench_v1.1.public/reason/CN_Deductive_BBH3obj_translated.jsonl',
|
7 |
+
abbr='reasonbench_cn_deductive_bbh3obj_translated_circular_public', data_type='circular-mcq', infer_method='gen'),
|
8 |
+
dict(path='data/compassbench_v1.1.public/reason/CN_Deductive_LogiQA_zh.jsonl',
|
9 |
+
abbr='reasonbench_cn_deductive_logiqa_zh_circular_public', data_type='circular-mcq', infer_method='gen'),
|
10 |
+
dict(path='data/compassbench_v1.1.public/reason/CN_Inductive_DEER_translated.jsonl',
|
11 |
+
abbr='reasonbench_cn_inductive_deer_translated_circular_public', data_type='circular-mcq', infer_method='gen'),
|
12 |
+
dict(path='data/compassbench_v1.1.public/reason/CN_Inductive_selfgenerated.jsonl',
|
13 |
+
abbr='reasonbench_cn_inductive_selfgenerated_circular_public', data_type='circular-mcq', infer_method='gen'),
|
14 |
+
dict(path='data/compassbench_v1.1.public/reason/EN_Commonsense.jsonl',
|
15 |
+
abbr='reasonbench_en_commonsense_circular_public', data_type='circular-mcq', infer_method='gen'),
|
16 |
+
dict(path='data/compassbench_v1.1.public/reason/EN_Abductive_Alphanlg.jsonl',
|
17 |
+
abbr='reasonbench_en_abductive_alphanlg_circular_public', data_type='circular-mcq', infer_method='gen'),
|
18 |
+
dict(path='data/compassbench_v1.1.public/reason/EN_Deductive_BBH7obj.jsonl',
|
19 |
+
abbr='reasonbench_en_deductive_bbh7obj_circular_public', data_type='circular-mcq', infer_method='gen'),
|
20 |
+
dict(path='data/compassbench_v1.1.public/reason/EN_Deductive_LogiQA_zh_translated.jsonl',
|
21 |
+
abbr='reasonbench_en_deductive_logiqa_zh_translated_circular_public', data_type='circular-mcq', infer_method='gen'),
|
22 |
+
dict(path='data/compassbench_v1.1.public/reason/EN_Deductive_OCNLI_translated.jsonl',
|
23 |
+
abbr='reasonbench_en_deductive_ocnli_translated_circular_public', data_type='circular-mcq', infer_method='gen'),
|
24 |
+
dict(path='data/compassbench_v1.1.public/reason/EN_Inductive_DEER.jsonl',
|
25 |
+
abbr='reasonbench_en_inductive_deer_circular_public', data_type='circular-mcq', infer_method='gen'),
|
26 |
+
dict(path='data/compassbench_v1.1.public/reason/EN_Inductive_selfgenerated.jsonl',
|
27 |
+
abbr='reasonbench_en_inductive_selfgenerated_circular_public', data_type='circular-mcq', infer_method='gen'),
|
28 |
+
]
|
opencompass/configs/datasets/leval/levalcoursera/leval_coursera_gen.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from mmengine.config import read_base
|
2 |
+
|
3 |
+
with read_base():
|
4 |
+
from .leval_coursera_gen_36a006 import LEval_coursera_datasets # noqa: F401, F403
|
opencompass/configs/datasets/leval/levalcoursera/leval_coursera_gen_36a006.py
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
2 |
+
from opencompass.openicl.icl_retriever import ZeroRetriever
|
3 |
+
from opencompass.openicl.icl_inferencer import GenInferencer
|
4 |
+
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator, AccEvaluator
|
5 |
+
from opencompass.datasets.leval import LEvalCourseraDataset
|
6 |
+
from opencompass.utils.text_postprocessors import first_capital_postprocess, first_capital_postprocess_multi
|
7 |
+
|
8 |
+
LEval_coursera_reader_cfg = dict(
|
9 |
+
input_columns=['context', 'question'],
|
10 |
+
output_column='answer',
|
11 |
+
train_split='test',
|
12 |
+
test_split='test'
|
13 |
+
)
|
14 |
+
|
15 |
+
LEval_coursera_infer_cfg = dict(
|
16 |
+
prompt_template=dict(
|
17 |
+
type=PromptTemplate,
|
18 |
+
template=dict(
|
19 |
+
begin=[
|
20 |
+
dict(role='SYSTEM', fallback_role='HUMAN', prompt='Now you are given a very long document. Please follow the instruction based on this document. For multi-choice questions, there could be a single correct option or multiple correct options. Please only provide the letter corresponding to the answer (like A or AB) when answering.'),
|
21 |
+
],
|
22 |
+
round=[
|
23 |
+
dict(role='HUMAN', prompt='Document is as follows.\n{context}\nQuestion:{question}\nAnswer:'),
|
24 |
+
dict(role='BOT', prompt=''),
|
25 |
+
], )),
|
26 |
+
retriever=dict(type=ZeroRetriever),
|
27 |
+
inferencer=dict(type=GenInferencer, max_out_len=10)
|
28 |
+
)
|
29 |
+
|
30 |
+
LEval_coursera_eval_cfg = dict(
|
31 |
+
evaluator=dict(type=AccEvaluator),
|
32 |
+
pred_postprocessor=dict(type=first_capital_postprocess_multi),
|
33 |
+
pred_role='BOT'
|
34 |
+
)
|
35 |
+
|
36 |
+
LEval_coursera_datasets = [
|
37 |
+
dict(
|
38 |
+
type=LEvalCourseraDataset,
|
39 |
+
abbr='LEval_coursera',
|
40 |
+
path='L4NLP/LEval',
|
41 |
+
name='coursera',
|
42 |
+
reader_cfg=LEval_coursera_reader_cfg,
|
43 |
+
infer_cfg=LEval_coursera_infer_cfg,
|
44 |
+
eval_cfg=LEval_coursera_eval_cfg)
|
45 |
+
]
|
opencompass/configs/datasets/leval/levalfinancialqa/leval_financialqa_gen.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from mmengine.config import read_base
|
2 |
+
|
3 |
+
with read_base():
|
4 |
+
from .leval_financialqa_gen_b03798 import LEval_financialqa_datasets # noqa: F401, F403
|
opencompass/configs/datasets/leval/levalfinancialqa/leval_financialqa_gen_b03798.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
2 |
+
from opencompass.openicl.icl_retriever import ZeroRetriever
|
3 |
+
from opencompass.openicl.icl_inferencer import GenInferencer
|
4 |
+
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator
|
5 |
+
from opencompass.datasets.leval import LEvalGPTEvaluator, LEvalFinancialQADataset
|
6 |
+
|
7 |
+
LEval_financialqa_reader_cfg = dict(
|
8 |
+
input_columns=['context', 'question', 'length'],
|
9 |
+
output_column='answer',
|
10 |
+
train_split='test',
|
11 |
+
test_split='test'
|
12 |
+
)
|
13 |
+
|
14 |
+
LEval_financialqa_infer_cfg = dict(
|
15 |
+
prompt_template=dict(
|
16 |
+
type=PromptTemplate,
|
17 |
+
template=dict(
|
18 |
+
begin=[
|
19 |
+
dict(role='SYSTEM', fallback_role='HUMAN', prompt='Now you are given a very long document. Please follow the instruction after this document. These instructions may include summarizing a document, answering questions based on the document, or writing a required paragraph.'),
|
20 |
+
],
|
21 |
+
round=[
|
22 |
+
dict(role='HUMAN', prompt='Document is as follows. {context}\nInstruction: {question}\nAnswer this question with {length} words.'),
|
23 |
+
dict(role='BOT', prompt=''),
|
24 |
+
], )),
|
25 |
+
retriever=dict(type=ZeroRetriever),
|
26 |
+
inferencer=dict(type=GenInferencer, max_out_len=512)
|
27 |
+
)
|
28 |
+
|
29 |
+
LEval_financialqa_eval_cfg = dict(
|
30 |
+
evaluator=dict(type=RougeEvaluator),
|
31 |
+
pred_role='BOT'
|
32 |
+
)
|
33 |
+
|
34 |
+
LEval_financialqa_datasets = [
|
35 |
+
dict(
|
36 |
+
type=LEvalFinancialQADataset,
|
37 |
+
abbr='LEval_financialqa',
|
38 |
+
path='L4NLP/LEval',
|
39 |
+
name='financial_qa',
|
40 |
+
reader_cfg=LEval_financialqa_reader_cfg,
|
41 |
+
infer_cfg=LEval_financialqa_infer_cfg,
|
42 |
+
eval_cfg=LEval_financialqa_eval_cfg)
|
43 |
+
]
|
opencompass/configs/datasets/leval/levalgovreportsumm/leval_gov_report_summ_gen.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from mmengine.config import read_base
|
2 |
+
|
3 |
+
with read_base():
|
4 |
+
from .leval_gov_report_summ_gen_b03798 import LEval_govreport_summ_datasets # noqa: F401, F403
|
opencompass/configs/datasets/leval/levalgovreportsumm/leval_gov_report_summ_gen_b03798.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
2 |
+
from opencompass.openicl.icl_retriever import ZeroRetriever
|
3 |
+
from opencompass.openicl.icl_inferencer import GenInferencer
|
4 |
+
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator
|
5 |
+
from opencompass.datasets.leval import LEvalGPTEvaluator, LEvalGovReportSummDataset
|
6 |
+
|
7 |
+
LEval_govreport_summ_reader_cfg = dict(
|
8 |
+
input_columns=['context', 'question', 'length'],
|
9 |
+
output_column='answer',
|
10 |
+
train_split='test',
|
11 |
+
test_split='test'
|
12 |
+
)
|
13 |
+
|
14 |
+
LEval_govreport_summ_infer_cfg = dict(
|
15 |
+
prompt_template=dict(
|
16 |
+
type=PromptTemplate,
|
17 |
+
template=dict(
|
18 |
+
begin=[
|
19 |
+
dict(role='SYSTEM', fallback_role='HUMAN', prompt='Now you are given a very long document. Please follow the instruction after this document. These instructions may include summarizing a document, answering questions based on the document, or writing a required paragraph.'),
|
20 |
+
],
|
21 |
+
round=[
|
22 |
+
dict(role='HUMAN', prompt='Document is as follows. {context}\nInstruction: {question}\nAnswer this question with {length} words.'),
|
23 |
+
dict(role='BOT', prompt=''),
|
24 |
+
], )),
|
25 |
+
retriever=dict(type=ZeroRetriever),
|
26 |
+
inferencer=dict(type=GenInferencer, max_out_len=512)
|
27 |
+
)
|
28 |
+
|
29 |
+
LEval_govreport_summ_eval_cfg = dict(
|
30 |
+
evaluator=dict(type=RougeEvaluator),
|
31 |
+
pred_role='BOT'
|
32 |
+
)
|
33 |
+
|
34 |
+
LEval_govreport_summ_datasets = [
|
35 |
+
dict(
|
36 |
+
type=LEvalGovReportSummDataset,
|
37 |
+
abbr='LEval_gov_report_summ',
|
38 |
+
path='L4NLP/LEval',
|
39 |
+
name='gov_report_summ',
|
40 |
+
reader_cfg=LEval_govreport_summ_reader_cfg,
|
41 |
+
infer_cfg=LEval_govreport_summ_infer_cfg,
|
42 |
+
eval_cfg=LEval_govreport_summ_eval_cfg)
|
43 |
+
]
|
opencompass/configs/datasets/leval/levalgsm100/leval_gsm100_gen.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from mmengine.config import read_base
|
2 |
+
|
3 |
+
with read_base():
|
4 |
+
from .leval_gsm100_gen_77dd94 import LEval_gsm100_datasets
|
opencompass/configs/datasets/leval/levalgsm100/leval_gsm100_gen_77dd94.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
2 |
+
from opencompass.openicl.icl_retriever import ZeroRetriever
|
3 |
+
from opencompass.openicl.icl_inferencer import GenInferencer
|
4 |
+
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator, AccEvaluator
|
5 |
+
from opencompass.datasets.leval import LEvalGSM100Dataset
|
6 |
+
from opencompass.utils.text_postprocessors import first_capital_postprocess, first_capital_postprocess_multi
|
7 |
+
from opencompass.registry import TEXT_POSTPROCESSORS
|
8 |
+
from opencompass.datasets import gsm100_dataset_postprocess, gsm100_postprocess
|
9 |
+
|
10 |
+
LEval_gsm100_reader_cfg = dict(
|
11 |
+
input_columns=['context', 'question'],
|
12 |
+
output_column='answer',
|
13 |
+
train_split='test',
|
14 |
+
test_split='test'
|
15 |
+
)
|
16 |
+
|
17 |
+
LEval_gsm100_infer_cfg = dict(
|
18 |
+
prompt_template=dict(
|
19 |
+
type=PromptTemplate,
|
20 |
+
template=dict(
|
21 |
+
begin=[
|
22 |
+
dict(role='SYSTEM', fallback_role='HUMAN', prompt='Given several question answer pairs, you need to follow a similar format to answer the last question. Make sure the response is end with The answer is _ . '),
|
23 |
+
],
|
24 |
+
round=[
|
25 |
+
dict(role='HUMAN', prompt='{context}\n\n{question}\n'),
|
26 |
+
], )),
|
27 |
+
retriever=dict(type=ZeroRetriever),
|
28 |
+
inferencer=dict(type=GenInferencer, max_out_len=512)
|
29 |
+
)
|
30 |
+
|
31 |
+
|
32 |
+
LEval_gsm100_eval_cfg = dict(evaluator=dict(type=AccEvaluator),
|
33 |
+
pred_postprocessor=dict(type=gsm100_postprocess),
|
34 |
+
dataset_postprocessor=dict(type=gsm100_dataset_postprocess)
|
35 |
+
)
|
36 |
+
|
37 |
+
LEval_gsm100_datasets = [
|
38 |
+
dict(
|
39 |
+
type=LEvalGSM100Dataset,
|
40 |
+
abbr='LEval_gsm100',
|
41 |
+
path='L4NLP/LEval',
|
42 |
+
name='gsm100',
|
43 |
+
reader_cfg=LEval_gsm100_reader_cfg,
|
44 |
+
infer_cfg=LEval_gsm100_infer_cfg,
|
45 |
+
eval_cfg=LEval_gsm100_eval_cfg)
|
46 |
+
]
|
opencompass/configs/datasets/leval/levallegalcontractqa/leval_legalcontractqa_gen.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from mmengine.config import read_base
|
2 |
+
|
3 |
+
with read_base():
|
4 |
+
from .leval_legalcontractqa_gen_68a2ac import LEval_legalqa_datasets # noqa: F401, F403
|
opencompass/configs/datasets/leval/levallegalcontractqa/leval_legalcontractqa_gen_68a2ac.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
2 |
+
from opencompass.openicl.icl_retriever import ZeroRetriever
|
3 |
+
from opencompass.openicl.icl_inferencer import GenInferencer
|
4 |
+
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator
|
5 |
+
from opencompass.datasets.leval import LEvalGPTEvaluator, LEvalLegalContractQADataset
|
6 |
+
|
7 |
+
LEval_legalqa_reader_cfg = dict(
|
8 |
+
input_columns=['context', 'question', 'length'],
|
9 |
+
output_column='answer',
|
10 |
+
train_split='test',
|
11 |
+
test_split='test'
|
12 |
+
)
|
13 |
+
|
14 |
+
LEval_legalqa_infer_cfg = dict(
|
15 |
+
prompt_template=dict(
|
16 |
+
type=PromptTemplate,
|
17 |
+
template=dict(
|
18 |
+
begin=[
|
19 |
+
dict(role='SYSTEM', fallback_role='HUMAN', prompt='Now you are given a very long document. Please follow the instruction after this document. These instructions may include summarizing a document, answering questions based on the document, or writing a required paragraph.'),
|
20 |
+
],
|
21 |
+
round=[
|
22 |
+
dict(role='HUMAN', prompt='Document is as follows. {context}\nInstruction: {question}\nAnswer this question with {length} words.'),
|
23 |
+
dict(role='BOT', prompt=''),
|
24 |
+
], )),
|
25 |
+
retriever=dict(type=ZeroRetriever),
|
26 |
+
inferencer=dict(type=GenInferencer, max_out_len=128)
|
27 |
+
)
|
28 |
+
|
29 |
+
LEval_legalqa_eval_cfg = dict(
|
30 |
+
evaluator=dict(type=RougeEvaluator),
|
31 |
+
pred_role='BOT'
|
32 |
+
)
|
33 |
+
|
34 |
+
LEval_legalqa_datasets = [
|
35 |
+
dict(
|
36 |
+
type=LEvalLegalContractQADataset,
|
37 |
+
abbr='LEval_legal_contract_qa',
|
38 |
+
path='L4NLP/LEval',
|
39 |
+
name='legal_contract_qa',
|
40 |
+
reader_cfg=LEval_legalqa_reader_cfg,
|
41 |
+
infer_cfg=LEval_legalqa_infer_cfg,
|
42 |
+
eval_cfg=LEval_legalqa_eval_cfg)
|
43 |
+
]
|
opencompass/configs/datasets/leval/levalmeetingsumm/leval_meetingsumm_gen.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from mmengine.config import read_base
|
2 |
+
|
3 |
+
with read_base():
|
4 |
+
from .leval_meetingsumm_gen_b03798 import LEval_meetingsumm_datasets # noqa: F401, F403
|
opencompass/configs/datasets/leval/levalmeetingsumm/leval_meetingsumm_gen_b03798.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
2 |
+
from opencompass.openicl.icl_retriever import ZeroRetriever
|
3 |
+
from opencompass.openicl.icl_inferencer import GenInferencer
|
4 |
+
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator
|
5 |
+
from opencompass.datasets.leval import LEvalGPTEvaluator, LEvalMeetingSummDataset
|
6 |
+
|
7 |
+
LEval_meetingsumm_reader_cfg = dict(
|
8 |
+
input_columns=['context', 'question', 'length'],
|
9 |
+
output_column='answer',
|
10 |
+
train_split='test',
|
11 |
+
test_split='test'
|
12 |
+
)
|
13 |
+
|
14 |
+
LEval_meetingsumm_infer_cfg = dict(
|
15 |
+
prompt_template=dict(
|
16 |
+
type=PromptTemplate,
|
17 |
+
template=dict(
|
18 |
+
begin=[
|
19 |
+
dict(role='SYSTEM', fallback_role='HUMAN', prompt='Now you are given a very long document. Please follow the instruction after this document. These instructions may include summarizing a document, answering questions based on the document, or writing a required paragraph.'),
|
20 |
+
],
|
21 |
+
round=[
|
22 |
+
dict(role='HUMAN', prompt='Document is as follows. {context}\nInstruction: {question}\nAnswer this question with {length} words.'),
|
23 |
+
dict(role='BOT', prompt=''),
|
24 |
+
], )),
|
25 |
+
retriever=dict(type=ZeroRetriever),
|
26 |
+
inferencer=dict(type=GenInferencer, max_out_len=512)
|
27 |
+
)
|
28 |
+
|
29 |
+
LEval_meetingsumm_eval_cfg = dict(
|
30 |
+
evaluator=dict(type=RougeEvaluator),
|
31 |
+
pred_role='BOT'
|
32 |
+
)
|
33 |
+
|
34 |
+
LEval_meetingsumm_datasets = [
|
35 |
+
dict(
|
36 |
+
type=LEvalMeetingSummDataset,
|
37 |
+
abbr='LEval_meeting_summ',
|
38 |
+
path='L4NLP/LEval',
|
39 |
+
name='meeting_summ',
|
40 |
+
reader_cfg=LEval_meetingsumm_reader_cfg,
|
41 |
+
infer_cfg=LEval_meetingsumm_infer_cfg,
|
42 |
+
eval_cfg=LEval_meetingsumm_eval_cfg)
|
43 |
+
]
|
opencompass/configs/datasets/leval/levalmultidocqa/leval_multidocqa_gen.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from mmengine.config import read_base
|
2 |
+
|
3 |
+
with read_base():
|
4 |
+
from .leval_multidocqa_gen_96bf3f import LEval_multidocqa_datasets # noqa: F401, F403
|
opencompass/configs/datasets/leval/levalmultidocqa/leval_multidocqa_gen_96bf3f.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
2 |
+
from opencompass.openicl.icl_retriever import ZeroRetriever
|
3 |
+
from opencompass.openicl.icl_inferencer import GenInferencer
|
4 |
+
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator
|
5 |
+
from opencompass.datasets.leval import LEvalMultidocQADataset
|
6 |
+
|
7 |
+
LEval_multidocqa_reader_cfg = dict(
|
8 |
+
input_columns=['context', 'question', 'length'],
|
9 |
+
output_column='answer',
|
10 |
+
train_split='test',
|
11 |
+
test_split='test'
|
12 |
+
)
|
13 |
+
|
14 |
+
LEval_multidocqa_infer_cfg = dict(
|
15 |
+
prompt_template=dict(
|
16 |
+
type=PromptTemplate,
|
17 |
+
template=dict(
|
18 |
+
begin=[
|
19 |
+
dict(role='SYSTEM', fallback_role='HUMAN', prompt='Now you are given a very long document. Please follow the instruction after this document. These instructions may include summarizing a document, answering questions based on the document, or writing a required paragraph.'),
|
20 |
+
],
|
21 |
+
round=[
|
22 |
+
dict(role='HUMAN', prompt='Document is as follows. {context}\nInstruction: {question}\nAnswer this question with {length} words.'),
|
23 |
+
dict(role='BOT', prompt=''),
|
24 |
+
], )),
|
25 |
+
retriever=dict(type=ZeroRetriever),
|
26 |
+
inferencer=dict(type=GenInferencer, max_out_len=64)
|
27 |
+
)
|
28 |
+
|
29 |
+
LEval_multidocqa_eval_cfg = dict(
|
30 |
+
evaluator=dict(type=RougeEvaluator),
|
31 |
+
pred_role='BOT'
|
32 |
+
)
|
33 |
+
|
34 |
+
LEval_multidocqa_datasets = [
|
35 |
+
dict(
|
36 |
+
type=LEvalMultidocQADataset,
|
37 |
+
abbr='LEval_multidocqa',
|
38 |
+
path='L4NLP/LEval',
|
39 |
+
name='multidoc_qa',
|
40 |
+
reader_cfg=LEval_multidocqa_reader_cfg,
|
41 |
+
infer_cfg=LEval_multidocqa_infer_cfg,
|
42 |
+
eval_cfg=LEval_multidocqa_eval_cfg)
|
43 |
+
]
|
opencompass/configs/datasets/leval/levalnarrativeqa/leval_narrativeqa_gen.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from mmengine.config import read_base
|
2 |
+
|
3 |
+
with read_base():
|
4 |
+
from .leval_narrativeqa_gen_766dd0 import LEval_narrativeqa_datasets # noqa: F401, F403
|
opencompass/configs/datasets/leval/levalnarrativeqa/leval_narrativeqa_gen_766dd0.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
2 |
+
from opencompass.openicl.icl_retriever import ZeroRetriever
|
3 |
+
from opencompass.openicl.icl_inferencer import GenInferencer
|
4 |
+
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator
|
5 |
+
from opencompass.datasets.leval import LEvalGPTEvaluator, LEvalNarrativeQADataset
|
6 |
+
|
7 |
+
LEval_narrativeqa_reader_cfg = dict(
|
8 |
+
input_columns=['context', 'question', 'length'],
|
9 |
+
output_column='answer',
|
10 |
+
train_split='test',
|
11 |
+
test_split='test'
|
12 |
+
)
|
13 |
+
|
14 |
+
LEval_narrativeqa_infer_cfg = dict(
|
15 |
+
prompt_template=dict(
|
16 |
+
type=PromptTemplate,
|
17 |
+
template=dict(
|
18 |
+
begin=[
|
19 |
+
dict(role='SYSTEM', fallback_role='HUMAN', prompt='Now you are given a very long document. Please follow the instruction after this document. These instructions may include summarizing a document, answering questions based on the document, or writing a required paragraph.'),
|
20 |
+
],
|
21 |
+
round=[
|
22 |
+
dict(role='HUMAN', prompt='Document is as follows. {context}\nInstruction: {question}\nAnswer this question with {length} words.'),
|
23 |
+
dict(role='BOT', prompt=''),
|
24 |
+
], )),
|
25 |
+
retriever=dict(type=ZeroRetriever),
|
26 |
+
inferencer=dict(type=GenInferencer, max_out_len=50)
|
27 |
+
)
|
28 |
+
|
29 |
+
LEval_narrativeqa_eval_cfg = dict(
|
30 |
+
evaluator=dict(type=RougeEvaluator),
|
31 |
+
pred_role='BOT'
|
32 |
+
)
|
33 |
+
|
34 |
+
LEval_narrativeqa_datasets = [
|
35 |
+
dict(
|
36 |
+
type=LEvalNarrativeQADataset,
|
37 |
+
abbr='LEval_narrativeqa',
|
38 |
+
path='L4NLP/LEval',
|
39 |
+
name='narrative_qa',
|
40 |
+
reader_cfg=LEval_narrativeqa_reader_cfg,
|
41 |
+
infer_cfg=LEval_narrativeqa_infer_cfg,
|
42 |
+
eval_cfg=LEval_narrativeqa_eval_cfg)
|
43 |
+
]
|
opencompass/configs/datasets/leval/levalnaturalquestion/leval_naturalquestion_gen.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from mmengine.config import read_base
|
2 |
+
|
3 |
+
with read_base():
|
4 |
+
from .leval_naturalquestion_gen_52c33f import LEval_nq_datasets # noqa: F401, F403
|
opencompass/configs/datasets/leval/levalnaturalquestion/leval_naturalquestion_gen_52c33f.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
2 |
+
from opencompass.openicl.icl_retriever import ZeroRetriever
|
3 |
+
from opencompass.openicl.icl_inferencer import GenInferencer
|
4 |
+
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator
|
5 |
+
from opencompass.datasets.leval import LEvalGPTEvaluator, LEvalNaturalQuestionDataset
|
6 |
+
|
7 |
+
LEval_nq_reader_cfg = dict(
|
8 |
+
input_columns=['context', 'question', 'length'],
|
9 |
+
output_column='answer',
|
10 |
+
train_split='test',
|
11 |
+
test_split='test'
|
12 |
+
)
|
13 |
+
|
14 |
+
LEval_nq_infer_cfg = dict(
|
15 |
+
prompt_template=dict(
|
16 |
+
type=PromptTemplate,
|
17 |
+
template=dict(
|
18 |
+
begin=[
|
19 |
+
dict(role='SYSTEM', fallback_role='HUMAN', prompt='Now you are given a very long document. Please follow the instruction after this document. These instructions may include summarizing a document, answering questions based on the document, or writing a required paragraph.'),
|
20 |
+
],
|
21 |
+
round=[
|
22 |
+
dict(role='HUMAN', prompt='Document is as follows. {context}\nInstruction: {question}?\nAnswer this question with {length} words.'),
|
23 |
+
dict(role='BOT', prompt=''),
|
24 |
+
], )),
|
25 |
+
retriever=dict(type=ZeroRetriever),
|
26 |
+
inferencer=dict(type=GenInferencer, max_out_len=50)
|
27 |
+
)
|
28 |
+
|
29 |
+
LEval_nq_eval_cfg = dict(
|
30 |
+
evaluator=dict(type=RougeEvaluator),
|
31 |
+
pred_role='BOT'
|
32 |
+
)
|
33 |
+
|
34 |
+
LEval_nq_datasets = [
|
35 |
+
dict(
|
36 |
+
type=LEvalNaturalQuestionDataset,
|
37 |
+
abbr='LEval_nq',
|
38 |
+
path='L4NLP/LEval',
|
39 |
+
name='natural_question',
|
40 |
+
reader_cfg=LEval_nq_reader_cfg,
|
41 |
+
infer_cfg=LEval_nq_infer_cfg,
|
42 |
+
eval_cfg=LEval_nq_eval_cfg)
|
43 |
+
]
|
opencompass/configs/datasets/leval/levalnewssumm/leval_newssumm_gen.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from mmengine.config import read_base
|
2 |
+
|
3 |
+
with read_base():
|
4 |
+
from .leval_newssumm_gen_b03798 import LEval_newssumm_datasets # noqa: F401, F403
|
opencompass/configs/datasets/leval/levalnewssumm/leval_newssumm_gen_b03798.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
2 |
+
from opencompass.openicl.icl_retriever import ZeroRetriever
|
3 |
+
from opencompass.openicl.icl_inferencer import GenInferencer
|
4 |
+
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator
|
5 |
+
from opencompass.datasets.leval import LEvalGPTEvaluator, LEvalNewsSummDataset
|
6 |
+
|
7 |
+
LEval_newssumm_reader_cfg = dict(
|
8 |
+
input_columns=['context', 'question', 'length'],
|
9 |
+
output_column='answer',
|
10 |
+
train_split='test',
|
11 |
+
test_split='test'
|
12 |
+
)
|
13 |
+
|
14 |
+
LEval_newssumm_infer_cfg = dict(
|
15 |
+
prompt_template=dict(
|
16 |
+
type=PromptTemplate,
|
17 |
+
template=dict(
|
18 |
+
begin=[
|
19 |
+
dict(role='SYSTEM', fallback_role='HUMAN', prompt='Now you are given a very long document. Please follow the instruction after this document. These instructions may include summarizing a document, answering questions based on the document, or writing a required paragraph.'),
|
20 |
+
],
|
21 |
+
round=[
|
22 |
+
dict(role='HUMAN', prompt='Document is as follows. {context}\nInstruction: {question}\nAnswer this question with {length} words.'),
|
23 |
+
dict(role='BOT', prompt=''),
|
24 |
+
], )),
|
25 |
+
retriever=dict(type=ZeroRetriever),
|
26 |
+
inferencer=dict(type=GenInferencer, max_out_len=512)
|
27 |
+
)
|
28 |
+
|
29 |
+
LEval_newssumm_eval_cfg = dict(
|
30 |
+
evaluator=dict(type=RougeEvaluator),
|
31 |
+
pred_role='BOT'
|
32 |
+
)
|
33 |
+
|
34 |
+
LEval_newssumm_datasets = [
|
35 |
+
dict(
|
36 |
+
type=LEvalNewsSummDataset,
|
37 |
+
abbr='LEval_news_summ',
|
38 |
+
path='L4NLP/LEval',
|
39 |
+
name='news_summ',
|
40 |
+
reader_cfg=LEval_newssumm_reader_cfg,
|
41 |
+
infer_cfg=LEval_newssumm_infer_cfg,
|
42 |
+
eval_cfg=LEval_newssumm_eval_cfg)
|
43 |
+
]
|
opencompass/configs/datasets/leval/levalpaperassistant/leval_paper_assistant_gen.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from mmengine.config import read_base
|
2 |
+
|
3 |
+
with read_base():
|
4 |
+
from .leval_paper_assistant_gen_b03798 import LEval_ps_summ_datasets # noqa: F401, F403
|
opencompass/configs/datasets/leval/levalpaperassistant/leval_paper_assistant_gen_b03798.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
2 |
+
from opencompass.openicl.icl_retriever import ZeroRetriever
|
3 |
+
from opencompass.openicl.icl_inferencer import GenInferencer
|
4 |
+
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator
|
5 |
+
from opencompass.datasets.leval import LEvalGPTEvaluator, LEvalPaperAssistantDataset
|
6 |
+
|
7 |
+
LEval_ps_summ_reader_cfg = dict(
|
8 |
+
input_columns=['context', 'question', 'length'],
|
9 |
+
output_column='answer',
|
10 |
+
train_split='test',
|
11 |
+
test_split='test'
|
12 |
+
)
|
13 |
+
|
14 |
+
LEval_ps_summ_infer_cfg = dict(
|
15 |
+
prompt_template=dict(
|
16 |
+
type=PromptTemplate,
|
17 |
+
template=dict(
|
18 |
+
begin=[
|
19 |
+
dict(role='SYSTEM', fallback_role='HUMAN', prompt='Now you are given a very long document. Please follow the instruction after this document. These instructions may include summarizing a document, answering questions based on the document, or writing a required paragraph.'),
|
20 |
+
],
|
21 |
+
round=[
|
22 |
+
dict(role='HUMAN', prompt='Document is as follows. {context}\nInstruction: {question}\nAnswer this question with {length} words.'),
|
23 |
+
dict(role='BOT', prompt=''),
|
24 |
+
], )),
|
25 |
+
retriever=dict(type=ZeroRetriever),
|
26 |
+
inferencer=dict(type=GenInferencer, max_out_len=512)
|
27 |
+
)
|
28 |
+
|
29 |
+
LEval_ps_summ_eval_cfg = dict(
|
30 |
+
evaluator=dict(type=RougeEvaluator),
|
31 |
+
pred_role='BOT'
|
32 |
+
)
|
33 |
+
|
34 |
+
LEval_ps_summ_datasets = [
|
35 |
+
dict(
|
36 |
+
type=LEvalPaperAssistantDataset,
|
37 |
+
abbr='LEval_paper_assistant',
|
38 |
+
path='L4NLP/LEval',
|
39 |
+
name='paper_assistant',
|
40 |
+
reader_cfg=LEval_ps_summ_reader_cfg,
|
41 |
+
infer_cfg=LEval_ps_summ_infer_cfg,
|
42 |
+
eval_cfg=LEval_ps_summ_eval_cfg)
|
43 |
+
]
|
opencompass/configs/datasets/leval/levalpatentsumm/leval_patent_summ_gen.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from mmengine.config import read_base
|
2 |
+
|
3 |
+
with read_base():
|
4 |
+
from .leval_patent_summ_gen_b03798 import LEval_patent_summ_datasets # noqa: F401, F403
|
opencompass/configs/datasets/leval/levalpatentsumm/leval_patent_summ_gen_b03798.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
2 |
+
from opencompass.openicl.icl_retriever import ZeroRetriever
|
3 |
+
from opencompass.openicl.icl_inferencer import GenInferencer
|
4 |
+
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator
|
5 |
+
from opencompass.datasets.leval import LEvalGPTEvaluator, LEvalPatentSummDataset
|
6 |
+
|
7 |
+
LEval_patent_summ_reader_cfg = dict(
|
8 |
+
input_columns=['context', 'question', 'length'],
|
9 |
+
output_column='answer',
|
10 |
+
train_split='test',
|
11 |
+
test_split='test'
|
12 |
+
)
|
13 |
+
|
14 |
+
LEval_patent_summ_infer_cfg = dict(
|
15 |
+
prompt_template=dict(
|
16 |
+
type=PromptTemplate,
|
17 |
+
template=dict(
|
18 |
+
begin=[
|
19 |
+
dict(role='SYSTEM', fallback_role='HUMAN', prompt='Now you are given a very long document. Please follow the instruction after this document. These instructions may include summarizing a document, answering questions based on the document, or writing a required paragraph.'),
|
20 |
+
],
|
21 |
+
round=[
|
22 |
+
dict(role='HUMAN', prompt='Document is as follows. {context}\nInstruction: {question}\nAnswer this question with {length} words.'),
|
23 |
+
dict(role='BOT', prompt=''),
|
24 |
+
], )),
|
25 |
+
retriever=dict(type=ZeroRetriever),
|
26 |
+
inferencer=dict(type=GenInferencer, max_out_len=512)
|
27 |
+
)
|
28 |
+
|
29 |
+
LEval_patent_summ_eval_cfg = dict(
|
30 |
+
evaluator=dict(type=RougeEvaluator),
|
31 |
+
pred_role='BOT'
|
32 |
+
)
|
33 |
+
|
34 |
+
LEval_patent_summ_datasets = [
|
35 |
+
dict(
|
36 |
+
type=LEvalPatentSummDataset,
|
37 |
+
abbr='LEval_patent_summ',
|
38 |
+
path='L4NLP/LEval',
|
39 |
+
name='patent_summ',
|
40 |
+
reader_cfg=LEval_patent_summ_reader_cfg,
|
41 |
+
infer_cfg=LEval_patent_summ_infer_cfg,
|
42 |
+
eval_cfg=LEval_patent_summ_eval_cfg)
|
43 |
+
]
|
opencompass/configs/datasets/leval/levalquality/leval_quality_gen.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from mmengine.config import read_base
|
2 |
+
|
3 |
+
with read_base():
|
4 |
+
from .leval_quality_gen_36a006 import LEval_quality_datasets # noqa: F401, F403
|
opencompass/configs/datasets/leval/levalquality/leval_quality_gen_36a006.py
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
2 |
+
from opencompass.openicl.icl_retriever import ZeroRetriever
|
3 |
+
from opencompass.openicl.icl_inferencer import GenInferencer
|
4 |
+
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator, AccEvaluator
|
5 |
+
from opencompass.datasets.leval import LEvalQualityDataset
|
6 |
+
from opencompass.utils.text_postprocessors import first_capital_postprocess, first_capital_postprocess_multi
|
7 |
+
|
8 |
+
LEval_quality_reader_cfg = dict(
|
9 |
+
input_columns=['context', 'question'],
|
10 |
+
output_column='answer',
|
11 |
+
train_split='test',
|
12 |
+
test_split='test'
|
13 |
+
)
|
14 |
+
|
15 |
+
LEval_quality_infer_cfg = dict(
|
16 |
+
prompt_template=dict(
|
17 |
+
type=PromptTemplate,
|
18 |
+
template=dict(
|
19 |
+
begin=[
|
20 |
+
dict(role='SYSTEM', fallback_role='HUMAN', prompt='Now you are given a very long document. Please follow the instruction based on this document. For multi-choice questions, there could be a single correct option or multiple correct options. Please only provide the letter corresponding to the answer (like A or AB) when answering.'),
|
21 |
+
],
|
22 |
+
round=[
|
23 |
+
dict(role='HUMAN', prompt='Document is as follows.\n{context}\nQuestion:{question}\nAnswer:'),
|
24 |
+
dict(role='BOT', prompt=''),
|
25 |
+
], )),
|
26 |
+
retriever=dict(type=ZeroRetriever),
|
27 |
+
inferencer=dict(type=GenInferencer, max_out_len=10)
|
28 |
+
)
|
29 |
+
|
30 |
+
LEval_quality_eval_cfg = dict(
|
31 |
+
evaluator=dict(type=AccEvaluator),
|
32 |
+
pred_postprocessor=dict(type=first_capital_postprocess),
|
33 |
+
pred_role='BOT'
|
34 |
+
)
|
35 |
+
|
36 |
+
LEval_quality_datasets = [
|
37 |
+
dict(
|
38 |
+
type=LEvalQualityDataset,
|
39 |
+
abbr='LEval_quality',
|
40 |
+
path='L4NLP/LEval',
|
41 |
+
name='quality',
|
42 |
+
reader_cfg=LEval_quality_reader_cfg,
|
43 |
+
infer_cfg=LEval_quality_infer_cfg,
|
44 |
+
eval_cfg=LEval_quality_eval_cfg)
|
45 |
+
]
|
opencompass/configs/datasets/leval/levalreviewsumm/leval_review_summ_gen.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from mmengine.config import read_base
|
2 |
+
|
3 |
+
with read_base():
|
4 |
+
from .leval_review_summ_gen_b03798 import LEval_review_summ_datasets # noqa: F401, F403
|
opencompass/configs/datasets/leval/levalreviewsumm/leval_review_summ_gen_b03798.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
2 |
+
from opencompass.openicl.icl_retriever import ZeroRetriever
|
3 |
+
from opencompass.openicl.icl_inferencer import GenInferencer
|
4 |
+
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator
|
5 |
+
from opencompass.datasets.leval import LEvalGPTEvaluator, LEvalReviewSummDataset
|
6 |
+
|
7 |
+
LEval_review_summ_reader_cfg = dict(
|
8 |
+
input_columns=['context', 'question', 'length'],
|
9 |
+
output_column='answer',
|
10 |
+
train_split='test',
|
11 |
+
test_split='test'
|
12 |
+
)
|
13 |
+
|
14 |
+
LEval_review_summ_infer_cfg = dict(
|
15 |
+
prompt_template=dict(
|
16 |
+
type=PromptTemplate,
|
17 |
+
template=dict(
|
18 |
+
begin=[
|
19 |
+
dict(role='SYSTEM', fallback_role='HUMAN', prompt='Now you are given a very long document. Please follow the instruction after this document. These instructions may include summarizing a document, answering questions based on the document, or writing a required paragraph.'),
|
20 |
+
],
|
21 |
+
round=[
|
22 |
+
dict(role='HUMAN', prompt='Document is as follows. {context}\nInstruction: {question}\nAnswer this question with {length} words.'),
|
23 |
+
dict(role='BOT', prompt=''),
|
24 |
+
], )),
|
25 |
+
retriever=dict(type=ZeroRetriever),
|
26 |
+
inferencer=dict(type=GenInferencer, max_out_len=512)
|
27 |
+
)
|
28 |
+
|
29 |
+
LEval_review_summ_eval_cfg = dict(
|
30 |
+
evaluator=dict(type=RougeEvaluator),
|
31 |
+
pred_role='BOT'
|
32 |
+
)
|
33 |
+
|
34 |
+
LEval_review_summ_datasets = [
|
35 |
+
dict(
|
36 |
+
type=LEvalReviewSummDataset,
|
37 |
+
abbr='LEval_review_summ',
|
38 |
+
path='L4NLP/LEval',
|
39 |
+
name='review_summ',
|
40 |
+
reader_cfg=LEval_review_summ_reader_cfg,
|
41 |
+
infer_cfg=LEval_review_summ_infer_cfg,
|
42 |
+
eval_cfg=LEval_review_summ_eval_cfg)
|
43 |
+
]
|