{
"results": {
"openaimmlu": {
"acc,none": 0.3854151830223615,
"acc_stderr,none": 0.004031384548470796,
"alias": "openaimmlu"
},
"openaimmlu_STEM": {
"acc,none": 0.3258278145695364,
"acc_stderr,none": 0.008457779824528174,
"alias": " - STEM"
},
"openaimmlu_abstract_algebra": {
"alias": " - abstract_algebra",
"acc,none": 0.26,
"acc_stderr,none": 0.04408440022768077
},
"openaimmlu_astronomy": {
"alias": " - astronomy",
"acc,none": 0.4276315789473684,
"acc_stderr,none": 0.04026097083296558
},
"openaimmlu_college_biology": {
"alias": " - college_biology",
"acc,none": 0.4097222222222222,
"acc_stderr,none": 0.04112490974670787
},
"openaimmlu_college_chemistry": {
"alias": " - college_chemistry",
"acc,none": 0.22,
"acc_stderr,none": 0.04163331998932269
},
"openaimmlu_college_computer_science": {
"alias": " - college_computer_science",
"acc,none": 0.36,
"acc_stderr,none": 0.04824181513244218
},
"openaimmlu_college_mathematics": {
"alias": " - college_mathematics",
"acc,none": 0.25,
"acc_stderr,none": 0.04351941398892446
},
"openaimmlu_college_physics": {
"alias": " - college_physics",
"acc,none": 0.22549019607843138,
"acc_stderr,none": 0.041583075330832865
},
"openaimmlu_computer_security": {
"alias": " - computer_security",
"acc,none": 0.49,
"acc_stderr,none": 0.05024183937956912
},
"openaimmlu_conceptual_physics": {
"alias": " - conceptual_physics",
"acc,none": 0.3191489361702128,
"acc_stderr,none": 0.030472973363380045
},
"openaimmlu_econometrics": {
"alias": " - econometrics",
"acc,none": 0.2894736842105263,
"acc_stderr,none": 0.04266339443159394
},
"openaimmlu_electrical_engineering": {
"alias": " - electrical_engineering",
"acc,none": 0.4,
"acc_stderr,none": 0.040824829046386284
},
"openaimmlu_elementary_mathematics": {
"alias": " - elementary_mathematics",
"acc,none": 0.2804232804232804,
"acc_stderr,none": 0.023135287974325628
},
"openaimmlu_high_school_biology": {
"alias": " - high_school_biology",
"acc,none": 0.3741935483870968,
"acc_stderr,none": 0.027528904299845777
},
"openaimmlu_high_school_chemistry": {
"alias": " - high_school_chemistry",
"acc,none": 0.3694581280788177,
"acc_stderr,none": 0.03395970381998575
},
"openaimmlu_high_school_computer_science": {
"alias": " - high_school_computer_science",
"acc,none": 0.43,
"acc_stderr,none": 0.049756985195624284
},
"openaimmlu_high_school_mathematics": {
"alias": " - high_school_mathematics",
"acc,none": 0.3,
"acc_stderr,none": 0.027940457136228402
},
"openaimmlu_high_school_physics": {
"alias": " - high_school_physics",
"acc,none": 0.271523178807947,
"acc_stderr,none": 0.03631329803969654
},
"openaimmlu_high_school_statistics": {
"alias": " - high_school_statistics",
"acc,none": 0.2361111111111111,
"acc_stderr,none": 0.02896370257079102
},
"openaimmlu_humanities": {
"acc,none": 0.4861419068736142,
"acc_stderr,none": 0.011703480584172478,
"alias": " - Humanities"
},
"openaimmlu_high_school_european_history": {
"alias": " - high_school_european_history",
"acc,none": 0.5151515151515151,
"acc_stderr,none": 0.039025510073744475
},
"openaimmlu_high_school_us_history": {
"alias": " - high_school_us_history",
"acc,none": 0.45588235294117646,
"acc_stderr,none": 0.034956245220154746
},
"openaimmlu_high_school_world_history": {
"alias": " - high_school_world_history",
"acc,none": 0.5991561181434599,
"acc_stderr,none": 0.031900803894732356
},
"openaimmlu_international_law": {
"alias": " - international_law",
"acc,none": 0.5867768595041323,
"acc_stderr,none": 0.04495087843548408
},
"openaimmlu_jurisprudence": {
"alias": " - jurisprudence",
"acc,none": 0.48148148148148145,
"acc_stderr,none": 0.04830366024635331
},
"openaimmlu_logical_fallacies": {
"alias": " - logical_fallacies",
"acc,none": 0.43558282208588955,
"acc_stderr,none": 0.03895632464138937
},
"openaimmlu_philosophy": {
"alias": " - philosophy",
"acc,none": 0.43729903536977494,
"acc_stderr,none": 0.028173917761762878
},
"openaimmlu_prehistory": {
"alias": " - prehistory",
"acc,none": 0.42592592592592593,
"acc_stderr,none": 0.027513747284379424
},
"openaimmlu_world_religions": {
"alias": " - world_religions",
"acc,none": 0.52046783625731,
"acc_stderr,none": 0.038316105328219316
},
"openaimmlu_other": {
"acc,none": 0.3792987188132165,
"acc_stderr,none": 0.006232325281499182,
"alias": " - Other"
},
"openaimmlu_anatomy": {
"alias": " - anatomy",
"acc,none": 0.4222222222222222,
"acc_stderr,none": 0.04266763404099582
},
"openaimmlu_clinical_knowledge": {
"alias": " - clinical_knowledge",
"acc,none": 0.3622641509433962,
"acc_stderr,none": 0.0295822451283843
},
"openaimmlu_college_medicine": {
"alias": " - college_medicine",
"acc,none": 0.3179190751445087,
"acc_stderr,none": 0.0355068398916558
},
"openaimmlu_formal_logic": {
"alias": " - formal_logic",
"acc,none": 0.29365079365079366,
"acc_stderr,none": 0.040735243221471255
},
"openaimmlu_global_facts": {
"alias": " - global_facts",
"acc,none": 0.37,
"acc_stderr,none": 0.04852365870939098
},
"openaimmlu_high_school_geography": {
"alias": " - high_school_geography",
"acc,none": 0.4797979797979798,
"acc_stderr,none": 0.03559443565563919
},
"openaimmlu_high_school_psychology": {
"alias": " - high_school_psychology",
"acc,none": 0.42018348623853213,
"acc_stderr,none": 0.021162420048273515
},
"openaimmlu_human_aging": {
"alias": " - human_aging",
"acc,none": 0.47085201793721976,
"acc_stderr,none": 0.03350073248773404
},
"openaimmlu_machine_learning": {
"alias": " - machine_learning",
"acc,none": 0.375,
"acc_stderr,none": 0.04595091388086298
},
"openaimmlu_medical_genetics": {
"alias": " - medical_genetics",
"acc,none": 0.38,
"acc_stderr,none": 0.048783173121456316
},
"openaimmlu_miscellaneous": {
"alias": " - miscellaneous",
"acc,none": 0.5057471264367817,
"acc_stderr,none": 0.017878782326129227
},
"openaimmlu_nutrition": {
"alias": " - nutrition",
"acc,none": 0.4542483660130719,
"acc_stderr,none": 0.02850980780262657
},
"openaimmlu_professional_accounting": {
"alias": " - professional_accounting",
"acc,none": 0.3333333333333333,
"acc_stderr,none": 0.028121636040639882
},
"openaimmlu_professional_law": {
"alias": " - professional_law",
"acc,none": 0.3239895697522816,
"acc_stderr,none": 0.011952840809646566
},
"openaimmlu_professional_medicine": {
"alias": " - professional_medicine",
"acc,none": 0.22058823529411764,
"acc_stderr,none": 0.025187786660227265
},
"openaimmlu_professional_psychology": {
"alias": " - professional_psychology",
"acc,none": 0.34967320261437906,
"acc_stderr,none": 0.01929196189506638
},
"openaimmlu_virology": {
"alias": " - virology",
"acc,none": 0.35542168674698793,
"acc_stderr,none": 0.03726214354322415
},
"openaimmlu_social_science": {
"acc,none": 0.3959220937309799,
"acc_stderr,none": 0.00827574379380361,
"alias": " - Social Science"
},
"openaimmlu_business_ethics": {
"alias": " - business_ethics",
"acc,none": 0.39,
"acc_stderr,none": 0.04902071300001974
},
"openaimmlu_high_school_government_and_politics": {
"alias": " - high_school_government_and_politics",
"acc,none": 0.39378238341968913,
"acc_stderr,none": 0.03526077095548237
},
"openaimmlu_high_school_macroeconomics": {
"alias": " - high_school_macroeconomics",
"acc,none": 0.34615384615384615,
"acc_stderr,none": 0.024121125416941183
},
"openaimmlu_high_school_microeconomics": {
"alias": " - high_school_microeconomics",
"acc,none": 0.3445378151260504,
"acc_stderr,none": 0.030868682604121633
},
"openaimmlu_human_sexuality": {
"alias": " - human_sexuality",
"acc,none": 0.4732824427480916,
"acc_stderr,none": 0.04379024936553894
},
"openaimmlu_management": {
"alias": " - management",
"acc,none": 0.44660194174757284,
"acc_stderr,none": 0.04922424153458933
},
"openaimmlu_marketing": {
"alias": " - marketing",
"acc,none": 0.5982905982905983,
"acc_stderr,none": 0.03211693751051621
},
"openaimmlu_moral_disputes": {
"alias": " - moral_disputes",
"acc,none": 0.4797687861271676,
"acc_stderr,none": 0.026897049996382875
},
"openaimmlu_moral_scenarios": {
"alias": " - moral_scenarios",
"acc,none": 0.24022346368715083,
"acc_stderr,none": 0.014288343803925307
},
"openaimmlu_public_relations": {
"alias": " - public_relations",
"acc,none": 0.4,
"acc_stderr,none": 0.0469237132203465
},
"openaimmlu_security_studies": {
"alias": " - security_studies",
"acc,none": 0.5061224489795918,
"acc_stderr,none": 0.032006820201639086
},
"openaimmlu_sociology": {
"alias": " - sociology",
"acc,none": 0.5373134328358209,
"acc_stderr,none": 0.03525675167467974
},
"openaimmlu_us_foreign_policy": {
"alias": " - us_foreign_policy",
"acc,none": 0.64,
"acc_stderr,none": 0.04824181513244218
}
},
"groups": {
"openaimmlu": {
"acc,none": 0.3854151830223615,
"acc_stderr,none": 0.004031384548470796,
"alias": "openaimmlu"
},
"openaimmlu_STEM": {
"acc,none": 0.3258278145695364,
"acc_stderr,none": 0.008457779824528174,
"alias": " - STEM"
},
"openaimmlu_humanities": {
"acc,none": 0.4861419068736142,
"acc_stderr,none": 0.011703480584172478,
"alias": " - Humanities"
},
"openaimmlu_other": {
"acc,none": 0.3792987188132165,
"acc_stderr,none": 0.006232325281499182,
"alias": " - Other"
},
"openaimmlu_social_science": {
"acc,none": 0.3959220937309799,
"acc_stderr,none": 0.00827574379380361,
"alias": " - Social Science"
}
},
"group_subtasks": {
"openaimmlu_humanities": [
"openaimmlu_logical_fallacies",
"openaimmlu_high_school_us_history",
"openaimmlu_prehistory",
"openaimmlu_high_school_world_history",
"openaimmlu_philosophy",
"openaimmlu_international_law",
"openaimmlu_jurisprudence",
"openaimmlu_world_religions",
"openaimmlu_high_school_european_history"
],
"openaimmlu_social_science": [
"openaimmlu_marketing",
"openaimmlu_moral_scenarios",
"openaimmlu_high_school_macroeconomics",
"openaimmlu_high_school_government_and_politics",
"openaimmlu_business_ethics",
"openaimmlu_high_school_microeconomics",
"openaimmlu_security_studies",
"openaimmlu_moral_disputes",
"openaimmlu_public_relations",
"openaimmlu_us_foreign_policy",
"openaimmlu_management",
"openaimmlu_sociology",
"openaimmlu_human_sexuality"
],
"openaimmlu_other": [
"openaimmlu_professional_law",
"openaimmlu_medical_genetics",
"openaimmlu_nutrition",
"openaimmlu_miscellaneous",
"openaimmlu_formal_logic",
"openaimmlu_high_school_geography",
"openaimmlu_professional_medicine",
"openaimmlu_clinical_knowledge",
"openaimmlu_professional_accounting",
"openaimmlu_professional_psychology",
"openaimmlu_college_medicine",
"openaimmlu_human_aging",
"openaimmlu_high_school_psychology",
"openaimmlu_anatomy",
"openaimmlu_global_facts",
"openaimmlu_machine_learning",
"openaimmlu_virology"
],
"openaimmlu_STEM": [
"openaimmlu_high_school_physics",
"openaimmlu_college_biology",
"openaimmlu_computer_security",
"openaimmlu_electrical_engineering",
"openaimmlu_college_computer_science",
"openaimmlu_abstract_algebra",
"openaimmlu_high_school_chemistry",
"openaimmlu_high_school_biology",
"openaimmlu_high_school_mathematics",
"openaimmlu_high_school_statistics",
"openaimmlu_elementary_mathematics",
"openaimmlu_college_mathematics",
"openaimmlu_college_physics",
"openaimmlu_astronomy",
"openaimmlu_college_chemistry",
"openaimmlu_econometrics",
"openaimmlu_high_school_computer_science",
"openaimmlu_conceptual_physics"
],
"openaimmlu": [
"openaimmlu_STEM",
"openaimmlu_other",
"openaimmlu_social_science",
"openaimmlu_humanities"
]
},
"configs": {
"openaimmlu_abstract_algebra": {
"task": "openaimmlu_abstract_algebra",
"task_alias": "abstract_algebra",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "abstract_algebra",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_anatomy": {
"task": "openaimmlu_anatomy",
"task_alias": "anatomy",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "anatomy",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_astronomy": {
"task": "openaimmlu_astronomy",
"task_alias": "astronomy",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "astronomy",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_business_ethics": {
"task": "openaimmlu_business_ethics",
"task_alias": "business_ethics",
"tag": "openaimmlu_social_science_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "business_ethics",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_clinical_knowledge": {
"task": "openaimmlu_clinical_knowledge",
"task_alias": "clinical_knowledge",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "clinical_knowledge",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_college_biology": {
"task": "openaimmlu_college_biology",
"task_alias": "college_biology",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "college_biology",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_college_chemistry": {
"task": "openaimmlu_college_chemistry",
"task_alias": "college_chemistry",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "college_chemistry",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_college_computer_science": {
"task": "openaimmlu_college_computer_science",
"task_alias": "college_computer_science",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "college_computer_science",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_college_mathematics": {
"task": "openaimmlu_college_mathematics",
"task_alias": "college_mathematics",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "college_mathematics",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_college_medicine": {
"task": "openaimmlu_college_medicine",
"task_alias": "college_medicine",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "college_medicine",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_college_physics": {
"task": "openaimmlu_college_physics",
"task_alias": "college_physics",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "college_physics",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_computer_security": {
"task": "openaimmlu_computer_security",
"task_alias": "computer_security",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "computer_security",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_conceptual_physics": {
"task": "openaimmlu_conceptual_physics",
"task_alias": "conceptual_physics",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "conceptual_physics",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_econometrics": {
"task": "openaimmlu_econometrics",
"task_alias": "econometrics",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "econometrics",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_electrical_engineering": {
"task": "openaimmlu_electrical_engineering",
"task_alias": "electrical_engineering",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "electrical_engineering",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_elementary_mathematics": {
"task": "openaimmlu_elementary_mathematics",
"task_alias": "elementary_mathematics",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "elementary_mathematics",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_formal_logic": {
"task": "openaimmlu_formal_logic",
"task_alias": "formal_logic",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "formal_logic",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_global_facts": {
"task": "openaimmlu_global_facts",
"task_alias": "global_facts",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "global_facts",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_high_school_biology": {
"task": "openaimmlu_high_school_biology",
"task_alias": "high_school_biology",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "high_school_biology",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_high_school_chemistry": {
"task": "openaimmlu_high_school_chemistry",
"task_alias": "high_school_chemistry",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "high_school_chemistry",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_high_school_computer_science": {
"task": "openaimmlu_high_school_computer_science",
"task_alias": "high_school_computer_science",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "high_school_computer_science",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_high_school_european_history": {
"task": "openaimmlu_high_school_european_history",
"task_alias": "high_school_european_history",
"tag": "openaimmlu_humanities_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "high_school_european_history",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_high_school_geography": {
"task": "openaimmlu_high_school_geography",
"task_alias": "high_school_geography",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "high_school_geography",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_high_school_government_and_politics": {
"task": "openaimmlu_high_school_government_and_politics",
"task_alias": "high_school_government_and_politics",
"tag": "openaimmlu_social_science_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "high_school_government_and_politics",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_high_school_macroeconomics": {
"task": "openaimmlu_high_school_macroeconomics",
"task_alias": "high_school_macroeconomics",
"tag": "openaimmlu_social_science_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "high_school_macroeconomics",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_high_school_mathematics": {
"task": "openaimmlu_high_school_mathematics",
"task_alias": "high_school_mathematics",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "high_school_mathematics",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_high_school_microeconomics": {
"task": "openaimmlu_high_school_microeconomics",
"task_alias": "high_school_microeconomics",
"tag": "openaimmlu_social_science_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "high_school_microeconomics",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_high_school_physics": {
"task": "openaimmlu_high_school_physics",
"task_alias": "high_school_physics",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "high_school_physics",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_high_school_psychology": {
"task": "openaimmlu_high_school_psychology",
"task_alias": "high_school_psychology",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "high_school_psychology",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_high_school_statistics": {
"task": "openaimmlu_high_school_statistics",
"task_alias": "high_school_statistics",
"tag": "openaimmlu_STEM_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "high_school_statistics",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_high_school_us_history": {
"task": "openaimmlu_high_school_us_history",
"task_alias": "high_school_us_history",
"tag": "openaimmlu_humanities_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "high_school_us_history",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_high_school_world_history": {
"task": "openaimmlu_high_school_world_history",
"task_alias": "high_school_world_history",
"tag": "openaimmlu_humanities_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "high_school_world_history",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_human_aging": {
"task": "openaimmlu_human_aging",
"task_alias": "human_aging",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "human_aging",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_human_sexuality": {
"task": "openaimmlu_human_sexuality",
"task_alias": "human_sexuality",
"tag": "openaimmlu_social_science_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "human_sexuality",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_international_law": {
"task": "openaimmlu_international_law",
"task_alias": "international_law",
"tag": "openaimmlu_humanities_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "international_law",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_jurisprudence": {
"task": "openaimmlu_jurisprudence",
"task_alias": "jurisprudence",
"tag": "openaimmlu_humanities_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "jurisprudence",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_logical_fallacies": {
"task": "openaimmlu_logical_fallacies",
"task_alias": "logical_fallacies",
"tag": "openaimmlu_humanities_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "logical_fallacies",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_machine_learning": {
"task": "openaimmlu_machine_learning",
"task_alias": "machine_learning",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "machine_learning",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_management": {
"task": "openaimmlu_management",
"task_alias": "management",
"tag": "openaimmlu_social_science_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "management",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_marketing": {
"task": "openaimmlu_marketing",
"task_alias": "marketing",
"tag": "openaimmlu_social_science_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "marketing",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_medical_genetics": {
"task": "openaimmlu_medical_genetics",
"task_alias": "medical_genetics",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "medical_genetics",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_miscellaneous": {
"task": "openaimmlu_miscellaneous",
"task_alias": "miscellaneous",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "miscellaneous",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_moral_disputes": {
"task": "openaimmlu_moral_disputes",
"task_alias": "moral_disputes",
"tag": "openaimmlu_social_science_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "moral_disputes",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_moral_scenarios": {
"task": "openaimmlu_moral_scenarios",
"task_alias": "moral_scenarios",
"tag": "openaimmlu_social_science_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "moral_scenarios",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_nutrition": {
"task": "openaimmlu_nutrition",
"task_alias": "nutrition",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "nutrition",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_philosophy": {
"task": "openaimmlu_philosophy",
"task_alias": "philosophy",
"tag": "openaimmlu_humanities_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "philosophy",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_prehistory": {
"task": "openaimmlu_prehistory",
"task_alias": "prehistory",
"tag": "openaimmlu_humanities_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "prehistory",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_professional_accounting": {
"task": "openaimmlu_professional_accounting",
"task_alias": "professional_accounting",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "professional_accounting",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_professional_law": {
"task": "openaimmlu_professional_law",
"task_alias": "professional_law",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "professional_law",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_professional_medicine": {
"task": "openaimmlu_professional_medicine",
"task_alias": "professional_medicine",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "professional_medicine",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_professional_psychology": {
"task": "openaimmlu_professional_psychology",
"task_alias": "professional_psychology",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "professional_psychology",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_public_relations": {
"task": "openaimmlu_public_relations",
"task_alias": "public_relations",
"tag": "openaimmlu_social_science_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "public_relations",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_security_studies": {
"task": "openaimmlu_security_studies",
"task_alias": "security_studies",
"tag": "openaimmlu_social_science_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "security_studies",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_sociology": {
"task": "openaimmlu_sociology",
"task_alias": "sociology",
"tag": "openaimmlu_social_science_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "sociology",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_us_foreign_policy": {
"task": "openaimmlu_us_foreign_policy",
"task_alias": "us_foreign_policy",
"tag": "openaimmlu_social_science_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "us_foreign_policy",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_virology": {
"task": "openaimmlu_virology",
"task_alias": "virology",
"tag": "openaimmlu_other_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "virology",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
},
"openaimmlu_world_religions": {
"task": "openaimmlu_world_religions",
"task_alias": "world_religions",
"tag": "openaimmlu_humanities_tasks",
"dataset_path": "khalidalt/openai_mmlu_arabic",
"dataset_name": "world_religions",
"test_split": "test",
"process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_docs(doc):\n\n def format_example(doc, choices):\n options = []\n for _, choice in enumerate(choices):\n options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n ar_subject = SUBJECTS[doc['Subject']]\n query = PROMPT.format(ar_subject, #doc['Subject'],\n doc['Question'],\n \"\\n\".join(options))\n return query\n\n keys_en = [\"A\", \"B\", \"C\", \"D\"]\n keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n ar_label = en2ar[doc['Answer']]\n\n out_doc = {\n \"query\": format_example(doc, keys_en),\n \"choices\": keys_ar,\n \"gold\": keys_ar.index(ar_label)\n }\n\n return out_doc\n\n return dataset.map(_process_docs) \n",
"doc_to_text": "query",
"doc_to_target": "gold",
"doc_to_choice": "choices",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 0.0
}
}
},
"versions": {
"openaimmlu": 0,
"openaimmlu_STEM": 0,
"openaimmlu_abstract_algebra": 0.0,
"openaimmlu_anatomy": 0.0,
"openaimmlu_astronomy": 0.0,
"openaimmlu_business_ethics": 0.0,
"openaimmlu_clinical_knowledge": 0.0,
"openaimmlu_college_biology": 0.0,
"openaimmlu_college_chemistry": 0.0,
"openaimmlu_college_computer_science": 0.0,
"openaimmlu_college_mathematics": 0.0,
"openaimmlu_college_medicine": 0.0,
"openaimmlu_college_physics": 0.0,
"openaimmlu_computer_security": 0.0,
"openaimmlu_conceptual_physics": 0.0,
"openaimmlu_econometrics": 0.0,
"openaimmlu_electrical_engineering": 0.0,
"openaimmlu_elementary_mathematics": 0.0,
"openaimmlu_formal_logic": 0.0,
"openaimmlu_global_facts": 0.0,
"openaimmlu_high_school_biology": 0.0,
"openaimmlu_high_school_chemistry": 0.0,
"openaimmlu_high_school_computer_science": 0.0,
"openaimmlu_high_school_european_history": 0.0,
"openaimmlu_high_school_geography": 0.0,
"openaimmlu_high_school_government_and_politics": 0.0,
"openaimmlu_high_school_macroeconomics": 0.0,
"openaimmlu_high_school_mathematics": 0.0,
"openaimmlu_high_school_microeconomics": 0.0,
"openaimmlu_high_school_physics": 0.0,
"openaimmlu_high_school_psychology": 0.0,
"openaimmlu_high_school_statistics": 0.0,
"openaimmlu_high_school_us_history": 0.0,
"openaimmlu_high_school_world_history": 0.0,
"openaimmlu_human_aging": 0.0,
"openaimmlu_human_sexuality": 0.0,
"openaimmlu_humanities": 0,
"openaimmlu_international_law": 0.0,
"openaimmlu_jurisprudence": 0.0,
"openaimmlu_logical_fallacies": 0.0,
"openaimmlu_machine_learning": 0.0,
"openaimmlu_management": 0.0,
"openaimmlu_marketing": 0.0,
"openaimmlu_medical_genetics": 0.0,
"openaimmlu_miscellaneous": 0.0,
"openaimmlu_moral_disputes": 0.0,
"openaimmlu_moral_scenarios": 0.0,
"openaimmlu_nutrition": 0.0,
"openaimmlu_other": 0,
"openaimmlu_philosophy": 0.0,
"openaimmlu_prehistory": 0.0,
"openaimmlu_professional_accounting": 0.0,
"openaimmlu_professional_law": 0.0,
"openaimmlu_professional_medicine": 0.0,
"openaimmlu_professional_psychology": 0.0,
"openaimmlu_public_relations": 0.0,
"openaimmlu_security_studies": 0.0,
"openaimmlu_social_science": 0,
"openaimmlu_sociology": 0.0,
"openaimmlu_us_foreign_policy": 0.0,
"openaimmlu_virology": 0.0,
"openaimmlu_world_religions": 0.0
},
"n-shot": {
"openaimmlu_abstract_algebra": 0,
"openaimmlu_anatomy": 0,
"openaimmlu_astronomy": 0,
"openaimmlu_business_ethics": 0,
"openaimmlu_clinical_knowledge": 0,
"openaimmlu_college_biology": 0,
"openaimmlu_college_chemistry": 0,
"openaimmlu_college_computer_science": 0,
"openaimmlu_college_mathematics": 0,
"openaimmlu_college_medicine": 0,
"openaimmlu_college_physics": 0,
"openaimmlu_computer_security": 0,
"openaimmlu_conceptual_physics": 0,
"openaimmlu_econometrics": 0,
"openaimmlu_electrical_engineering": 0,
"openaimmlu_elementary_mathematics": 0,
"openaimmlu_formal_logic": 0,
"openaimmlu_global_facts": 0,
"openaimmlu_high_school_biology": 0,
"openaimmlu_high_school_chemistry": 0,
"openaimmlu_high_school_computer_science": 0,
"openaimmlu_high_school_european_history": 0,
"openaimmlu_high_school_geography": 0,
"openaimmlu_high_school_government_and_politics": 0,
"openaimmlu_high_school_macroeconomics": 0,
"openaimmlu_high_school_mathematics": 0,
"openaimmlu_high_school_microeconomics": 0,
"openaimmlu_high_school_physics": 0,
"openaimmlu_high_school_psychology": 0,
"openaimmlu_high_school_statistics": 0,
"openaimmlu_high_school_us_history": 0,
"openaimmlu_high_school_world_history": 0,
"openaimmlu_human_aging": 0,
"openaimmlu_human_sexuality": 0,
"openaimmlu_international_law": 0,
"openaimmlu_jurisprudence": 0,
"openaimmlu_logical_fallacies": 0,
"openaimmlu_machine_learning": 0,
"openaimmlu_management": 0,
"openaimmlu_marketing": 0,
"openaimmlu_medical_genetics": 0,
"openaimmlu_miscellaneous": 0,
"openaimmlu_moral_disputes": 0,
"openaimmlu_moral_scenarios": 0,
"openaimmlu_nutrition": 0,
"openaimmlu_philosophy": 0,
"openaimmlu_prehistory": 0,
"openaimmlu_professional_accounting": 0,
"openaimmlu_professional_law": 0,
"openaimmlu_professional_medicine": 0,
"openaimmlu_professional_psychology": 0,
"openaimmlu_public_relations": 0,
"openaimmlu_security_studies": 0,
"openaimmlu_sociology": 0,
"openaimmlu_us_foreign_policy": 0,
"openaimmlu_virology": 0,
"openaimmlu_world_religions": 0
},
"higher_is_better": {
"openaimmlu": {
"acc": true
},
"openaimmlu_STEM": {
"acc": true
},
"openaimmlu_abstract_algebra": {
"acc": true
},
"openaimmlu_anatomy": {
"acc": true
},
"openaimmlu_astronomy": {
"acc": true
},
"openaimmlu_business_ethics": {
"acc": true
},
"openaimmlu_clinical_knowledge": {
"acc": true
},
"openaimmlu_college_biology": {
"acc": true
},
"openaimmlu_college_chemistry": {
"acc": true
},
"openaimmlu_college_computer_science": {
"acc": true
},
"openaimmlu_college_mathematics": {
"acc": true
},
"openaimmlu_college_medicine": {
"acc": true
},
"openaimmlu_college_physics": {
"acc": true
},
"openaimmlu_computer_security": {
"acc": true
},
"openaimmlu_conceptual_physics": {
"acc": true
},
"openaimmlu_econometrics": {
"acc": true
},
"openaimmlu_electrical_engineering": {
"acc": true
},
"openaimmlu_elementary_mathematics": {
"acc": true
},
"openaimmlu_formal_logic": {
"acc": true
},
"openaimmlu_global_facts": {
"acc": true
},
"openaimmlu_high_school_biology": {
"acc": true
},
"openaimmlu_high_school_chemistry": {
"acc": true
},
"openaimmlu_high_school_computer_science": {
"acc": true
},
"openaimmlu_high_school_european_history": {
"acc": true
},
"openaimmlu_high_school_geography": {
"acc": true
},
"openaimmlu_high_school_government_and_politics": {
"acc": true
},
"openaimmlu_high_school_macroeconomics": {
"acc": true
},
"openaimmlu_high_school_mathematics": {
"acc": true
},
"openaimmlu_high_school_microeconomics": {
"acc": true
},
"openaimmlu_high_school_physics": {
"acc": true
},
"openaimmlu_high_school_psychology": {
"acc": true
},
"openaimmlu_high_school_statistics": {
"acc": true
},
"openaimmlu_high_school_us_history": {
"acc": true
},
"openaimmlu_high_school_world_history": {
"acc": true
},
"openaimmlu_human_aging": {
"acc": true
},
"openaimmlu_human_sexuality": {
"acc": true
},
"openaimmlu_humanities": {
"acc": true
},
"openaimmlu_international_law": {
"acc": true
},
"openaimmlu_jurisprudence": {
"acc": true
},
"openaimmlu_logical_fallacies": {
"acc": true
},
"openaimmlu_machine_learning": {
"acc": true
},
"openaimmlu_management": {
"acc": true
},
"openaimmlu_marketing": {
"acc": true
},
"openaimmlu_medical_genetics": {
"acc": true
},
"openaimmlu_miscellaneous": {
"acc": true
},
"openaimmlu_moral_disputes": {
"acc": true
},
"openaimmlu_moral_scenarios": {
"acc": true
},
"openaimmlu_nutrition": {
"acc": true
},
"openaimmlu_other": {
"acc": true
},
"openaimmlu_philosophy": {
"acc": true
},
"openaimmlu_prehistory": {
"acc": true
},
"openaimmlu_professional_accounting": {
"acc": true
},
"openaimmlu_professional_law": {
"acc": true
},
"openaimmlu_professional_medicine": {
"acc": true
},
"openaimmlu_professional_psychology": {
"acc": true
},
"openaimmlu_public_relations": {
"acc": true
},
"openaimmlu_security_studies": {
"acc": true
},
"openaimmlu_social_science": {
"acc": true
},
"openaimmlu_sociology": {
"acc": true
},
"openaimmlu_us_foreign_policy": {
"acc": true
},
"openaimmlu_virology": {
"acc": true
},
"openaimmlu_world_religions": {
"acc": true
}
},
"n-samples": {
"openaimmlu_high_school_physics": {
"original": 151,
"effective": 151
},
"openaimmlu_college_biology": {
"original": 144,
"effective": 144
},
"openaimmlu_computer_security": {
"original": 100,
"effective": 100
},
"openaimmlu_electrical_engineering": {
"original": 145,
"effective": 145
},
"openaimmlu_college_computer_science": {
"original": 100,
"effective": 100
},
"openaimmlu_abstract_algebra": {
"original": 100,
"effective": 100
},
"openaimmlu_high_school_chemistry": {
"original": 203,
"effective": 203
},
"openaimmlu_high_school_biology": {
"original": 310,
"effective": 310
},
"openaimmlu_high_school_mathematics": {
"original": 270,
"effective": 270
},
"openaimmlu_high_school_statistics": {
"original": 216,
"effective": 216
},
"openaimmlu_elementary_mathematics": {
"original": 378,
"effective": 378
},
"openaimmlu_college_mathematics": {
"original": 100,
"effective": 100
},
"openaimmlu_college_physics": {
"original": 102,
"effective": 102
},
"openaimmlu_astronomy": {
"original": 152,
"effective": 152
},
"openaimmlu_college_chemistry": {
"original": 100,
"effective": 100
},
"openaimmlu_econometrics": {
"original": 114,
"effective": 114
},
"openaimmlu_high_school_computer_science": {
"original": 100,
"effective": 100
},
"openaimmlu_conceptual_physics": {
"original": 235,
"effective": 235
},
"openaimmlu_professional_law": {
"original": 1534,
"effective": 1534
},
"openaimmlu_medical_genetics": {
"original": 100,
"effective": 100
},
"openaimmlu_nutrition": {
"original": 306,
"effective": 306
},
"openaimmlu_miscellaneous": {
"original": 783,
"effective": 783
},
"openaimmlu_formal_logic": {
"original": 126,
"effective": 126
},
"openaimmlu_high_school_geography": {
"original": 198,
"effective": 198
},
"openaimmlu_professional_medicine": {
"original": 272,
"effective": 272
},
"openaimmlu_clinical_knowledge": {
"original": 265,
"effective": 265
},
"openaimmlu_professional_accounting": {
"original": 282,
"effective": 282
},
"openaimmlu_professional_psychology": {
"original": 612,
"effective": 612
},
"openaimmlu_college_medicine": {
"original": 173,
"effective": 173
},
"openaimmlu_human_aging": {
"original": 223,
"effective": 223
},
"openaimmlu_high_school_psychology": {
"original": 545,
"effective": 545
},
"openaimmlu_anatomy": {
"original": 135,
"effective": 135
},
"openaimmlu_global_facts": {
"original": 100,
"effective": 100
},
"openaimmlu_machine_learning": {
"original": 112,
"effective": 112
},
"openaimmlu_virology": {
"original": 166,
"effective": 166
},
"openaimmlu_marketing": {
"original": 234,
"effective": 234
},
"openaimmlu_moral_scenarios": {
"original": 895,
"effective": 895
},
"openaimmlu_high_school_macroeconomics": {
"original": 390,
"effective": 390
},
"openaimmlu_high_school_government_and_politics": {
"original": 193,
"effective": 193
},
"openaimmlu_business_ethics": {
"original": 100,
"effective": 100
},
"openaimmlu_high_school_microeconomics": {
"original": 238,
"effective": 238
},
"openaimmlu_security_studies": {
"original": 245,
"effective": 245
},
"openaimmlu_moral_disputes": {
"original": 346,
"effective": 346
},
"openaimmlu_public_relations": {
"original": 110,
"effective": 110
},
"openaimmlu_us_foreign_policy": {
"original": 100,
"effective": 100
},
"openaimmlu_management": {
"original": 103,
"effective": 103
},
"openaimmlu_sociology": {
"original": 201,
"effective": 201
},
"openaimmlu_human_sexuality": {
"original": 131,
"effective": 131
},
"openaimmlu_logical_fallacies": {
"original": 163,
"effective": 163
},
"openaimmlu_high_school_us_history": {
"original": 204,
"effective": 204
},
"openaimmlu_prehistory": {
"original": 324,
"effective": 324
},
"openaimmlu_high_school_world_history": {
"original": 237,
"effective": 237
},
"openaimmlu_philosophy": {
"original": 311,
"effective": 311
},
"openaimmlu_international_law": {
"original": 121,
"effective": 121
},
"openaimmlu_jurisprudence": {
"original": 108,
"effective": 108
},
"openaimmlu_world_religions": {
"original": 171,
"effective": 171
},
"openaimmlu_high_school_european_history": {
"original": 165,
"effective": 165
}
},
"config": {
"model": "hf",
"model_args": "pretrained=inceptionai/jais-adapted-7b-chat,trust_remote_code=True,cache_dir=/tmp,parallelize=False",
"model_num_parameters": 7000559616,
"model_dtype": "torch.float32",
"model_revision": "main",
"model_sha": "f2de64b06baedc5546928fbdea10fca517f7cbc7",
"batch_size": "auto",
"batch_sizes": [
16
],
"device": null,
"use_cache": null,
"limit": null,
"bootstrap_iters": 100000,
"gen_kwargs": null,
"random_seed": 0,
"numpy_seed": 1234,
"torch_seed": 1234,
"fewshot_seed": 1234
},
"git_hash": "5e10e017",
"date": 1736968038.6495116,
"pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.3 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: version 3.27.1\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-5.15.0-1064-azure-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.2.128\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA A100 80GB PCIe\nGPU 1: NVIDIA A100 80GB PCIe\n\nNvidia driver version: 535.161.08\ncuDNN version: Probably one of the following:\n/usr/lib/x86_64-linux-gnu/libcudnn.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.9.4\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 48 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: AuthenticAMD\nModel name: AMD EPYC 7V13 64-Core Processor\nCPU family: 25\nModel: 1\nThread(s) per core: 1\nCore(s) per socket: 48\nSocket(s): 1\nStepping: 1\nBogoMIPS: 4890.89\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl tsc_reliable nonstop_tsc cpuid extd_apicid aperfmperf pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext perfctr_core invpcid_single vmmcall fsgsbase bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves clzero xsaveerptr rdpru arat umip vaes vpclmulqdq rdpid fsrm\nHypervisor vendor: Microsoft\nVirtualization type: full\nL1d cache: 1.5 MiB (48 instances)\nL1i cache: 1.5 MiB (48 instances)\nL2 cache: 24 MiB (48 instances)\nL3 cache: 192 MiB (6 instances)\nNUMA node(s): 2\nNUMA node0 CPU(s): 0-23\nNUMA node1 CPU(s): 24-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Not affected\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Not affected\nVulnerability Retbleed: Not affected\nVulnerability Spec rstack overflow: Mitigation; safe RET, no microcode\nVulnerability Spec store bypass: Vulnerable\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Retpolines; STIBP disabled; RSB filling; PBRSB-eIBRS Not affected; BHI Not affected\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Not affected\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] onnx==1.14.0\n[pip3] pytorch-lightning==2.0.7\n[pip3] pytorch-quantization==2.1.2\n[pip3] torch==2.4.0\n[pip3] torch-tensorrt==2.0.0.dev0\n[pip3] torchaudio==2.1.0\n[pip3] torchdata==0.7.0a0\n[pip3] torchmetrics==1.2.0\n[pip3] torchvision==0.19.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
"transformers_version": "4.48.0",
"upper_git_hash": "2e5cd5395faf76fea1afc96dd0f7161a9d3aa145",
"tokenizer_pad_token": [
"<unk>",
"0"
],
"tokenizer_eos_token": [
"</s>",
"2"
],
"tokenizer_bos_token": [
"<s>",
"1"
],
"eot_token_id": 2,
"max_length": 4096,
"task_hashes": {},
"model_source": "hf",
"model_name": "inceptionai/jais-adapted-7b-chat",
"model_name_sanitized": "inceptionai__jais-adapted-7b-chat",
"system_instruction": null,
"system_instruction_sha": null,
"fewshot_as_multiturn": false,
"chat_template": null,
"chat_template_sha": null,
"start_time": 3227.626114991,
"end_time": 3509.415462885,
"total_evaluation_time_seconds": "281.789347894"
}