{
  "results": {
    "openaimmlu": {
      "acc,none": 0.4615439396097422,
      "acc_stderr,none": 0.004090287961453241,
      "alias": "openaimmlu"
    },
    "openaimmlu_STEM": {
      "acc,none": 0.4198675496688742,
      "acc_stderr,none": 0.008819083118680756,
      "alias": " - STEM"
    },
    "openaimmlu_abstract_algebra": {
      "alias": "  - abstract_algebra",
      "acc,none": 0.24,
      "acc_stderr,none": 0.042923469599092816
    },
    "openaimmlu_astronomy": {
      "alias": "  - astronomy",
      "acc,none": 0.5197368421052632,
      "acc_stderr,none": 0.04065771002562603
    },
    "openaimmlu_college_biology": {
      "alias": "  - college_biology",
      "acc,none": 0.4652777777777778,
      "acc_stderr,none": 0.041711158581816184
    },
    "openaimmlu_college_chemistry": {
      "alias": "  - college_chemistry",
      "acc,none": 0.37,
      "acc_stderr,none": 0.04852365870939099
    },
    "openaimmlu_college_computer_science": {
      "alias": "  - college_computer_science",
      "acc,none": 0.36,
      "acc_stderr,none": 0.048241815132442176
    },
    "openaimmlu_college_mathematics": {
      "alias": "  - college_mathematics",
      "acc,none": 0.27,
      "acc_stderr,none": 0.044619604333847394
    },
    "openaimmlu_college_physics": {
      "alias": "  - college_physics",
      "acc,none": 0.28431372549019607,
      "acc_stderr,none": 0.04488482852329017
    },
    "openaimmlu_computer_security": {
      "alias": "  - computer_security",
      "acc,none": 0.52,
      "acc_stderr,none": 0.050211673156867795
    },
    "openaimmlu_conceptual_physics": {
      "alias": "  - conceptual_physics",
      "acc,none": 0.4297872340425532,
      "acc_stderr,none": 0.03236214467715564
    },
    "openaimmlu_econometrics": {
      "alias": "  - econometrics",
      "acc,none": 0.3333333333333333,
      "acc_stderr,none": 0.044346007015849245
    },
    "openaimmlu_electrical_engineering": {
      "alias": "  - electrical_engineering",
      "acc,none": 0.5241379310344828,
      "acc_stderr,none": 0.0416180850350153
    },
    "openaimmlu_elementary_mathematics": {
      "alias": "  - elementary_mathematics",
      "acc,none": 0.3835978835978836,
      "acc_stderr,none": 0.025043757318520196
    },
    "openaimmlu_high_school_biology": {
      "alias": "  - high_school_biology",
      "acc,none": 0.5935483870967742,
      "acc_stderr,none": 0.027941727346256308
    },
    "openaimmlu_high_school_chemistry": {
      "alias": "  - high_school_chemistry",
      "acc,none": 0.43349753694581283,
      "acc_stderr,none": 0.03486731727419872
    },
    "openaimmlu_high_school_computer_science": {
      "alias": "  - high_school_computer_science",
      "acc,none": 0.57,
      "acc_stderr,none": 0.04975698519562428
    },
    "openaimmlu_high_school_mathematics": {
      "alias": "  - high_school_mathematics",
      "acc,none": 0.2962962962962963,
      "acc_stderr,none": 0.02784081149587193
    },
    "openaimmlu_high_school_physics": {
      "alias": "  - high_school_physics",
      "acc,none": 0.3443708609271523,
      "acc_stderr,none": 0.038796870240733264
    },
    "openaimmlu_high_school_statistics": {
      "alias": "  - high_school_statistics",
      "acc,none": 0.4444444444444444,
      "acc_stderr,none": 0.03388857118502325
    },
    "openaimmlu_humanities": {
      "acc,none": 0.5720620842572062,
      "acc_stderr,none": 0.011582619725483814,
      "alias": " - Humanities"
    },
    "openaimmlu_high_school_european_history": {
      "alias": "  - high_school_european_history",
      "acc,none": 0.6606060606060606,
      "acc_stderr,none": 0.03697442205031595
    },
    "openaimmlu_high_school_us_history": {
      "alias": "  - high_school_us_history",
      "acc,none": 0.6176470588235294,
      "acc_stderr,none": 0.03410785338904719
    },
    "openaimmlu_high_school_world_history": {
      "alias": "  - high_school_world_history",
      "acc,none": 0.6624472573839663,
      "acc_stderr,none": 0.03078154910202622
    },
    "openaimmlu_international_law": {
      "alias": "  - international_law",
      "acc,none": 0.628099173553719,
      "acc_stderr,none": 0.04412015806624505
    },
    "openaimmlu_jurisprudence": {
      "alias": "  - jurisprudence",
      "acc,none": 0.5648148148148148,
      "acc_stderr,none": 0.04792898170907062
    },
    "openaimmlu_logical_fallacies": {
      "alias": "  - logical_fallacies",
      "acc,none": 0.4723926380368098,
      "acc_stderr,none": 0.03922378290610991
    },
    "openaimmlu_philosophy": {
      "alias": "  - philosophy",
      "acc,none": 0.5241157556270096,
      "acc_stderr,none": 0.028365041542564577
    },
    "openaimmlu_prehistory": {
      "alias": "  - prehistory",
      "acc,none": 0.5277777777777778,
      "acc_stderr,none": 0.027777777777777797
    },
    "openaimmlu_world_religions": {
      "alias": "  - world_religions",
      "acc,none": 0.5380116959064327,
      "acc_stderr,none": 0.03823727092882307
    },
    "openaimmlu_other": {
      "acc,none": 0.44622387053270396,
      "acc_stderr,none": 0.0063302986349148774,
      "alias": " - Other"
    },
    "openaimmlu_anatomy": {
      "alias": "  - anatomy",
      "acc,none": 0.4444444444444444,
      "acc_stderr,none": 0.04292596718256981
    },
    "openaimmlu_clinical_knowledge": {
      "alias": "  - clinical_knowledge",
      "acc,none": 0.5094339622641509,
      "acc_stderr,none": 0.0307673947078081
    },
    "openaimmlu_college_medicine": {
      "alias": "  - college_medicine",
      "acc,none": 0.41040462427745666,
      "acc_stderr,none": 0.03750757044895537
    },
    "openaimmlu_formal_logic": {
      "alias": "  - formal_logic",
      "acc,none": 0.2619047619047619,
      "acc_stderr,none": 0.03932537680392871
    },
    "openaimmlu_global_facts": {
      "alias": "  - global_facts",
      "acc,none": 0.36,
      "acc_stderr,none": 0.048241815132442176
    },
    "openaimmlu_high_school_geography": {
      "alias": "  - high_school_geography",
      "acc,none": 0.5858585858585859,
      "acc_stderr,none": 0.035094383488796295
    },
    "openaimmlu_high_school_psychology": {
      "alias": "  - high_school_psychology",
      "acc,none": 0.5614678899082569,
      "acc_stderr,none": 0.021274713073954565
    },
    "openaimmlu_human_aging": {
      "alias": "  - human_aging",
      "acc,none": 0.47085201793721976,
      "acc_stderr,none": 0.03350073248773404
    },
    "openaimmlu_machine_learning": {
      "alias": "  - machine_learning",
      "acc,none": 0.24107142857142858,
      "acc_stderr,none": 0.04059867246952685
    },
    "openaimmlu_medical_genetics": {
      "alias": "  - medical_genetics",
      "acc,none": 0.48,
      "acc_stderr,none": 0.050211673156867795
    },
    "openaimmlu_miscellaneous": {
      "alias": "  - miscellaneous",
      "acc,none": 0.5925925925925926,
      "acc_stderr,none": 0.017570705239256555
    },
    "openaimmlu_nutrition": {
      "alias": "  - nutrition",
      "acc,none": 0.5294117647058824,
      "acc_stderr,none": 0.02858034106513829
    },
    "openaimmlu_professional_accounting": {
      "alias": "  - professional_accounting",
      "acc,none": 0.30851063829787234,
      "acc_stderr,none": 0.027553366165101362
    },
    "openaimmlu_professional_law": {
      "alias": "  - professional_law",
      "acc,none": 0.3546284224250326,
      "acc_stderr,none": 0.012218576439090169
    },
    "openaimmlu_professional_medicine": {
      "alias": "  - professional_medicine",
      "acc,none": 0.44485294117647056,
      "acc_stderr,none": 0.03018753206032938
    },
    "openaimmlu_professional_psychology": {
      "alias": "  - professional_psychology",
      "acc,none": 0.42483660130718953,
      "acc_stderr,none": 0.01999797303545833
    },
    "openaimmlu_virology": {
      "alias": "  - virology",
      "acc,none": 0.43373493975903615,
      "acc_stderr,none": 0.03858158940685517
    },
    "openaimmlu_social_science": {
      "acc,none": 0.46682897139379187,
      "acc_stderr,none": 0.008294155824875415,
      "alias": " - Social Science"
    },
    "openaimmlu_business_ethics": {
      "alias": "  - business_ethics",
      "acc,none": 0.49,
      "acc_stderr,none": 0.05024183937956912
    },
    "openaimmlu_high_school_government_and_politics": {
      "alias": "  - high_school_government_and_politics",
      "acc,none": 0.6373056994818653,
      "acc_stderr,none": 0.03469713791704371
    },
    "openaimmlu_high_school_macroeconomics": {
      "alias": "  - high_school_macroeconomics",
      "acc,none": 0.4512820512820513,
      "acc_stderr,none": 0.02523038123893484
    },
    "openaimmlu_high_school_microeconomics": {
      "alias": "  - high_school_microeconomics",
      "acc,none": 0.44537815126050423,
      "acc_stderr,none": 0.0322841062671639
    },
    "openaimmlu_human_sexuality": {
      "alias": "  - human_sexuality",
      "acc,none": 0.5114503816793893,
      "acc_stderr,none": 0.043841400240780176
    },
    "openaimmlu_management": {
      "alias": "  - management",
      "acc,none": 0.5436893203883495,
      "acc_stderr,none": 0.049318019942204146
    },
    "openaimmlu_marketing": {
      "alias": "  - marketing",
      "acc,none": 0.6410256410256411,
      "acc_stderr,none": 0.03142616993791924
    },
    "openaimmlu_moral_disputes": {
      "alias": "  - moral_disputes",
      "acc,none": 0.4884393063583815,
      "acc_stderr,none": 0.026911898686377913
    },
    "openaimmlu_moral_scenarios": {
      "alias": "  - moral_scenarios",
      "acc,none": 0.24692737430167597,
      "acc_stderr,none": 0.01442229220480885
    },
    "openaimmlu_public_relations": {
      "alias": "  - public_relations",
      "acc,none": 0.5727272727272728,
      "acc_stderr,none": 0.04738198703545483
    },
    "openaimmlu_security_studies": {
      "alias": "  - security_studies",
      "acc,none": 0.5918367346938775,
      "acc_stderr,none": 0.03146465712827424
    },
    "openaimmlu_sociology": {
      "alias": "  - sociology",
      "acc,none": 0.7064676616915423,
      "acc_stderr,none": 0.03220024104534205
    },
    "openaimmlu_us_foreign_policy": {
      "alias": "  - us_foreign_policy",
      "acc,none": 0.67,
      "acc_stderr,none": 0.047258156262526066
    }
  },
  "groups": {
    "openaimmlu": {
      "acc,none": 0.4615439396097422,
      "acc_stderr,none": 0.004090287961453241,
      "alias": "openaimmlu"
    },
    "openaimmlu_STEM": {
      "acc,none": 0.4198675496688742,
      "acc_stderr,none": 0.008819083118680756,
      "alias": " - STEM"
    },
    "openaimmlu_humanities": {
      "acc,none": 0.5720620842572062,
      "acc_stderr,none": 0.011582619725483814,
      "alias": " - Humanities"
    },
    "openaimmlu_other": {
      "acc,none": 0.44622387053270396,
      "acc_stderr,none": 0.0063302986349148774,
      "alias": " - Other"
    },
    "openaimmlu_social_science": {
      "acc,none": 0.46682897139379187,
      "acc_stderr,none": 0.008294155824875415,
      "alias": " - Social Science"
    }
  },
  "group_subtasks": {
    "openaimmlu_humanities": [
      "openaimmlu_logical_fallacies",
      "openaimmlu_high_school_us_history",
      "openaimmlu_prehistory",
      "openaimmlu_high_school_world_history",
      "openaimmlu_philosophy",
      "openaimmlu_international_law",
      "openaimmlu_jurisprudence",
      "openaimmlu_world_religions",
      "openaimmlu_high_school_european_history"
    ],
    "openaimmlu_social_science": [
      "openaimmlu_marketing",
      "openaimmlu_moral_scenarios",
      "openaimmlu_high_school_macroeconomics",
      "openaimmlu_high_school_government_and_politics",
      "openaimmlu_business_ethics",
      "openaimmlu_high_school_microeconomics",
      "openaimmlu_security_studies",
      "openaimmlu_moral_disputes",
      "openaimmlu_public_relations",
      "openaimmlu_us_foreign_policy",
      "openaimmlu_management",
      "openaimmlu_sociology",
      "openaimmlu_human_sexuality"
    ],
    "openaimmlu_other": [
      "openaimmlu_professional_law",
      "openaimmlu_medical_genetics",
      "openaimmlu_nutrition",
      "openaimmlu_miscellaneous",
      "openaimmlu_formal_logic",
      "openaimmlu_high_school_geography",
      "openaimmlu_professional_medicine",
      "openaimmlu_clinical_knowledge",
      "openaimmlu_professional_accounting",
      "openaimmlu_professional_psychology",
      "openaimmlu_college_medicine",
      "openaimmlu_human_aging",
      "openaimmlu_high_school_psychology",
      "openaimmlu_anatomy",
      "openaimmlu_global_facts",
      "openaimmlu_machine_learning",
      "openaimmlu_virology"
    ],
    "openaimmlu_STEM": [
      "openaimmlu_high_school_physics",
      "openaimmlu_college_biology",
      "openaimmlu_computer_security",
      "openaimmlu_electrical_engineering",
      "openaimmlu_college_computer_science",
      "openaimmlu_abstract_algebra",
      "openaimmlu_high_school_chemistry",
      "openaimmlu_high_school_biology",
      "openaimmlu_high_school_mathematics",
      "openaimmlu_high_school_statistics",
      "openaimmlu_elementary_mathematics",
      "openaimmlu_college_mathematics",
      "openaimmlu_college_physics",
      "openaimmlu_astronomy",
      "openaimmlu_college_chemistry",
      "openaimmlu_econometrics",
      "openaimmlu_high_school_computer_science",
      "openaimmlu_conceptual_physics"
    ],
    "openaimmlu": [
      "openaimmlu_STEM",
      "openaimmlu_other",
      "openaimmlu_social_science",
      "openaimmlu_humanities"
    ]
  },
  "configs": {
    "openaimmlu_abstract_algebra": {
      "task": "openaimmlu_abstract_algebra",
      "task_alias": "abstract_algebra",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "abstract_algebra",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_anatomy": {
      "task": "openaimmlu_anatomy",
      "task_alias": "anatomy",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "anatomy",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_astronomy": {
      "task": "openaimmlu_astronomy",
      "task_alias": "astronomy",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "astronomy",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_business_ethics": {
      "task": "openaimmlu_business_ethics",
      "task_alias": "business_ethics",
      "tag": "openaimmlu_social_science_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "business_ethics",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_clinical_knowledge": {
      "task": "openaimmlu_clinical_knowledge",
      "task_alias": "clinical_knowledge",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "clinical_knowledge",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_college_biology": {
      "task": "openaimmlu_college_biology",
      "task_alias": "college_biology",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "college_biology",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_college_chemistry": {
      "task": "openaimmlu_college_chemistry",
      "task_alias": "college_chemistry",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "college_chemistry",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_college_computer_science": {
      "task": "openaimmlu_college_computer_science",
      "task_alias": "college_computer_science",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "college_computer_science",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_college_mathematics": {
      "task": "openaimmlu_college_mathematics",
      "task_alias": "college_mathematics",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "college_mathematics",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_college_medicine": {
      "task": "openaimmlu_college_medicine",
      "task_alias": "college_medicine",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "college_medicine",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_college_physics": {
      "task": "openaimmlu_college_physics",
      "task_alias": "college_physics",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "college_physics",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_computer_security": {
      "task": "openaimmlu_computer_security",
      "task_alias": "computer_security",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "computer_security",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_conceptual_physics": {
      "task": "openaimmlu_conceptual_physics",
      "task_alias": "conceptual_physics",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "conceptual_physics",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_econometrics": {
      "task": "openaimmlu_econometrics",
      "task_alias": "econometrics",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "econometrics",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_electrical_engineering": {
      "task": "openaimmlu_electrical_engineering",
      "task_alias": "electrical_engineering",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "electrical_engineering",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_elementary_mathematics": {
      "task": "openaimmlu_elementary_mathematics",
      "task_alias": "elementary_mathematics",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "elementary_mathematics",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_formal_logic": {
      "task": "openaimmlu_formal_logic",
      "task_alias": "formal_logic",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "formal_logic",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_global_facts": {
      "task": "openaimmlu_global_facts",
      "task_alias": "global_facts",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "global_facts",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_high_school_biology": {
      "task": "openaimmlu_high_school_biology",
      "task_alias": "high_school_biology",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "high_school_biology",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_high_school_chemistry": {
      "task": "openaimmlu_high_school_chemistry",
      "task_alias": "high_school_chemistry",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "high_school_chemistry",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_high_school_computer_science": {
      "task": "openaimmlu_high_school_computer_science",
      "task_alias": "high_school_computer_science",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "high_school_computer_science",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_high_school_european_history": {
      "task": "openaimmlu_high_school_european_history",
      "task_alias": "high_school_european_history",
      "tag": "openaimmlu_humanities_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "high_school_european_history",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_high_school_geography": {
      "task": "openaimmlu_high_school_geography",
      "task_alias": "high_school_geography",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "high_school_geography",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_high_school_government_and_politics": {
      "task": "openaimmlu_high_school_government_and_politics",
      "task_alias": "high_school_government_and_politics",
      "tag": "openaimmlu_social_science_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "high_school_government_and_politics",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_high_school_macroeconomics": {
      "task": "openaimmlu_high_school_macroeconomics",
      "task_alias": "high_school_macroeconomics",
      "tag": "openaimmlu_social_science_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "high_school_macroeconomics",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_high_school_mathematics": {
      "task": "openaimmlu_high_school_mathematics",
      "task_alias": "high_school_mathematics",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "high_school_mathematics",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_high_school_microeconomics": {
      "task": "openaimmlu_high_school_microeconomics",
      "task_alias": "high_school_microeconomics",
      "tag": "openaimmlu_social_science_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "high_school_microeconomics",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_high_school_physics": {
      "task": "openaimmlu_high_school_physics",
      "task_alias": "high_school_physics",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "high_school_physics",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_high_school_psychology": {
      "task": "openaimmlu_high_school_psychology",
      "task_alias": "high_school_psychology",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "high_school_psychology",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_high_school_statistics": {
      "task": "openaimmlu_high_school_statistics",
      "task_alias": "high_school_statistics",
      "tag": "openaimmlu_STEM_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "high_school_statistics",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_high_school_us_history": {
      "task": "openaimmlu_high_school_us_history",
      "task_alias": "high_school_us_history",
      "tag": "openaimmlu_humanities_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "high_school_us_history",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_high_school_world_history": {
      "task": "openaimmlu_high_school_world_history",
      "task_alias": "high_school_world_history",
      "tag": "openaimmlu_humanities_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "high_school_world_history",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_human_aging": {
      "task": "openaimmlu_human_aging",
      "task_alias": "human_aging",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "human_aging",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_human_sexuality": {
      "task": "openaimmlu_human_sexuality",
      "task_alias": "human_sexuality",
      "tag": "openaimmlu_social_science_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "human_sexuality",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_international_law": {
      "task": "openaimmlu_international_law",
      "task_alias": "international_law",
      "tag": "openaimmlu_humanities_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "international_law",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_jurisprudence": {
      "task": "openaimmlu_jurisprudence",
      "task_alias": "jurisprudence",
      "tag": "openaimmlu_humanities_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "jurisprudence",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_logical_fallacies": {
      "task": "openaimmlu_logical_fallacies",
      "task_alias": "logical_fallacies",
      "tag": "openaimmlu_humanities_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "logical_fallacies",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_machine_learning": {
      "task": "openaimmlu_machine_learning",
      "task_alias": "machine_learning",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "machine_learning",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_management": {
      "task": "openaimmlu_management",
      "task_alias": "management",
      "tag": "openaimmlu_social_science_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "management",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_marketing": {
      "task": "openaimmlu_marketing",
      "task_alias": "marketing",
      "tag": "openaimmlu_social_science_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "marketing",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_medical_genetics": {
      "task": "openaimmlu_medical_genetics",
      "task_alias": "medical_genetics",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "medical_genetics",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_miscellaneous": {
      "task": "openaimmlu_miscellaneous",
      "task_alias": "miscellaneous",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "miscellaneous",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_moral_disputes": {
      "task": "openaimmlu_moral_disputes",
      "task_alias": "moral_disputes",
      "tag": "openaimmlu_social_science_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "moral_disputes",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_moral_scenarios": {
      "task": "openaimmlu_moral_scenarios",
      "task_alias": "moral_scenarios",
      "tag": "openaimmlu_social_science_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "moral_scenarios",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_nutrition": {
      "task": "openaimmlu_nutrition",
      "task_alias": "nutrition",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "nutrition",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_philosophy": {
      "task": "openaimmlu_philosophy",
      "task_alias": "philosophy",
      "tag": "openaimmlu_humanities_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "philosophy",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_prehistory": {
      "task": "openaimmlu_prehistory",
      "task_alias": "prehistory",
      "tag": "openaimmlu_humanities_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "prehistory",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_professional_accounting": {
      "task": "openaimmlu_professional_accounting",
      "task_alias": "professional_accounting",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "professional_accounting",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_professional_law": {
      "task": "openaimmlu_professional_law",
      "task_alias": "professional_law",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "professional_law",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_professional_medicine": {
      "task": "openaimmlu_professional_medicine",
      "task_alias": "professional_medicine",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "professional_medicine",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_professional_psychology": {
      "task": "openaimmlu_professional_psychology",
      "task_alias": "professional_psychology",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "professional_psychology",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_public_relations": {
      "task": "openaimmlu_public_relations",
      "task_alias": "public_relations",
      "tag": "openaimmlu_social_science_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "public_relations",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_security_studies": {
      "task": "openaimmlu_security_studies",
      "task_alias": "security_studies",
      "tag": "openaimmlu_social_science_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "security_studies",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_sociology": {
      "task": "openaimmlu_sociology",
      "task_alias": "sociology",
      "tag": "openaimmlu_social_science_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "sociology",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_us_foreign_policy": {
      "task": "openaimmlu_us_foreign_policy",
      "task_alias": "us_foreign_policy",
      "tag": "openaimmlu_social_science_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "us_foreign_policy",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_virology": {
      "task": "openaimmlu_virology",
      "task_alias": "virology",
      "tag": "openaimmlu_other_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "virology",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "openaimmlu_world_religions": {
      "task": "openaimmlu_world_religions",
      "task_alias": "world_religions",
      "tag": "openaimmlu_humanities_tasks",
      "dataset_path": "khalidalt/openai_mmlu_arabic",
      "dataset_name": "world_religions",
      "test_split": "test",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_docs(doc):\n\n        def format_example(doc, choices):\n            options = []\n            for _, choice in enumerate(choices):\n                options.append(f'{en2ar[choice]}. {doc[choice]}')\n\n            ar_subject = SUBJECTS[doc['Subject']]\n            query = PROMPT.format(ar_subject, #doc['Subject'],\n                                    doc['Question'],\n                                    \"\\n\".join(options))\n            return query\n\n        keys_en = [\"A\", \"B\", \"C\", \"D\"]\n        keys_ar = ['\u0623', '\u0628', '\u062c', '\u062f']\n        ar_label = en2ar[doc['Answer']]\n\n        out_doc = {\n            \"query\": format_example(doc, keys_en),\n            \"choices\": keys_ar,\n            \"gold\": keys_ar.index(ar_label)\n                }\n\n        return out_doc\n\n    return dataset.map(_process_docs)             \n",
      "doc_to_text": "query",
      "doc_to_target": "gold",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    }
  },
  "versions": {
    "openaimmlu": 0,
    "openaimmlu_STEM": 0,
    "openaimmlu_abstract_algebra": 0.0,
    "openaimmlu_anatomy": 0.0,
    "openaimmlu_astronomy": 0.0,
    "openaimmlu_business_ethics": 0.0,
    "openaimmlu_clinical_knowledge": 0.0,
    "openaimmlu_college_biology": 0.0,
    "openaimmlu_college_chemistry": 0.0,
    "openaimmlu_college_computer_science": 0.0,
    "openaimmlu_college_mathematics": 0.0,
    "openaimmlu_college_medicine": 0.0,
    "openaimmlu_college_physics": 0.0,
    "openaimmlu_computer_security": 0.0,
    "openaimmlu_conceptual_physics": 0.0,
    "openaimmlu_econometrics": 0.0,
    "openaimmlu_electrical_engineering": 0.0,
    "openaimmlu_elementary_mathematics": 0.0,
    "openaimmlu_formal_logic": 0.0,
    "openaimmlu_global_facts": 0.0,
    "openaimmlu_high_school_biology": 0.0,
    "openaimmlu_high_school_chemistry": 0.0,
    "openaimmlu_high_school_computer_science": 0.0,
    "openaimmlu_high_school_european_history": 0.0,
    "openaimmlu_high_school_geography": 0.0,
    "openaimmlu_high_school_government_and_politics": 0.0,
    "openaimmlu_high_school_macroeconomics": 0.0,
    "openaimmlu_high_school_mathematics": 0.0,
    "openaimmlu_high_school_microeconomics": 0.0,
    "openaimmlu_high_school_physics": 0.0,
    "openaimmlu_high_school_psychology": 0.0,
    "openaimmlu_high_school_statistics": 0.0,
    "openaimmlu_high_school_us_history": 0.0,
    "openaimmlu_high_school_world_history": 0.0,
    "openaimmlu_human_aging": 0.0,
    "openaimmlu_human_sexuality": 0.0,
    "openaimmlu_humanities": 0,
    "openaimmlu_international_law": 0.0,
    "openaimmlu_jurisprudence": 0.0,
    "openaimmlu_logical_fallacies": 0.0,
    "openaimmlu_machine_learning": 0.0,
    "openaimmlu_management": 0.0,
    "openaimmlu_marketing": 0.0,
    "openaimmlu_medical_genetics": 0.0,
    "openaimmlu_miscellaneous": 0.0,
    "openaimmlu_moral_disputes": 0.0,
    "openaimmlu_moral_scenarios": 0.0,
    "openaimmlu_nutrition": 0.0,
    "openaimmlu_other": 0,
    "openaimmlu_philosophy": 0.0,
    "openaimmlu_prehistory": 0.0,
    "openaimmlu_professional_accounting": 0.0,
    "openaimmlu_professional_law": 0.0,
    "openaimmlu_professional_medicine": 0.0,
    "openaimmlu_professional_psychology": 0.0,
    "openaimmlu_public_relations": 0.0,
    "openaimmlu_security_studies": 0.0,
    "openaimmlu_social_science": 0,
    "openaimmlu_sociology": 0.0,
    "openaimmlu_us_foreign_policy": 0.0,
    "openaimmlu_virology": 0.0,
    "openaimmlu_world_religions": 0.0
  },
  "n-shot": {
    "openaimmlu_abstract_algebra": 0,
    "openaimmlu_anatomy": 0,
    "openaimmlu_astronomy": 0,
    "openaimmlu_business_ethics": 0,
    "openaimmlu_clinical_knowledge": 0,
    "openaimmlu_college_biology": 0,
    "openaimmlu_college_chemistry": 0,
    "openaimmlu_college_computer_science": 0,
    "openaimmlu_college_mathematics": 0,
    "openaimmlu_college_medicine": 0,
    "openaimmlu_college_physics": 0,
    "openaimmlu_computer_security": 0,
    "openaimmlu_conceptual_physics": 0,
    "openaimmlu_econometrics": 0,
    "openaimmlu_electrical_engineering": 0,
    "openaimmlu_elementary_mathematics": 0,
    "openaimmlu_formal_logic": 0,
    "openaimmlu_global_facts": 0,
    "openaimmlu_high_school_biology": 0,
    "openaimmlu_high_school_chemistry": 0,
    "openaimmlu_high_school_computer_science": 0,
    "openaimmlu_high_school_european_history": 0,
    "openaimmlu_high_school_geography": 0,
    "openaimmlu_high_school_government_and_politics": 0,
    "openaimmlu_high_school_macroeconomics": 0,
    "openaimmlu_high_school_mathematics": 0,
    "openaimmlu_high_school_microeconomics": 0,
    "openaimmlu_high_school_physics": 0,
    "openaimmlu_high_school_psychology": 0,
    "openaimmlu_high_school_statistics": 0,
    "openaimmlu_high_school_us_history": 0,
    "openaimmlu_high_school_world_history": 0,
    "openaimmlu_human_aging": 0,
    "openaimmlu_human_sexuality": 0,
    "openaimmlu_international_law": 0,
    "openaimmlu_jurisprudence": 0,
    "openaimmlu_logical_fallacies": 0,
    "openaimmlu_machine_learning": 0,
    "openaimmlu_management": 0,
    "openaimmlu_marketing": 0,
    "openaimmlu_medical_genetics": 0,
    "openaimmlu_miscellaneous": 0,
    "openaimmlu_moral_disputes": 0,
    "openaimmlu_moral_scenarios": 0,
    "openaimmlu_nutrition": 0,
    "openaimmlu_philosophy": 0,
    "openaimmlu_prehistory": 0,
    "openaimmlu_professional_accounting": 0,
    "openaimmlu_professional_law": 0,
    "openaimmlu_professional_medicine": 0,
    "openaimmlu_professional_psychology": 0,
    "openaimmlu_public_relations": 0,
    "openaimmlu_security_studies": 0,
    "openaimmlu_sociology": 0,
    "openaimmlu_us_foreign_policy": 0,
    "openaimmlu_virology": 0,
    "openaimmlu_world_religions": 0
  },
  "higher_is_better": {
    "openaimmlu": {
      "acc": true
    },
    "openaimmlu_STEM": {
      "acc": true
    },
    "openaimmlu_abstract_algebra": {
      "acc": true
    },
    "openaimmlu_anatomy": {
      "acc": true
    },
    "openaimmlu_astronomy": {
      "acc": true
    },
    "openaimmlu_business_ethics": {
      "acc": true
    },
    "openaimmlu_clinical_knowledge": {
      "acc": true
    },
    "openaimmlu_college_biology": {
      "acc": true
    },
    "openaimmlu_college_chemistry": {
      "acc": true
    },
    "openaimmlu_college_computer_science": {
      "acc": true
    },
    "openaimmlu_college_mathematics": {
      "acc": true
    },
    "openaimmlu_college_medicine": {
      "acc": true
    },
    "openaimmlu_college_physics": {
      "acc": true
    },
    "openaimmlu_computer_security": {
      "acc": true
    },
    "openaimmlu_conceptual_physics": {
      "acc": true
    },
    "openaimmlu_econometrics": {
      "acc": true
    },
    "openaimmlu_electrical_engineering": {
      "acc": true
    },
    "openaimmlu_elementary_mathematics": {
      "acc": true
    },
    "openaimmlu_formal_logic": {
      "acc": true
    },
    "openaimmlu_global_facts": {
      "acc": true
    },
    "openaimmlu_high_school_biology": {
      "acc": true
    },
    "openaimmlu_high_school_chemistry": {
      "acc": true
    },
    "openaimmlu_high_school_computer_science": {
      "acc": true
    },
    "openaimmlu_high_school_european_history": {
      "acc": true
    },
    "openaimmlu_high_school_geography": {
      "acc": true
    },
    "openaimmlu_high_school_government_and_politics": {
      "acc": true
    },
    "openaimmlu_high_school_macroeconomics": {
      "acc": true
    },
    "openaimmlu_high_school_mathematics": {
      "acc": true
    },
    "openaimmlu_high_school_microeconomics": {
      "acc": true
    },
    "openaimmlu_high_school_physics": {
      "acc": true
    },
    "openaimmlu_high_school_psychology": {
      "acc": true
    },
    "openaimmlu_high_school_statistics": {
      "acc": true
    },
    "openaimmlu_high_school_us_history": {
      "acc": true
    },
    "openaimmlu_high_school_world_history": {
      "acc": true
    },
    "openaimmlu_human_aging": {
      "acc": true
    },
    "openaimmlu_human_sexuality": {
      "acc": true
    },
    "openaimmlu_humanities": {
      "acc": true
    },
    "openaimmlu_international_law": {
      "acc": true
    },
    "openaimmlu_jurisprudence": {
      "acc": true
    },
    "openaimmlu_logical_fallacies": {
      "acc": true
    },
    "openaimmlu_machine_learning": {
      "acc": true
    },
    "openaimmlu_management": {
      "acc": true
    },
    "openaimmlu_marketing": {
      "acc": true
    },
    "openaimmlu_medical_genetics": {
      "acc": true
    },
    "openaimmlu_miscellaneous": {
      "acc": true
    },
    "openaimmlu_moral_disputes": {
      "acc": true
    },
    "openaimmlu_moral_scenarios": {
      "acc": true
    },
    "openaimmlu_nutrition": {
      "acc": true
    },
    "openaimmlu_other": {
      "acc": true
    },
    "openaimmlu_philosophy": {
      "acc": true
    },
    "openaimmlu_prehistory": {
      "acc": true
    },
    "openaimmlu_professional_accounting": {
      "acc": true
    },
    "openaimmlu_professional_law": {
      "acc": true
    },
    "openaimmlu_professional_medicine": {
      "acc": true
    },
    "openaimmlu_professional_psychology": {
      "acc": true
    },
    "openaimmlu_public_relations": {
      "acc": true
    },
    "openaimmlu_security_studies": {
      "acc": true
    },
    "openaimmlu_social_science": {
      "acc": true
    },
    "openaimmlu_sociology": {
      "acc": true
    },
    "openaimmlu_us_foreign_policy": {
      "acc": true
    },
    "openaimmlu_virology": {
      "acc": true
    },
    "openaimmlu_world_religions": {
      "acc": true
    }
  },
  "n-samples": {
    "openaimmlu_high_school_physics": {
      "original": 151,
      "effective": 151
    },
    "openaimmlu_college_biology": {
      "original": 144,
      "effective": 144
    },
    "openaimmlu_computer_security": {
      "original": 100,
      "effective": 100
    },
    "openaimmlu_electrical_engineering": {
      "original": 145,
      "effective": 145
    },
    "openaimmlu_college_computer_science": {
      "original": 100,
      "effective": 100
    },
    "openaimmlu_abstract_algebra": {
      "original": 100,
      "effective": 100
    },
    "openaimmlu_high_school_chemistry": {
      "original": 203,
      "effective": 203
    },
    "openaimmlu_high_school_biology": {
      "original": 310,
      "effective": 310
    },
    "openaimmlu_high_school_mathematics": {
      "original": 270,
      "effective": 270
    },
    "openaimmlu_high_school_statistics": {
      "original": 216,
      "effective": 216
    },
    "openaimmlu_elementary_mathematics": {
      "original": 378,
      "effective": 378
    },
    "openaimmlu_college_mathematics": {
      "original": 100,
      "effective": 100
    },
    "openaimmlu_college_physics": {
      "original": 102,
      "effective": 102
    },
    "openaimmlu_astronomy": {
      "original": 152,
      "effective": 152
    },
    "openaimmlu_college_chemistry": {
      "original": 100,
      "effective": 100
    },
    "openaimmlu_econometrics": {
      "original": 114,
      "effective": 114
    },
    "openaimmlu_high_school_computer_science": {
      "original": 100,
      "effective": 100
    },
    "openaimmlu_conceptual_physics": {
      "original": 235,
      "effective": 235
    },
    "openaimmlu_professional_law": {
      "original": 1534,
      "effective": 1534
    },
    "openaimmlu_medical_genetics": {
      "original": 100,
      "effective": 100
    },
    "openaimmlu_nutrition": {
      "original": 306,
      "effective": 306
    },
    "openaimmlu_miscellaneous": {
      "original": 783,
      "effective": 783
    },
    "openaimmlu_formal_logic": {
      "original": 126,
      "effective": 126
    },
    "openaimmlu_high_school_geography": {
      "original": 198,
      "effective": 198
    },
    "openaimmlu_professional_medicine": {
      "original": 272,
      "effective": 272
    },
    "openaimmlu_clinical_knowledge": {
      "original": 265,
      "effective": 265
    },
    "openaimmlu_professional_accounting": {
      "original": 282,
      "effective": 282
    },
    "openaimmlu_professional_psychology": {
      "original": 612,
      "effective": 612
    },
    "openaimmlu_college_medicine": {
      "original": 173,
      "effective": 173
    },
    "openaimmlu_human_aging": {
      "original": 223,
      "effective": 223
    },
    "openaimmlu_high_school_psychology": {
      "original": 545,
      "effective": 545
    },
    "openaimmlu_anatomy": {
      "original": 135,
      "effective": 135
    },
    "openaimmlu_global_facts": {
      "original": 100,
      "effective": 100
    },
    "openaimmlu_machine_learning": {
      "original": 112,
      "effective": 112
    },
    "openaimmlu_virology": {
      "original": 166,
      "effective": 166
    },
    "openaimmlu_marketing": {
      "original": 234,
      "effective": 234
    },
    "openaimmlu_moral_scenarios": {
      "original": 895,
      "effective": 895
    },
    "openaimmlu_high_school_macroeconomics": {
      "original": 390,
      "effective": 390
    },
    "openaimmlu_high_school_government_and_politics": {
      "original": 193,
      "effective": 193
    },
    "openaimmlu_business_ethics": {
      "original": 100,
      "effective": 100
    },
    "openaimmlu_high_school_microeconomics": {
      "original": 238,
      "effective": 238
    },
    "openaimmlu_security_studies": {
      "original": 245,
      "effective": 245
    },
    "openaimmlu_moral_disputes": {
      "original": 346,
      "effective": 346
    },
    "openaimmlu_public_relations": {
      "original": 110,
      "effective": 110
    },
    "openaimmlu_us_foreign_policy": {
      "original": 100,
      "effective": 100
    },
    "openaimmlu_management": {
      "original": 103,
      "effective": 103
    },
    "openaimmlu_sociology": {
      "original": 201,
      "effective": 201
    },
    "openaimmlu_human_sexuality": {
      "original": 131,
      "effective": 131
    },
    "openaimmlu_logical_fallacies": {
      "original": 163,
      "effective": 163
    },
    "openaimmlu_high_school_us_history": {
      "original": 204,
      "effective": 204
    },
    "openaimmlu_prehistory": {
      "original": 324,
      "effective": 324
    },
    "openaimmlu_high_school_world_history": {
      "original": 237,
      "effective": 237
    },
    "openaimmlu_philosophy": {
      "original": 311,
      "effective": 311
    },
    "openaimmlu_international_law": {
      "original": 121,
      "effective": 121
    },
    "openaimmlu_jurisprudence": {
      "original": 108,
      "effective": 108
    },
    "openaimmlu_world_religions": {
      "original": 171,
      "effective": 171
    },
    "openaimmlu_high_school_european_history": {
      "original": 165,
      "effective": 165
    }
  },
  "config": {
    "model": "hf",
    "model_args": "pretrained=mistralai/Mistral-Nemo-Instruct-2407,trust_remote_code=True,cache_dir=/tmp,parallelize=False",
    "model_num_parameters": 12247782400,
    "model_dtype": "torch.bfloat16",
    "model_revision": "main",
    "model_sha": "8aedd450f2583e9c67fae1929f6936b8fc5aef9c",
    "batch_size": "auto",
    "batch_sizes": [
      32
    ],
    "device": null,
    "use_cache": null,
    "limit": null,
    "bootstrap_iters": 100000,
    "gen_kwargs": null,
    "random_seed": 0,
    "numpy_seed": 1234,
    "torch_seed": 1234,
    "fewshot_seed": 1234
  },
  "git_hash": "5e10e017",
  "date": 1736969874.3072467,
  "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.3 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: version 3.27.1\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-5.15.0-1064-azure-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.2.128\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA A100 80GB PCIe\nGPU 1: NVIDIA A100 80GB PCIe\n\nNvidia driver version: 535.161.08\ncuDNN version: Probably one of the following:\n/usr/lib/x86_64-linux-gnu/libcudnn.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.9.4\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture:                       x86_64\nCPU op-mode(s):                     32-bit, 64-bit\nAddress sizes:                      48 bits physical, 48 bits virtual\nByte Order:                         Little Endian\nCPU(s):                             48\nOn-line CPU(s) list:                0-47\nVendor ID:                          AuthenticAMD\nModel name:                         AMD EPYC 7V13 64-Core Processor\nCPU family:                         25\nModel:                              1\nThread(s) per core:                 1\nCore(s) per socket:                 48\nSocket(s):                          1\nStepping:                           1\nBogoMIPS:                           4890.89\nFlags:                              fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl tsc_reliable nonstop_tsc cpuid extd_apicid aperfmperf pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext perfctr_core invpcid_single vmmcall fsgsbase bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves clzero xsaveerptr rdpru arat umip vaes vpclmulqdq rdpid fsrm\nHypervisor vendor:                  Microsoft\nVirtualization type:                full\nL1d cache:                          1.5 MiB (48 instances)\nL1i cache:                          1.5 MiB (48 instances)\nL2 cache:                           24 MiB (48 instances)\nL3 cache:                           192 MiB (6 instances)\nNUMA node(s):                       2\nNUMA node0 CPU(s):                  0-23\nNUMA node1 CPU(s):                  24-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit:        Not affected\nVulnerability L1tf:                 Not affected\nVulnerability Mds:                  Not affected\nVulnerability Meltdown:             Not affected\nVulnerability Mmio stale data:      Not affected\nVulnerability Retbleed:             Not affected\nVulnerability Spec rstack overflow: Mitigation; safe RET, no microcode\nVulnerability Spec store bypass:    Vulnerable\nVulnerability Spectre v1:           Mitigation; usercopy/swapgs barriers and 
__user pointer sanitization\nVulnerability Spectre v2:           Mitigation; Retpolines; STIBP disabled; RSB filling; PBRSB-eIBRS Not affected; BHI Not affected\nVulnerability Srbds:                Not affected\nVulnerability Tsx async abort:      Not affected\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] onnx==1.14.0\n[pip3] pytorch-lightning==2.0.7\n[pip3] pytorch-quantization==2.1.2\n[pip3] torch==2.4.0\n[pip3] torch-tensorrt==2.0.0.dev0\n[pip3] torchaudio==2.1.0\n[pip3] torchdata==0.7.0a0\n[pip3] torchmetrics==1.2.0\n[pip3] torchvision==0.19.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
  "transformers_version": "4.48.0",
  "upper_git_hash": "2e5cd5395faf76fea1afc96dd0f7161a9d3aa145",
  "tokenizer_pad_token": [
    "<unk>",
    "0"
  ],
  "tokenizer_eos_token": [
    "</s>",
    "2"
  ],
  "tokenizer_bos_token": [
    "<s>",
    "1"
  ],
  "eot_token_id": 2,
  "max_length": 131072,
  "task_hashes": {},
  "model_source": "hf",
  "model_name": "mistralai/Mistral-Nemo-Instruct-2407",
  "model_name_sanitized": "mistralai__Mistral-Nemo-Instruct-2407",
  "system_instruction": null,
  "system_instruction_sha": null,
  "fewshot_as_multiturn": false,
  "chat_template": null,
  "chat_template_sha": null,
  "start_time": 5063.260085979,
  "end_time": 5346.967923807,
  "total_evaluation_time_seconds": "283.70783782800027"
}