config (dict) | results (dict) | versions (null)
---|---|---|
{
"model_dtype": "bfloat16",
"model_name": "Almawave/Velvet-14B (0-shot)",
"model_sha": "main"
} | {
"TextualEntailment": {
"acc": 67.63
},
"TextualEntailment_best": {
"acc": 78.5
},
"Sentiment Analysis": {
"acc": 60.49
},
"Sentiment Analysis_best": {
"acc": 66.59
},
"Hate Speech": {
"acc": 55.93
},
"Hate Speech_best": {
"acc": 65.64
},
"Admission Test": {
"acc": 42.4
},
"Admission Test_best": {
"acc": 57.4
},
"Word in Context": {
"acc": 37.91
},
"Word in Context_best": {
"acc": 64.31
},
"FAQ": {
"acc": 45.47
},
"FAQ_best": {
"acc": 89.53
},
"Lexical Substitution": {
"acc": 0.07
},
"Lexical Substitution_best": {
"acc": 0.13
},
"Summarization": {
"acc": 31.09
},
"Summarization_best": {
"acc": 31.11
},
"NER": {
"acc": 31.48
},
"NER_best": {
"acc": 43.79
},
"REL": {
"acc": 6.95
},
"REL_best": {
"acc": 9.98
}
} | null |
{
"model_dtype": "bfloat16",
"model_name": "Almawave/Velvet-14B (5-shot)",
"model_sha": "main"
} | {
"TextualEntailment": {
"acc": 74.46
},
"TextualEntailment_best": {
"acc": 76.75
},
"Sentiment Analysis": {
"acc": 67.99
},
"Sentiment Analysis_best": {
"acc": 68.93
},
"Hate Speech": {
"acc": 66.35
},
"Hate Speech_best": {
"acc": 68.97
},
"Admission Test": {
"acc": 49.73
},
"Admission Test_best": {
"acc": 61.2
},
"Word in Context": {
"acc": 15.41
},
"Word in Context_best": {
"acc": 37.69
},
"FAQ": {
"acc": 41.73
},
"FAQ_best": {
"acc": 80.55
},
"Lexical Substitution": {
"acc": 9.21
},
"Lexical Substitution_best": {
"acc": 9.45
},
"Summarization": {
"acc": 34.26
},
"Summarization_best": {
"acc": 34.88
},
"NER": {
"acc": 48.95
},
"NER_best": {
"acc": 48.73
},
"REL": {
"acc": 13.07
},
"REL_best": {
"acc": 15.1
}
} | null |
{
"model_dtype": "bfloat16",
"model_name": "CohereForAI/aya-expanse-8b",
"model_sha": "main"
} | {
"TextualEntailment": {
"acc": 60.33
},
"TextualEntailment_best": {
"acc": 75
},
"Sentiment Analysis": {
"acc": 60.16
},
"Sentiment Analysis_best": {
"acc": 70.62
},
"Hate Speech": {
"acc": 60.48
},
"Hate Speech_best": {
"acc": 62.97
},
"Admission Test": {
"acc": 38.53
},
"Admission Test_best": {
"acc": 57.8
},
"Word in Context": {
"acc": 45.95
},
"Word in Context_best": {
"acc": 66.19
},
"FAQ": {
"acc": 38.03
},
"FAQ_best": {
"acc": 66.33
},
"Lexical Substitution": {
"acc": 11.62
},
"Lexical Substitution_best": {
"acc": 15.92
},
"Summarization": {
"acc": 18.85
},
"Summarization_best": {
"acc": 19.21
},
"NER": {
"acc": 34.71
},
"NER_best": {
"acc": 39
},
"REL": {
"acc": 7.52
},
"REL_best": {
"acc": 9.57
}
} | null |
{
"model_dtype": "bfloat16",
"model_name": "FairMind/Llama-3-8B-4bit-UltraChat-Ita",
"model_sha": "main"
} | {
"TextualEntailment": {
"acc": 58.08
},
"TextualEntailment_best": {
"acc": 64.25
},
"Sentiment Analysis": {
"acc": 46.01
},
"Sentiment Analysis_best": {
"acc": 67.3
},
"Hate Speech": {
"acc": 48.72
},
"Hate Speech_best": {
"acc": 63.38
},
"Admission Test": {
"acc": 33.37
},
"Admission Test_best": {
"acc": 49.2
},
"Word in Context": {
"acc": 66.03
},
"Word in Context_best": {
"acc": 66.58
},
"FAQ": {
"acc": 34.54
},
"FAQ_best": {
"acc": 55.61
},
"Lexical Substitution": {
"acc": 0
},
"Lexical Substitution_best": {
"acc": 0
},
"Summarization": {
"acc": 23.6
},
"Summarization_best": {
"acc": 24.35
},
"NER": {
"acc": 27.44
},
"NER_best": {
"acc": 31.89
},
"REL": {
"acc": 2.62
},
"REL_best": {
"acc": 4.27
}
} | null |
{
"model_dtype": "bfloat16",
"model_name": "MoxoffSpA/Volare",
"model_sha": "main"
} | {
"TextualEntailment": {
"acc": 51.25
},
"TextualEntailment_best": {
"acc": 55.5
},
"Sentiment Analysis": {
"acc": 44.15
},
"Sentiment Analysis_best": {
"acc": 58.49
},
"Hate Speech": {
"acc": 43.57
},
"Hate Speech_best": {
"acc": 62.7
},
"Admission Test": {
"acc": 22.77
},
"Admission Test_best": {
"acc": 27.4
},
"Word in Context": {
"acc": 27.8
},
"Word in Context_best": {
"acc": 66.4
},
"FAQ": {
"acc": 27.35
},
"FAQ_best": {
"acc": 28.18
},
"Lexical Substitution": {
"acc": 0.02
},
"Lexical Substitution_best": {
"acc": 0.04
},
"Summarization": {
"acc": 23.15
},
"Summarization_best": {
"acc": 23.32
},
"NER": {
"acc": 21.26
},
"NER_best": {
"acc": 21.57
},
"REL": {
"acc": 15.95
},
"REL_best": {
"acc": 20.37
}
} | null |
{
"model_dtype": "bfloat16",
"model_name": "arcee-ai/Llama-3.1-SuperNova-Lite",
"model_sha": "main"
} | {
"TextualEntailment": {
"acc": 68.54
},
"TextualEntailment_best": {
"acc": 74.5
},
"Sentiment Analysis": {
"acc": 63.8
},
"Sentiment Analysis_best": {
"acc": 77.12
},
"Hate Speech": {
"acc": 52.21
},
"Hate Speech_best": {
"acc": 63.8
},
"Admission Test": {
"acc": 43.63
},
"Admission Test_best": {
"acc": 68.6
},
"Word in Context": {
"acc": 42.88
},
"Word in Context_best": {
"acc": 66.58
},
"FAQ": {
"acc": 51.16
},
"FAQ_best": {
"acc": 90.52
},
"Lexical Substitution": {
"acc": 22.05
},
"Lexical Substitution_best": {
"acc": 24.29
},
"Summarization": {
"acc": 22.74
},
"Summarization_best": {
"acc": 22.78
},
"NER": {
"acc": 17.72
},
"NER_best": {
"acc": 20.21
},
"REL": {
"acc": 17.82
},
"REL_best": {
"acc": 20.06
}
} | null |
{
"model_dtype": "bfloat16",
"model_name": "iGeniusAI/Italia-9B-Instruct-v0.1",
"model_sha": "main"
} | {
"TextualEntailment": {
"acc": 59.33
},
"TextualEntailment_best": {
"acc": 70.25
},
"Sentiment Analysis": {
"acc": 48.41
},
"Sentiment Analysis_best": {
"acc": 59.26
},
"Hate Speech": {
"acc": 58.9
},
"Hate Speech_best": {
"acc": 63.58
},
"Admission Test": {
"acc": 26.13
},
"Admission Test_best": {
"acc": 30.4
},
"Word in Context": {
"acc": 49.92
},
"Word in Context_best": {
"acc": 66.67
},
"FAQ": {
"acc": 31.26
},
"FAQ_best": {
"acc": 38.9
},
"Lexical Substitution": {
"acc": 0
},
"Lexical Substitution_best": {
"acc": 0
},
"Summarization": {
"acc": 0
},
"Summarization_best": {
"acc": 0.01
},
"NER": {
"acc": 15.07
},
"NER_best": {
"acc": 20.82
},
"REL": {
"acc": 0
},
"REL_best": {
"acc": 0
}
} | null |
{
"model_dtype": "bfloat16",
"model_name": "ibm-granite/granite-3.1-8b-instruct",
"model_sha": "main"
} | {
"TextualEntailment": {
"acc": 51.08
},
"TextualEntailment_best": {
"acc": 67
},
"Sentiment Analysis": {
"acc": 47.82
},
"Sentiment Analysis_best": {
"acc": 69.45
},
"Hate Speech": {
"acc": 39.51
},
"Hate Speech_best": {
"acc": 62.64
},
"Admission Test": {
"acc": 41.97
},
"Admission Test_best": {
"acc": 59.4
},
"Word in Context": {
"acc": 42
},
"Word in Context_best": {
"acc": 66.85
},
"FAQ": {
"acc": 51.29
},
"FAQ_best": {
"acc": 91.52
},
"Lexical Substitution": {
"acc": 0.09
},
"Lexical Substitution_best": {
"acc": 0.17
},
"Summarization": {
"acc": 30.04
},
"Summarization_best": {
"acc": 30.39
},
"NER": {
"acc": 32.05
},
"NER_best": {
"acc": 37.13
},
"REL": {
"acc": 9.18
},
"REL_best": {
"acc": 11.47
}
} | null |
{
"model_dtype": "bfloat16",
"model_name": "microsoft/Phi-3.5-mini-instruct",
"model_sha": "main"
} | {
"TextualEntailment": {
"acc": 70.04
},
"TextualEntailment_best": {
"acc": 81.5
},
"Sentiment Analysis": {
"acc": 43.73
},
"Sentiment Analysis_best": {
"acc": 70.59
},
"Hate Speech": {
"acc": 64.49
},
"Hate Speech_best": {
"acc": 69.17
},
"Admission Test": {
"acc": 40.67
},
"Admission Test_best": {
"acc": 62.8
},
"Word in Context": {
"acc": 56.97
},
"Word in Context_best": {
"acc": 67.41
},
"FAQ": {
"acc": 44.22
},
"FAQ_best": {
"acc": 79.05
},
"Lexical Substitution": {
"acc": 18.29
},
"Lexical Substitution_best": {
"acc": 20.94
},
"Summarization": {
"acc": 22.7
},
"Summarization_best": {
"acc": 23.4
},
"NER": {
"acc": 60.69
},
"NER_best": {
"acc": 61.35
},
"REL": {
"acc": 14.4
},
"REL_best": {
"acc": 20.61
}
} | null |
{
"model_dtype": "bfloat16",
"model_name": "mistralai/Mistral-7B-Instruct-v0.3",
"model_sha": "main"
} | {
"TextualEntailment": {
"acc": 60.83
},
"TextualEntailment_best": {
"acc": 71.25
},
"Sentiment Analysis": {
"acc": 58.71
},
"Sentiment Analysis_best": {
"acc": 66.14
},
"Hate Speech": {
"acc": 55.86
},
"Hate Speech_best": {
"acc": 65.39
},
"Admission Test": {
"acc": 39.23
},
"Admission Test_best": {
"acc": 54
},
"Word in Context": {
"acc": 61.07
},
"Word in Context_best": {
"acc": 66.49
},
"FAQ": {
"acc": 47.63
},
"FAQ_best": {
"acc": 84.79
},
"Lexical Substitution": {
"acc": 6.47
},
"Lexical Substitution_best": {
"acc": 10.26
},
"Summarization": {
"acc": 27.28
},
"Summarization_best": {
"acc": 28.43
},
"NER": {
"acc": 19.27
},
"NER_best": {
"acc": 19.46
},
"REL": {
"acc": 20.29
},
"REL_best": {
"acc": 21.83
}
} | null |
{
"model_dtype": "bfloat16",
"model_name": "occiglot/occiglot-7b-it-en-instruct",
"model_sha": "main"
} | {
"TextualEntailment": {
"acc": 49.92
},
"TextualEntailment_best": {
"acc": 56
},
"Sentiment Analysis": {
"acc": 51.87
},
"Sentiment Analysis_best": {
"acc": 61.09
},
"Hate Speech": {
"acc": 48.49
},
"Hate Speech_best": {
"acc": 62.72
},
"Admission Test": {
"acc": 42.8
},
"Admission Test_best": {
"acc": 54
},
"Word in Context": {
"acc": 42.86
},
"Word in Context_best": {
"acc": 66.49
},
"FAQ": {
"acc": 42.89
},
"FAQ_best": {
"acc": 83.29
},
"Lexical Substitution": {
"acc": 1.55
},
"Lexical Substitution_best": {
"acc": 2.88
},
"Summarization": {
"acc": 25.5
},
"Summarization_best": {
"acc": 28.56
},
"NER": {
"acc": 10.53
},
"NER_best": {
"acc": 14.15
},
"REL": {
"acc": 32.03
},
"REL_best": {
"acc": 32.45
}
} | null |
{
"model_dtype": "bfloat16",
"model_name": "sapienzanlp/Minerva-7B-instruct-v1.0",
"model_sha": "main"
} | {
"TextualEntailment": {
"acc": 54.13
},
"TextualEntailment_best": {
"acc": 56.5
},
"Sentiment Analysis": {
"acc": 44.52
},
"Sentiment Analysis_best": {
"acc": 59.46
},
"Hate Speech": {
"acc": 39.23
},
"Hate Speech_best": {
"acc": 60.48
},
"Admission Test": {
"acc": 28.87
},
"Admission Test_best": {
"acc": 34
},
"Word in Context": {
"acc": 53.41
},
"Word in Context_best": {
"acc": 66.04
},
"FAQ": {
"acc": 31.05
},
"FAQ_best": {
"acc": 37.66
},
"Lexical Substitution": {
"acc": 0
},
"Lexical Substitution_best": {
"acc": 0
},
"Summarization": {
"acc": 16.22
},
"Summarization_best": {
"acc": 16.36
},
"NER": {
"acc": 18.31
},
"NER_best": {
"acc": 19.31
},
"REL": {
"acc": 17.75
},
"REL_best": {
"acc": 21.38
}
} | null |
{
"model_dtype": "bfloat16",
"model_name": "swap-uniba/LLaMAntino-3-ANITA-8B-Inst-DPO-ITA",
"model_sha": "main"
} | {
"TextualEntailment": {
"acc": 58.92
},
"TextualEntailment_best": {
"acc": 69.1
},
"Sentiment Analysis": {
"acc": 60.95
},
"Sentiment Analysis_best": {
"acc": 72.04
},
"Hate Speech": {
"acc": 39.59
},
"Hate Speech_best": {
"acc": 66.32
},
"Admission Test": {
"acc": 40.73
},
"Admission Test_best": {
"acc": 62.2
},
"Word in Context": {
"acc": 52.6
},
"Word in Context_best": {
"acc": 66.57
},
"FAQ": {
"acc": 42.85
},
"FAQ_best": {
"acc": 71.82
},
"Lexical Substitution": {
"acc": 19.35
},
"Lexical Substitution_best": {
"acc": 19.37
},
"Summarization": {
"acc": 22.37
},
"Summarization_best": {
"acc": 22.71
},
"NER": {
"acc": 38.04
},
"NER_best": {
"acc": 47.57
},
"REL": {
"acc": 17.81
},
"REL_best": {
"acc": 21.61
}
} | null |
README.md exists but content is empty.
- Downloads last month: 97