vlmfinegrained/outputs_vlmeval/molmo-qwen2-clip-pt/molmo-qwen2-clip-pt_MMBench_DEV_EN_V11_acc.csv
"split","Overall","AR","CP","FP-C","FP-S","LR","RR","action_recognition","attribute_comparison","attribute_recognition","celebrity_recognition","function_reasoning","future_prediction","identity_reasoning","image_emotion","image_quality","image_scene","image_style","image_topic","nature_relation","object_localization","ocr","physical_property_reasoning","physical_relation","social_relation","spatial_relationship","structuralized_imagetext_understanding" | |
"dev","0.1540247678018576","0.16463414634146342","0.143646408839779","0.061452513966480445","0.2768166089965398","0.04838709677419355","0.13218390804597702","0.0","0.17647058823529413","0.35","0.36363636363636365","0.1","0.04","0.23529411764705882","0.43333333333333335","0.011904761904761904","0.13541666666666666","0.11290322580645161","0.08333333333333333","0.0967741935483871","0.1","0.26666666666666666","0.16981132075471697","0.26","0.06451612903225806","0.04","0.05405405405405406" | |