{
  "paper": {
    "title": "Does Math Reasoning Improve General LLM Capabilities? Understanding Transferability of LLM Reasoning",
    "arxiv_id": "2507.00432",
    "arxiv_url": "https://arxiv.org/abs/2507.00432",
    "abstract": "Math reasoning has become the poster child of progress in large language models (LLMs), with new models rapidly surpassing human-level performance on benchmarks like MATH and AIME. But as math leaderboards improve week by week, it is worth asking: do these gains reflect broader problem-solving ability or just narrow overfitting?"
  },
  "model": {
    "name": "UniReason-Qwen3-14B-think-SFT",
    "base_model": "Qwen3-14B-Base",
"training_method": "Distill from Qwen3-32B-Instruct (thinking mode) through Reject Sampling", | |
"task_focus": "math-reasoning", | |
"upload_date": "2025-07-05T00:53:17.090139" | |
}, | |
"repository": { | |
"repo_name": "ReasoningTransferability/UniReason-Qwen3-14B-think-SFT", | |
"huggingface_url": "https://huggingface.co/ReasoningTransferability/UniReason-Qwen3-14B-think-SFT" | |
} | |
} |
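For reference, a minimal sketch of loading this checkpoint from the Hugging Face Hub with the transformers library. The repo name comes from the metadata above; the prompt and generation settings are illustrative assumptions, not taken from the model card.

```python
# Minimal sketch: load UniReason-Qwen3-14B-think-SFT via the standard
# transformers AutoModel interface (assumed to apply to this checkpoint).
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "ReasoningTransferability/UniReason-Qwen3-14B-think-SFT"

tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype="auto", device_map="auto")

# Illustrative math-reasoning prompt; generation settings are placeholders.
prompt = "Prove that the sum of two even integers is even."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=512)

# Print only the newly generated tokens, skipping the echoed prompt.
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```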