DND_HEADER = """
<style>
  .header-gradient {
    top: 40%;
    bottom: 40%;
    padding: 10px 0px;
    font-weight: bold;
    font-size: 40px;
    font-family: Inter, Arial, Helvetica, sans-serif;
    background: linear-gradient(to right, #67a102, #c0dc90);
    -webkit-text-fill-color: transparent;
    -webkit-background-clip: text;
  }
  .header-normal {
    top: 40%;
    bottom: 40%;
    padding: 10px 0px;
    font-weight: bold;
    font-size: 40px;
    font-family: Inter, Arial, Helvetica, sans-serif;
  }
</style>
<div align="center">
  <span class="header-gradient"> Drag-and-Drop LLMs: Zero-Shot Prompt-to-Weights </span>
</div>
<p align="center">
  | <a href=""><b>Documentation</b></a> | <a href=""><b>Github</b></a> | <a href="https://arxiv.org/abs/2506.16406"><b>Paper</b></a> | <a href="https://x.com/VictorKaiWang1/status/1935905121659240513"><b>Twitter/X</b></a> |
</p>"""
DND_INTRODUCTION = """
👋 Welcome to Drag-and-Drop LLMs: Zero-Shot Prompt-to-Weights!
> Drag-and-Drop LLMs generates model weights directly from task prompts, with no task-specific training.
- **Zero-Shot**: weights for a new task are generated without any training data.
- **Prompt-to-Weights**: task prompts are mapped directly to model weights.
- **Easy-to-use**: a unified interface covers the whole prompt-to-weights generation workflow.
"""
TASK_LIST = ["π§ Commonsense Reasoning", "π’ Math", "π» Coding"]
TASK_DATASET_LIST = {
"π§ Commonsense Reasoning": ["ARC-c", "OBQA"],
"π’ Math": ["GSM-8K"],
"π» Coding": ["HumanEval"],
}
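# TASK_DATASET_LIST keys mirror TASK_LIST, so a task dropdown can drive the dataset
# dropdown. The sketch below shows one way to wire that up; `make_task_selector` and
# its callback are hypothetical names and assumptions, not the original demo code.
def make_task_selector():
    """Build task/dataset dropdowns where the task selection updates the dataset choices.

    Illustrative only; meant to be called inside a `gr.Blocks()` context so the
    `.change` event can be registered.
    """
    import gradio as gr  # local import keeps gradio optional for plain constant access

    task_dd = gr.Dropdown(choices=TASK_LIST, value=TASK_LIST[0], label="Task")
    dataset_dd = gr.Dropdown(choices=TASK_DATASET_LIST[TASK_LIST[0]], label="Dataset")

    def _on_task_change(task):
        datasets = TASK_DATASET_LIST[task]
        # Reset the dataset dropdown to the first dataset of the newly selected task.
        return gr.update(choices=datasets, value=datasets[0])

    task_dd.change(_on_task_change, inputs=task_dd, outputs=dataset_dd)
    return task_dd, dataset_dd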
EXAMPLE_LIST = ["Example 1", "Example 2", "Example 3", "Example 4", "Example 5"]
TASK_PATH_MAPPING = {
    "🧠 Commonsense Reasoning": "common",
    "🔢 Math": "math",
    "💻 Coding": "coding",
}
DATASET_PATH_MAPPING = {
    "ARC-c": "arc_c",
    "OBQA": "obqa",
    "GSM-8K": "gsm8k",
    "HumanEval": "humaneval",
}
EXAMPLE_PATH_MAPPING = {
    "Example 1": "1",
    "Example 2": "2",
    "Example 3": "3",
    "Example 4": "4",
    "Example 5": "5",
}
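# The three *_PATH_MAPPING dicts translate the human-readable dropdown labels into
# directory names. Below is a hedged sketch of how they might be combined into a
# sample path; `resolve_example_path` and the "examples" root are assumptions, and
# the actual directory layout used by the demo may differ.
def resolve_example_path(task, dataset, example, root="examples"):
    """Map (task, dataset, example) labels to a directory path (illustrative only)."""
    import os  # stdlib; imported locally to keep this sketch self-contained

    return os.path.join(
        root,
        TASK_PATH_MAPPING[task],
        DATASET_PATH_MAPPING[dataset],
        EXAMPLE_PATH_MAPPING[example],
    )


# e.g. resolve_example_path("🔢 Math", "GSM-8K", "Example 1") -> "examples/math/gsm8k/1"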
COLUMN_NAMES = ["Prompt", "Pass@1", "Pass@5", "Pass@10", "Correctness"]
DATA_TITLE_TYPE = ["markdown", "number", "number", "number", "markdown"]
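# COLUMN_NAMES and DATA_TITLE_TYPE line up one-to-one: the prompt and correctness
# columns render as markdown, the pass@k columns as numbers. A minimal sketch of
# feeding them to a results table follows; `make_results_table` is a hypothetical
# helper and assumes `gradio` is installed.
def make_results_table():
    """Create the results table with markdown/number column types (illustrative only)."""
    import gradio as gr  # local import keeps gradio optional for plain constant access

    return gr.Dataframe(
        headers=COLUMN_NAMES,
        datatype=DATA_TITLE_TYPE,
        wrap=True,  # wrap long prompt text instead of truncating it
    )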
CITATION_BUTTON_LABEL = "NOTE: We only use preloaded samples. Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
@article{liang2025drag,
  title   = {Drag-and-Drop LLMs: Zero-Shot Prompt-to-Weights},
  author  = {Liang, Zhiyuan and Tang, Dongwen and Zhou, Yuhao and Zhao, Xuanlei and Shi, Mingjia and Zhao, Wangbo and Li, Zekai and Wang, Peihao and Sch{\"u}rholt, Konstantin and Borth, Damian and others},
  journal = {arXiv preprint arXiv:2506.16406},
  year    = {2025}
}
"""