Spaces: Running on CPU Upgrade
"""Configuration constants for the Vectara hallucination leaderboard Space.

Defines the Hugging Face repos used for requests/results, local cache
paths, the evaluation device, and the prompts used for summarization.
"""
import os

import torch
from huggingface_hub import HfApi

# Hugging Face auth token; supplied via the HF_TOKEN environment variable.
# None (anonymous access) when the variable is unset.
TOKEN = os.environ.get("HF_TOKEN", None)

# Hub organization and the repos this Space reads from / writes to.
OWNER = "vectara"
REPO_ID = f"{OWNER}/leaderboard"
QUEUE_REPO = f"{OWNER}/requests"
RESULTS_REPO = f"{OWNER}/results"
LEADERBOARD_DATASET_REPO = f"{OWNER}/leaderboard_results"

# Root directory for local caches; falls back to the current directory
# when HF_HOME is not set.
CACHE_PATH = os.getenv("HF_HOME", ".")

# Local caches for the evaluation queue and results (plus backend copies).
EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")

# Run on GPU when one is available, otherwise fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Authenticated Hub client shared by the rest of the app.
API = HfApi(token=TOKEN)

# CSV files: published leaderboard summaries and the evaluation datasets.
LEADERBOARD_DATASET_PATH = "leaderboard_results/leaderboard_summaries.csv"
DATASET_PATH = "src/datasets/leaderboard_dataset.csv"
SAMPLE_DATASET_PATH = "src/datasets/sample_dataset.csv"

# Hallucination evaluation model (HHEM) repo id on the Hub.
HEM_PATH = "vectara/HHEM-2.1"

# Prompts used when asking candidate models to summarize a passage.
SYSTEM_PROMPT = "You are a chat bot answering questions using data. You must stick to the answers provided solely by the text in the passage provided."
USER_PROMPT = "You are asked the question 'Provide a concise summary of the following passage, covering the core pieces of information described': "