ffreemt committed · Commit fa82d81 · 1 Parent(s): 42dc8e4
Update clean up and set 70b branch for forindo

app.py CHANGED
@@ -5,7 +5,7 @@ import os
 import platform
 import random
 import time
-from dataclasses import asdict, dataclass
+from dataclasses import asdict, dataclass, field
 from pathlib import Path
 
 # from types import SimpleNamespace

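The only change in this hunk is the extra `field` import, which the new `GenerationConfig` further down needs: dataclasses refuse a mutable default such as a list, so the stop-word list has to go through `field(default_factory=...)`. A minimal sketch of that rule (class and field names here are illustrative, not taken from the commit):

    from dataclasses import asdict, dataclass, field

    @dataclass
    class DemoConfig:
        temperature: float = 0.8
        # stop: list[str] = ["### Human:"]          # rejected: mutable default
        stop: list[str] = field(default_factory=lambda: ["### Human:"])

    print(asdict(DemoConfig()))  # {'temperature': 0.8, 'stop': ['### Human:']}
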
@@ -16,99 +16,19 @@ from ctransformers import AutoModelForCausalLM
 from dl_hf_model import dl_hf_model
 from loguru import logger
 
-
-
-"
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_M.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_S.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_0.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_1.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_S.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_0.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_1.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_M.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_S.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q6_K.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q8_0.bin",
-]
-
-URL = "https://huggingface.co/TheBloke/Wizard-Vicuna-7B-Uncensored-GGML/raw/main/Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin"  # 4.05G
-
-url = "https://huggingface.co/savvamadar/ggml-gpt4all-j-v1.3-groovy/blob/main/ggml-gpt4all-j-v1.3-groovy.bin"
-url = "https://huggingface.co/TheBloke/Llama-2-13B-GGML/blob/main/llama-2-13b.ggmlv3.q4_K_S.bin"  # 7.37G
-# url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.bin"
-url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.bin"  # 6.93G
-# url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.binhttps://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q4_K_M.bin"  # 7.87G
-
-url = "https://huggingface.co/localmodels/Llama-2-13B-Chat-ggml/blob/main/llama-2-13b-chat.ggmlv3.q4_K_S.bin"  # 7.37G
-
-_ = (
-    "golay" in platform.node()
-    or "okteto" in platform.node()
-    or Path("/kaggle").exists()
-    # or psutil.cpu_count(logical=False) < 4
-    or 1  # run 7b in hf
-)
-
-if _:
-    # url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q2_K.bin"
-    url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q2_K.bin"  # 2.87G
-    url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q4_K_M.bin"  # 2.87G
-
-
-prompt_template = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
-
-### Instruction: {user_prompt}
-
-### Response:
-"""
-
-[…]
-the answer to a question, please don't share false
-information.
-User: {prompt}
-Assistant: """
-
-prompt_template = """System: You are a helpful assistant.
-User: {prompt}
-Assistant: """
-
-prompt_template = """Question: {question}
-Answer: Let's work this out in a step by step way to be sure we have the right answer."""
-
-prompt_template = """[INST] <>
-You are a helpful, respectful and honest assistant. Always answer as helpfully as possible assistant. Think step by step.
-<>
-
-What NFL team won the Super Bowl in the year Justin Bieber was born?
-[/INST]"""
-
-prompt_template = """[INST] <<SYS>>
-You are an unhelpful assistant. Always answer as helpfully as possible. Think step by step. <</SYS>>
-
-{question} [/INST]
-"""
-
-prompt_template = """[INST] <<SYS>>
-You are a helpful assistant.
-<</SYS>>
-
-{question} [/INST]
-"""
-
-_ = [elm for elm in prompt_template.splitlines() if elm.strip()]
-stop_string = [elm.split(":")[0] + ":" for elm in _][-2]
-
-logger.debug(f"{stop_string=}")
+url = "https://huggingface.co/TheBloke/llama-2-13B-Guanaco-QLoRA-GGML/blob/main/llama-2-13b-guanaco-qlora.ggmlv3.q4_K_S.bin"  # 8.14G
+if "forindo" in platform.node():
+    url = "https://huggingface.co/TheBloke/llama-2-70b-Guanaco-QLoRA-GGML/blob/main/llama-2-70b-guanaco-qlora.ggmlv3.q3_K_S.bin"  # 29.7G
+
+# Prompt template: Guanaco
+prompt_template = """You are a helpful assistant. Let's think step by step.
+{history}
+### Human:
+{input}
+### Assistant:"""
+human_prefix = "### Human"
+ai_prefix = "### Assistant"
+stop_list = [f"{human_prefix}:"]
 
 _ = psutil.cpu_count(logical=False) - 1
 cpu_count: int = int(_) if _ else 1

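This hunk swaps the pile of commented-out Wizard-Vicuna/Llama-2 URLs and alternative prompt templates for a single Guanaco-style template plus an explicit stop list, and picks the 70B GGML file only on the `forindo` host. The template is an ordinary format string with `{history}` and `{input}` slots, and `stop_list` makes generation halt as soon as the model starts a new `### Human:` turn. A rough sketch of how such a template and stop list are typically fed to a ctransformers model (the call shape is assumed from ctransformers' API, not shown in this diff):

    # Illustrative only; LLM is the ctransformers model loaded below.
    history = "### Human:\nHi\n### Assistant: Hello! How can I help?"
    user_input = "Explain the plot of Cinderella in a sentence."

    prompt = prompt_template.format(history=history, input=user_input)

    # The model object is callable; `stop` cuts output at the next Human turn.
    reply = LLM(prompt, stop=stop_list, max_new_tokens=256, temperature=0.8)
    print(reply.strip())
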
@@ -125,7 +45,7 @@ except Exception as exc_:
 LLM = AutoModelForCausalLM.from_pretrained(
     model_loc,
     model_type="llama",
-
+    threads=cpu_count,
 )
 
 logger.info(f"done load llm {model_loc=} {file_size=}G")

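The one-line addition here, `threads=cpu_count`, pins ggml inference to the physical-core count computed in the context lines above from `psutil.cpu_count(logical=False)`. In ctransformers, `threads` is a model config option, so a value given to `from_pretrained` becomes the default for every generation; treating it as also overridable per call is an assumption about that API, sketched below:

    # Assumed from ctransformers' config options; the commit only sets it at load time.
    llm = AutoModelForCausalLM.from_pretrained(
        model_loc,
        model_type="llama",
        threads=cpu_count,   # default thread count for all generations
    )
    out = llm("### Human:\nhello\n### Assistant:", stop=stop_list, threads=cpu_count)
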
@@ -137,12 +57,6 @@ except Exception:
     # Windows
     logger.warning("Windows, cant run time.tzset()")
 
-_ = """
-ns = SimpleNamespace(
-    response="",
-    generator=(_ for _ in []),
-)
-# """
 
 @dataclass
 class GenerationConfig:

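The `# Windows` warning in the context lines is the tail of the usual guard around `time.tzset()`, which only exists on POSIX systems; the removed lines were just a commented-out `SimpleNamespace` scratch block. A minimal sketch of that guard, reconstructed from the context rather than copied from the file:

    import os
    import time

    from loguru import logger

    os.environ["TZ"] = "UTC"  # illustrative zone; the app picks its own
    try:
        time.tzset()  # type: ignore  # POSIX-only
    except Exception:
        # Windows
        logger.warning("Windows, cant run time.tzset()")
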
@@ -154,8 +68,8 @@ class GenerationConfig:
     seed: int = 42
     reset: bool = False
     stream: bool = True
-
-
+    threads: int = cpu_count
+    stop: list[str] = field(default_factory=lambda: stop_list)
 
 
 def generate(

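With `threads` and `stop` now fields on `GenerationConfig`, the whole dataclass can be splatted into a ctransformers call through the `asdict` import from the first hunk. The body of `generate()` is not part of this diff, so the hand-off below is an assumed shape, not the app's actual code:

    config = GenerationConfig()   # picks up threads=cpu_count and stop=stop_list
    prompt = prompt_template.format(history="", input="is infinity + 1 bigger than infinity?")

    # seed, reset, stream, threads and stop are all accepted by ctransformers' LLM call.
    for chunk in LLM(prompt, **asdict(config)):
        print(chunk, end="", flush=True)
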
@@ -237,7 +151,7 @@ def bot(history):
         f"{atime.duration/len(''.join(response)):.2f}s/char)"  # type: ignore
     )
 
-    history[-1][1] = "".join(response)
+    history[-1][1] = "".join(response) + f"\n{_}"
     yield history
 
 

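The only change in `bot()` is that the finished reply now gets `f"\n{_}"` appended; judging from the `atime.duration` lines just above, `_` holds a short timing summary, so the timing line ends up under the answer in the chat window. A rough sketch of the surrounding streaming handler (shape and variable names assumed, not verbatim from the file):

    def bot(history):
        user_message = history[-1][0]
        prompt = prompt_template.format(history="", input=user_message)

        response = []
        start = time.time()
        for chunk in LLM(prompt, stream=True, stop=stop_list):
            response.append(chunk)
            history[-1][1] = "".join(response)
            yield history                      # stream partial text to the UI

        elapsed = time.time() - start
        _ = f"({elapsed:.1f}s, {elapsed / max(len(''.join(response)), 1):.2f}s/char)"
        history[-1][1] = "".join(response) + f"\n{_}"   # final text plus timing footer
        yield history
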
@@ -292,7 +206,9 @@ examples_list = [
         "What NFL team won the Super Bowl in the year Justin Bieber was born? Think step by step."
     ],
     ["How to pick a lock? Provide detailed steps."],
-    [
+    [
+        "If it takes 10 hours to dry 10 clothes, assuming all the clothes are hanged together at the same time for drying , then how long will it take to dry a cloth?"
+    ],
     ["is infinity + 1 bigger than infinity?"],
     ["Explain the plot of Cinderella in a sentence."],
     [

@@ -332,9 +248,6 @@ with gr.Blocks(
 ) as block:
     # buff_var = gr.State("")
     with gr.Accordion("🎈 Info", open=False):
-        # gr.HTML(
-        #     """<center><a href="https://huggingface.co/spaces/mikeee/mpt-30b-chat?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate"></a> and spin a CPU UPGRADE to avoid the queue</center>"""
-        # )
         gr.Markdown(
             f"""<h5><center>{Path(model_loc).name}</center></h4>
             Most examples are meant for another model.

@@ -457,7 +370,6 @@ else:
     # concurrency_count = max(int(16 / file_size) - 1, 1)
 # """
 
-concurrency_count = 1
-
-
-block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True)
+# default concurrency_count = 1
+# block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True)
+block.queue(max_size=5).launch(debug=True, server_name="0.0.0.0")

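The last hunk drops the explicit `concurrency_count = 1` (the commit's own comment notes that 1 is already the default) and binds the server to `0.0.0.0`, so the app is reachable from outside the container's loopback interface. A stripped-down sketch of the same queue-and-launch pattern (Gradio 3.x style, which the removed `concurrency_count` argument implies):

    import gradio as gr

    with gr.Blocks() as block:
        gr.Markdown("demo")

    # max_size=5 caps queued requests; server_name="0.0.0.0" listens on all interfaces.
    block.queue(max_size=5).launch(debug=True, server_name="0.0.0.0")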