Runtime error
Update app.py
app.py
CHANGED
@@ -12,29 +12,12 @@ DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
-#
-
-This Space demonstrates model [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta, a Llama 2 model with 13B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
-
-🔎 For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).
-
-🔨 Looking for an even more powerful model? Check out the large [**70B** model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).
-🐇 For a smaller model that you can run on many GPUs, check our [7B model demo](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat).
-
-"""
-
-LICENSE = """
-<p/>
-
----
-As a derivate work of [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta,
-this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat/blob/main/USE_POLICY.md).
+# palmer-003
 """
 
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
 
-
 if torch.cuda.is_available():
     model_id = "appvoid/palmer-003"
     model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=False)
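The retained `MAX_INPUT_TOKEN_LENGTH` setting caps prompts at 4096 tokens (overridable through the environment). A minimal sketch of how that cap is typically enforced in these chat Spaces, assuming a hypothetical `generate` helper and loading the model the same way as the hunk above (the Space's actual generation callback is outside this diff):

```python
import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

model_id = "appvoid/palmer-003"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")


def generate(prompt: str, max_new_tokens: int = 1024) -> str:
    # Tokenize, then keep only the most recent MAX_INPUT_TOKEN_LENGTH tokens
    # so a long conversation cannot overflow the model's context window.
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
    input_ids = input_ids.to(model.device)
    with torch.no_grad():
        output_ids = model.generate(input_ids, max_new_tokens=max_new_tokens)
    # Decode only the newly generated tokens, not the echoed prompt.
    return tokenizer.decode(output_ids[0, input_ids.shape[1]:], skip_special_tokens=True)
```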
@@ -139,9 +122,7 @@ chat_interface = gr.ChatInterface(
 
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
     chat_interface.render()
-    gr.Markdown(LICENSE)
 
 if __name__ == "__main__":
     demo.queue(max_size=20).launch()
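After this hunk, the UI is reduced to the description plus a bare chat widget: the `DuplicateButton` and the Llama license notice are gone. A runnable sketch of the remaining layout, with a placeholder `respond` function standing in for the real model call (not shown in this diff):

```python
import gradio as gr


def respond(message: str, history: list) -> str:
    # Placeholder responder; the real Space generates text with the model.
    return f"echo: {message}"


chat_interface = gr.ChatInterface(fn=respond)

with gr.Blocks(css="style.css") as demo:
    gr.Markdown("# palmer-003")
    chat_interface.render()

if __name__ == "__main__":
    demo.queue(max_size=20).launch()
```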