ThomasFfefefef committed
Commit 2f28407 · 1 Parent(s): 5236c4f

Removed top_p

Files changed (1)
  app.py  +10 -3
app.py CHANGED
@@ -6,7 +6,15 @@ import requests
  # Template
  title = "A conversation with Gandalf (GPTJ-6B) 🧙"
  description = ""
- article = """<img src='http://www.simoninithomas.com/test/gandalf.jpg', alt="Gandalf"/>"""
+ article = """
+ <p> To reset you need to reload the page </p>
+ <h2> Parameters: </h2>
+ <ul>
+ <li>temperature: (sampling temperature) higher values means the model will take more risks.</li>
+ <li>max_new_tokens: Max number of tokens in generation.</li>
+ </ul>
+
+ <img src='http://www.simoninithomas.com/test/gandalf.jpg', alt="Gandalf"/>"""
  theme="huggingface"
  examples = [[0.9, 1.1, 50, "Hey Gandalf! How are you?"], [0.9, 1.1, 50, "Hey Gandalf, why you didn't use the great eagles to fly Frodo to Mordor?"]]

@@ -49,7 +57,6 @@ def chat(top_p, temperature, max_new_tokens, message):
  json_ = {"inputs": prompt,
    "parameters":
    {
-   "top_p": top_p,
    "temperature": temperature,
    "max_new_tokens": max_new_tokens,
    "return_full_text": False
@@ -71,7 +78,7 @@ def chat(top_p, temperature, max_new_tokens, message):
  iface = gr.Interface(
    chat,
    [
-   gr.inputs.Slider(minimum=0.5, maximum=1, step=0.05, default=0.9, label="top_p"),
+   #gr.inputs.Slider(minimum=0.5, maximum=1, step=0.05, default=0.9, label="top_p"),
    gr.inputs.Slider(minimum=0.5, maximum=1.5, step=0.1, default=1.1, label="temperature"),
    gr.inputs.Slider(minimum=20, maximum=250, step=10, default=50, label="max_new_tokens"),
    "text",