AndreaAlessandrelli4 committed on
Commit
66bc4cd
·
verified ·
1 Parent(s): 9a02cf3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +58 -15
app.py CHANGED
@@ -70,7 +70,7 @@ def generate(
70
  outputs.append(text)
71
  yield "".join(outputs)
72
 
73
- image_path = "AvvoVhat.png"
74
 
75
  with gr.Blocks() as demo:
76
  gr.Markdown("# AvvoChat")
@@ -82,10 +82,60 @@ with gr.Blocks() as demo:
82
 
83
  with gr.Row():
84
  with gr.Column(scale=0.5, min_width=100):
85
- gr.Image(image_path, width=50, height=200)
86
  with gr.Column(scale=6):
87
  chatbox = gr.Chatbot(label="AvvoChat")
88
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
  def on_send_message(message, chat_history, max_new_tokens, temperature, top_p, top_k, do_sample, repetition_penalty):
90
  response_stream = generate(
91
  message=message,
@@ -109,12 +159,7 @@ with gr.Blocks() as demo:
109
  inputs=[
110
  msg,
111
  chatbox,
112
- gr.State(value=DEFAULT_MAX_NEW_TOKENS),
113
- gr.State(value=0.6),
114
- gr.State(value=0.9),
115
- gr.State(value=50),
116
- gr.State(value=False),
117
- gr.State(value=1.2)
118
  ],
119
  outputs=[chatbox, msg],
120
  )
@@ -123,15 +168,13 @@ with gr.Blocks() as demo:
123
  inputs=[
124
  msg,
125
  chatbox,
126
- gr.State(value=DEFAULT_MAX_NEW_TOKENS),
127
- gr.State(value=0.6),
128
- gr.State(value=0.9),
129
- gr.State(value=50),
130
- gr.State(value=False),
131
- gr.State(value=1.2)
132
  ],
133
  outputs=[chatbox, msg],
134
  )
135
 
 
 
 
136
  if __name__ == "__main__":
137
- demo.launch(share=True)
 
70
  outputs.append(text)
71
  yield "".join(outputs)
72
 
73
+ image_path = "/home/a.alessandrelli/LLM_Dante/Raccolta_doc_index/AvvoVhat.png"
74
 
75
  with gr.Blocks() as demo:
76
  gr.Markdown("# AvvoChat")
 
82
 
83
  with gr.Row():
84
  with gr.Column(scale=0.5, min_width=100):
85
+ gr.Image(image_path, label="AvvoChat Logo", width=50, height=200)
86
  with gr.Column(scale=6):
87
  chatbox = gr.Chatbot(label="AvvoChat")
88
 
89
+ additional_inputs = [
90
+ gr.Slider(
91
+ label="Max new tokens",
92
+ minimum=1,
93
+ maximum=MAX_MAX_NEW_TOKENS,
94
+ step=1,
95
+ value=DEFAULT_MAX_NEW_TOKENS,
96
+ ),
97
+ gr.Slider(
98
+ label="Temperature",
99
+ minimum=0.1,
100
+ maximum=4.0,
101
+ step=0.1,
102
+ value=0.6,
103
+ ),
104
+ gr.Slider(
105
+ label="Top-p (nucleus sampling)",
106
+ minimum=0.05,
107
+ maximum=1.0,
108
+ step=0.05,
109
+ value=0.9,
110
+ ),
111
+ gr.Slider(
112
+ label="Top-k",
113
+ minimum=1,
114
+ maximum=1000,
115
+ step=1,
116
+ value=50,
117
+ ),
118
+ gr.Checkbox(
119
+ label="Do-sample (False)",
120
+ value=False,
121
+ ),
122
+ gr.Slider(
123
+ label="Repetition penalty",
124
+ minimum=1.0,
125
+ maximum=2.0,
126
+ step=0.05,
127
+ value=1.2,
128
+ ),
129
+ ]
130
+
131
+ examples = [
132
+ ["Posso fare un barbecue sul balcone di casa?"],
133
+ ["Posso essere multato se esco di casa senza documento d'identità?"],
134
+ ["Una persona single può adottare un bambino?"],
135
+ ["Posso usare un immagine creada con l'intelligenza artificiale?"],
136
+ ["Se il mio pallone da calcio cade in un giardino di un'abitazione privata, poss scavalcare il concello per riprendermelo?"],
137
+ ]
138
+
139
  def on_send_message(message, chat_history, max_new_tokens, temperature, top_p, top_k, do_sample, repetition_penalty):
140
  response_stream = generate(
141
  message=message,
 
159
  inputs=[
160
  msg,
161
  chatbox,
162
+ *additional_inputs
 
 
 
 
 
163
  ],
164
  outputs=[chatbox, msg],
165
  )
 
168
  inputs=[
169
  msg,
170
  chatbox,
171
+ *additional_inputs
 
 
 
 
 
172
  ],
173
  outputs=[chatbox, msg],
174
  )
175
 
176
+ for example in examples:
177
+ gr.Examples(examples=[example], inputs=msg)
178
+
179
  if __name__ == "__main__":
180
+ demo.queue(max_size=20).launch(share=True)