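"""Gradio chat UI for local Ollama models.

Models are listed in models.txt and offered in a dropdown; the chat box
accepts text and image uploads, and like/dislike feedback is logged to
stdout.
"""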
import gradio as gr
from langchain.schema import AIMessage, HumanMessage
from langchain_community.llms import Ollama
def parse_model_names(path):
    """Parse the model file into (label, value) pairs for the dropdown."""
    choices = []
    with open(path, "r") as file:
        for line in file:
            if "#" in line:  # lines without a '#' separator are ignored
                value, description = line.split("#", 1)
                choices.append((description.strip(), value.strip()))
    return choices
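# models.txt is assumed (based on the parser above) to hold one model per
# line, in the form "<ollama model name> # <dropdown label>", e.g. these
# hypothetical entries:
#
#   llama3     # Llama 3 8B - general chat
#   mistral    # Mistral 7B - fast responses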
models = parse_model_names("models.txt")
def predict(history, message, model):
    """Send the chat history to the selected Ollama model and fill in the reply.

    Wired as .then(predict, [chatbot, chat_input, model_dropdown], chatbot),
    so `history` is the Chatbot value, `message` is the (already cleared)
    textbox value, and `model` is the dropdown index.
    """
    print("Predicting", history, message, model)
    history_langchain_format = []
    for user_msg, bot_msg in history:
        if isinstance(user_msg, str):  # skip file uploads, stored as (filepath,) tuples
            history_langchain_format.append(HumanMessage(content=user_msg))
        if bot_msg is not None:
            history_langchain_format.append(AIMessage(content=bot_msg))
    try:
        # The dropdown uses type="index", so models[model] is a (label, value) pair.
        llm = Ollama(model=models[model][1], timeout=1000)
        chat_response = llm.invoke(history_langchain_format)
    except Exception as e:  # surface Ollama/network errors in the chat window
        chat_response = "Error: " + str(e)
    history[-1] = (history[-1][0], chat_response)  # attach the reply to the last turn
    return history
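# Gradio's tuple-format Chatbot stores the conversation as a list of
# [user, bot] pairs; add_message() below appends uploads as (filepath,)
# tuples on the user side, which is why predict() forwards only string
# entries to the model.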
# Alternative UI using gr.ChatInterface (commented out):
# with gr.Blocks(fill_height=True) as demo:
#     with gr.Row():
#         def update_model(selected_model):
#             print("Model selected", selected_model)
#             model_state.value = selected_model
#             return selected_model
#         chat = gr.ChatInterface(predict,
#                                 additional_inputs=[model_dropdown],
#                                 )
def print_like_dislike(x: gr.LikeData):
    """Log like/dislike feedback from the Chatbot's thumb buttons."""
    print(x.index, x.value, x.liked)
def add_message(history, message):
    """Append the user's turn (files first, then text) and lock the textbox."""
    # MultimodalTextbox yields {"text": str, "files": [paths]}; files are
    # stored as (filepath,) tuples so the Chatbot renders them as media.
    for x in message["files"]:
        history.append(((x,), None))
    if message["text"] is not None:
        history.append((message["text"], None))
    return history, gr.MultimodalTextbox(value=None, interactive=False)
with gr.Blocks() as demo:
    model_dropdown = gr.Dropdown(
        label="Select LLM Model",
        choices=models,
        info="Select the model you want to chat with",
        type="index",  # predict() receives the index into `models`
    )
    model_state = gr.State(value=model_dropdown.value)
    chatbot = gr.Chatbot(
        [],
        elem_id="chatbot",
        bubble_full_width=False,
    )
    chat_input = gr.MultimodalTextbox(
        interactive=True,
        file_types=["image"],
        placeholder="Enter message or upload file...",
        show_label=False,
    )
    # Submit flow: record the user turn, get the bot reply, then re-enable input.
    chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
    bot_msg = chat_msg.then(predict, [chatbot, chat_input, model_dropdown], chatbot, api_name="bot_response")
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])
    chatbot.like(print_like_dislike, None, None)
demo.queue()  # enable queuing so concurrent users don't block each other

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
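# To run locally (assuming this file is saved as app.py and an Ollama
# server is reachable at its default http://localhost:11434):
#
#   ollama serve    # in one terminal
#   python app.py   # then open http://localhost:7860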