Sébastien De Greef committed on
Commit
4a4f086
·
1 Parent(s): 7cbd81a

change the app to a chatbot

Browse files
Files changed (3) hide show
  1. .gitignore +1 -0
  2. main.py +12 -26
  3. requirements.txt +2 -2
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .venv
main.py CHANGED
@@ -1,30 +1,16 @@
 
1
  import gradio as gr
2
- import torch
3
- import requests
4
- from torchvision import transforms
5
 
6
- model = torch.hub.load("pytorch/vision:v0.6.0", "resnet18", pretrained=True).eval()
7
- response = requests.get("https://git.io/JJkYN")
8
- labels = response.text.split("\n")
9
 
 
 
 
 
 
 
 
 
10
 
11
- def predict(inp):
12
- inp = transforms.ToTensor()(inp).unsqueeze(0)
13
- with torch.no_grad():
14
- prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
15
- confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
16
- return confidences
17
-
18
-
19
- def run():
20
- demo = gr.Interface(
21
- fn=predict,
22
- inputs=gr.Image(type="pil"),
23
- outputs=gr.Label(num_top_classes=3),
24
- )
25
-
26
- demo.launch(server_name="0.0.0.0", server_port=7860)
27
-
28
-
29
- if __name__ == "__main__":
30
- run()
 
1
# LangChain message types used to rebuild the chat transcript for the model.
from langchain.schema import AIMessage, HumanMessage
import gradio as gr
# Community-maintained LangChain wrapper around a locally served Ollama model.
from langchain_community.llms import Ollama

# Module-level LLM client shared by every chat request.
# NOTE(review): timeout=1000 is presumably seconds for slow local generations —
# confirm the unit against the langchain_community Ollama client docs.
llm = Ollama(model="mistral:7b", timeout=1000)
 
 
6
 
7
def predict(message, history):
    """Return the model's reply to *message* given the prior chat *history*.

    *history* is Gradio's tuple-style transcript: a list of
    ``(user_text, assistant_text)`` pairs. Each pair is converted into a
    ``HumanMessage`` / ``AIMessage`` couple, the new user *message* is
    appended as a final ``HumanMessage``, and the full conversation is
    sent to the module-level ``llm``.
    """
    conversation = []
    for user_turn, assistant_turn in history:
        conversation.extend(
            (HumanMessage(content=user_turn), AIMessage(content=assistant_turn))
        )
    conversation.append(HumanMessage(content=message))
    return llm.invoke(conversation)
15
 
16
# Build the chat UI around predict() and start serving it.
demo = gr.ChatInterface(predict)
demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
  gradio
2
- torch
3
- torchvision
4
  requests
 
1
  gradio
2
+ langchain
3
+ langchain-community
4
  requests