gneya committed on
Commit
58deadd
·
verified ·
1 Parent(s): e322dc3

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +106 -0
  2. requirements.txt +12 -0
app.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import io
import re
import sys
from typing import Any, Callable

import streamlit as st

from utils import final_function

# Build the ConversationalRetrievalChain and its conversation memory once per
# script run. Streamlit re-executes this module on every user interaction, so
# this call happens on each rerun.
# NOTE(review): the original imported `streamlit as st` twice; the duplicate
# import has been removed and all imports consolidated at the top.
qa, memory = final_function()
11
def capture_and_display_output(func: Callable[..., Any], args, **kwargs) -> Any:
    """Run *func*, capture everything it prints to stdout, and display the
    cleaned transcript inside a Streamlit expander.

    Parameters
    ----------
    func : callable to invoke (here, the ConversationalRetrievalChain).
    args : single positional argument forwarded to ``func``.
    **kwargs : keyword arguments forwarded to ``func``.

    Returns
    -------
    Whatever ``func`` returns.
    """
    # Redirect the standard output into an in-memory buffer.
    original_stdout = sys.stdout
    sys.stdout = output_catcher = io.StringIO()
    try:
        # Run the given function and capture its output.
        response = func(args, **kwargs)
    finally:
        # Always restore stdout — the original only restored it on the happy
        # path, so an exception in func left stdout captured for good.
        sys.stdout = original_stdout

    # Strip ANSI escape sequences (terminal colors etc.) from the capture.
    # The original pattern r"\x1b[.?[@-~]" was a malformed character class
    # (unescaped '[') and never matched an escape sequence; this is the
    # standard ECMA-48 CSI-stripping pattern.
    output_text = output_catcher.getvalue()
    clean_text = re.sub(r"\x1b\[[0-?]*[ -/]*[@-~]", "", output_text)

    # Custom CSS for the response box. ('#' is not a CSS comment delimiter —
    # the original '# Change this value...' was invalid CSS; use /* */.)
    st.markdown("""
    <style>
    .response-value {
        border: 2px solid #6c757d;
        border-radius: 5px;
        padding: 20px;
        background-color: #f8f9fa;
        color: #3d3d3d;
        font-size: 20px; /* adjust this value to change the text size */
        font-family: monospace;
    }
    </style>
    """, unsafe_allow_html=True)

    # Show the captured verbose/log output in a collapsible section.
    with st.expander("See Langchain Thought Process"):
        # Display the cleaned text in Streamlit as code
        st.code(clean_text)

    return response
# Make sure the chat transcript exists in session state; `setdefault` is a
# no-op when the key is already present, so history survives app reruns.
st.session_state.setdefault("messages", [])

# Replay every stored message so the conversation stays visible after a rerun.
for past_msg in st.session_state.messages:
    with st.chat_message(past_msg["role"]):
        st.markdown(past_msg["content"])
56
def chat_ui(qa):
    """Render the chat input and handle one question/answer round.

    Parameters
    ----------
    qa : ConversationalRetrievalChain (or compatible callable). Expected to
        return a mapping with "answer" and "source_documents" keys.
    """
    # Accept user input; the walrus skips everything when there is no prompt.
    if prompt := st.chat_input(
        "Ask me questions: How can I retrieve data from Deep Lake in Langchain?"
    ):

        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})

        # Display user message in chat message container
        with st.chat_message("user"):
            st.markdown(prompt)

        # Display assistant response in chat message container
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""

            # Load the memory variables, which include the chat history.
            # NOTE(review): `memory` is a module-level global from
            # final_function(); it is only read here, never explicitly
            # updated — presumably the chain saves context itself. Confirm
            # against utils.final_function.
            memory_variables = memory.load_memory_variables({})

            # Predict the AI's response in the conversation
            with st.spinner("Searching course material"):
                response = capture_and_display_output(
                    qa, ({"question": prompt, "chat_history": memory_variables})
                )

            # Display chat response
            full_response += response["answer"]
            message_placeholder.markdown(full_response + "▌")
            message_placeholder.markdown(full_response)

            # Display up to the top 2 retrieved sources. The original
            # indexed [0] and [1] unconditionally, which raised IndexError
            # whenever the retriever returned fewer than two documents; the
            # two copy-pasted display stanzas are also folded into one loop.
            docs = response.get("source_documents", [])
            with st.expander("See Resources"):
                for doc in docs[:2]:
                    meta = doc.metadata
                    st.write(f"Title: {meta['title'].split('·')[0].strip()}")
                    st.write(f"Source: {meta['source']}")
                    st.write(f"Relevance to Query: {meta['relevance_score'] * 100}%")

        # Append message to session state
        st.session_state.messages.append(
            {"role": "assistant", "content": full_response}
        )


# Run function passing the ConversationalRetrievalChain
chat_ui(qa)
requirements.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ langchain
2
+ langchain-community
3
+ deeplake
4
+ langchain-groq
5
+ cohere
6
+ apify-client
7
+ tiktoken
8
+ python-dotenv
9
+ # langchain-groq was listed twice; duplicate entry removed (kept at line 4)
10
+ langchain-cohere
11
+ lark
12
+ streamlit