Canstralian committed
Commit a182cbf · verified · 1 Parent(s): 68997ab

Update app.py

Files changed (1)
  1. app.py +129 -62
app.py CHANGED
@@ -1,73 +1,140 @@
 import streamlit as st
-from streamlit_chat import message
-from streamlit_extras.colored_header import colored_header
-from streamlit_extras.add_vertical_space import add_vertical_space
-from hugchat import hugchat
-from hugchat.login import Login
-
-st.set_page_config(page_title="HugChat - An LLM-powered Streamlit app")
-
-# Sidebar contents
-with st.sidebar:
-    st.title('🤗💬 HugChat App')
-
-    st.header('Hugging Face Login')
-    hf_email = st.text_input('Enter E-mail:', type='password')
-    hf_pass = st.text_input('Enter password:', type='password')
-
-    st.markdown('''
-    ## About
-    This app is an LLM-powered chatbot built using:
-    - [Streamlit](https://streamlit.io/)
-    - [HugChat](https://github.com/Soulter/hugging-chat-api)
-    - [OpenAssistant/oasst-sft-6-llama-30b-xor](https://huggingface.co/OpenAssistant/oasst-sft-6-llama-30b-xor) LLM model
-
-    ''')
-    add_vertical_space(5)
-    st.write('Made with ❤️ by [Data Professor](https://youtube.com/dataprofessor)')
-
-# Generate empty lists for generated and past.
-## generated stores AI generated responses
-if 'generated' not in st.session_state:
-    st.session_state['generated'] = ["I'm HugChat, How may I help you?"]
-## past stores User's questions
-if 'past' not in st.session_state:
-    st.session_state['past'] = ['Hi!']
-
-# Layout of input/response containers
-input_container = st.container()
-colored_header(label='', description='', color_name='blue-30')
-response_container = st.container()
-
-# User input
-## Function for taking user provided prompt as input
-def get_text():
-    input_text = st.text_input("You: ", "", key="input")
-    return input_text
-## Applying the user input box
-with input_container:
-    user_input = get_text()
-
-# Response output
-## Function for taking user prompt as input followed by producing AI generated responses
-def generate_response(prompt, email, passwd):
-    # Hugging Face Login
-    sign = Login(email, passwd)
-    cookies = sign.login()
-    sign.saveCookies()
-    # Create ChatBot
-    chatbot = hugchat.ChatBot(cookies=cookies.get_dict())
-    response = chatbot.chat(prompt)
-    return response
-
-## Conditional display of AI generated responses as a function of user provided prompts
-with response_container:
-    if user_input and hf_email and hf_pass:
-        response = generate_response(user_input, hf_email, hf_pass)
-        st.session_state.past.append(user_input)
-        st.session_state.generated.append(response)
-
-    if st.session_state['generated']:
-        for i in range(len(st.session_state['generated'])):
-            message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
-            message(st.session_state["generated"][i], key=str(i))
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from datasets import load_dataset
+from gtts import gTTS
+import os
+import re
+import random
+
+# Enable Dark Mode and Custom CSS
+st.markdown(
+    """
+    <style>
+    body {
+        background-color: #121212;
+        color: white;
+    }
+    .css-1d391kg {
+        background-color: #333;
+    }
+    .stButton > button {
+        background-color: #6200ee;
+        color: white;
+    }
+    .stTextInput input {
+        background-color: #333;
+        color: white;
+    }
+    </style>
+    """,
+    unsafe_allow_html=True,
+)
+
+# Load models and datasets
+try:
+    code_llama_model = AutoModelForCausalLM.from_pretrained("meta-llama/CodeLlama-7B-Python")
+    code_llama_tokenizer = AutoTokenizer.from_pretrained("meta-llama/CodeLlama-7B-Python")
+except Exception as e:
+    st.error(f"Error loading model: {e}")
+    code_llama_model = None
+    code_llama_tokenizer = None
+
+try:
+    wordlist_dataset = load_dataset("Canstralian/Wordlists")
+except Exception as e:
+    st.error(f"Error loading Wordlist dataset: {e}")
+    wordlist_dataset = None
+
+# Initialize chat history storage
+if "messages" not in st.session_state:
+    st.session_state.messages = [{"role": "assistant", "content": "How may I assist you?"}]
+
+# Function to validate the prompt using regular expressions
+def validate_prompt(prompt: str) -> bool:
+    """
+    Validates if the input prompt is not empty and meets some basic format rules.
+    Args:
+        prompt (str): The input prompt to be validated.
+    Returns:
+        bool: True if the prompt is valid, False otherwise.
+    """
+    # Improved validation: Allow alphanumeric characters, spaces, and punctuation
+    if re.match(r'^[A-Za-z0-9\s\.,;!?(){}[\]]+$', prompt):
+        return True
+    return False
+
+# Function to convert text to speech
+def text_to_speech(text: str) -> None:
+    """
+    Converts text to speech using gTTS and saves it as an MP3 file.
+    Args:
+        text (str): The text to be converted to speech.
+    """
+    try:
+        tts = gTTS(text, lang='en')
+        tts.save("response.mp3")
+        os.system("mpg321 response.mp3")
+    except Exception as e:
+        st.error(f"Error generating speech: {e}")
+
+# Display chat history
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.write(message["content"])
+
+# Function to generate chatbot response
+def generate_response(prompt: str) -> str:
+    """
+    Generates a response from the assistant based on the user input.
+    Args:
+        prompt (str): The user's input prompt.
+    Returns:
+        str: The generated response from the assistant.
+    """
+    if code_llama_model and code_llama_tokenizer:
+        if "python" in prompt.lower():
+            # Use the Code Llama model for code-related queries
+            inputs = code_llama_tokenizer(prompt, return_tensors="pt")
+            outputs = code_llama_model.generate(**inputs, max_length=150, num_return_sequences=1)
+            response = code_llama_tokenizer.decode(outputs[0], skip_special_tokens=True)
+        else:
+            response = "I'm here to assist with your queries."
+    else:
+        response = "Model not loaded. Please try again later."
+
+    if "osint" in prompt.lower():
+        # Respond with dataset-based OSINT information
+        response = "OSINT data analysis coming soon!"
+    elif "wordlist" in prompt.lower() and wordlist_dataset:
+        # Fetch and display a random entry from the Wordlist dataset
+        wordlist_entry = random.choice(wordlist_dataset["train"])["text"]
+        response = f"Here's a random wordlist entry: {wordlist_entry}"
+
+    return response
+
+# User input handling
+if prompt := st.chat_input():
+    # Validate user input
+    if validate_prompt(prompt):
+        st.session_state.messages.append({"role": "user", "content": prompt})
+        with st.chat_message("user"):
+            st.write(prompt)
+
+        # Generate and display response with smooth animations
+        with st.chat_message("assistant"):
+            with st.spinner("Assistant is typing..."):
+                response = generate_response(prompt)
+                st.write(response)
+
+        # Text-to-Speech integration for the assistant's response
+        text_to_speech(response)
+
+        # Store the assistant's response
+        st.session_state.messages.append({"role": "assistant", "content": response})
+    else:
+        st.warning("Invalid input. Please ensure your input contains only valid characters.")
+
+# User Feedback Section
+feedback = st.selectbox("How was your experience?", ["😊 Excellent", "😐 Okay", "😕 Poor"])
+if feedback:
+    st.success(f"Thank you for your feedback: {feedback}", icon="✅")