sitammeur committed on
Commit 142852f · verified · 1 Parent(s): b2d3123

Upload 3 files

Files changed (3):
  1. app.py +211 -198
  2. exception.py +50 -0
  3. logger.py +21 -0
app.py CHANGED
@@ -1,198 +1,211 @@
- # Importing required libraries
- import warnings
- warnings.filterwarnings("ignore")
-
- import json
- import subprocess
- from llama_cpp import Llama
- from llama_cpp_agent import LlamaCppAgent
- from llama_cpp_agent import MessagesFormatterType
- from llama_cpp_agent.providers import LlamaCppPythonProvider
- from llama_cpp_agent.chat_history import BasicChatHistory
- from llama_cpp_agent.chat_history.messages import Roles
- import gradio as gr
- from huggingface_hub import hf_hub_download
-
-
- # Download gguf model files
- llm = None
- llm_model = None
-
- hf_hub_download(
-     repo_id="bartowski/Dolphin3.0-Llama3.2-1B-GGUF",
-     filename="Dolphin3.0-Llama3.2-1B-Q6_K.gguf",
-     local_dir="./models",
- )
- hf_hub_download(
-     repo_id="bartowski/Dolphin3.0-Llama3.2-3B-GGUF",
-     filename="Dolphin3.0-Llama3.2-3B-Q4_K_M.gguf",
-     local_dir="./models",
- )
-
- # Set the title and description
- title = "Dolphin🐬 Llamacpp"
- description = """Dolphin 3.0 is a powerful, general-purpose local AI model designed for coding, math, and various other tasks, aiming similar to the models like ChatGPT and Claude."""
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     model,
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
-     top_k,
-     repeat_penalty,
- ):
-     """
-     Respond to a message using the Dolphin-3 model via Llama.cpp.
-
-     Args:
-         - message (str): The message to respond to.
-         - history (list[tuple[str, str]]): The chat history.
-         - model (str): The model to use.
-         - system_message (str): The system message to use.
-         - max_tokens (int): The maximum number of tokens to generate.
-         - temperature (float): The temperature of the model.
-         - top_p (float): The top-p of the model.
-         - top_k (int): The top-k of the model.
-         - repeat_penalty (float): The repetition penalty of the model.
-
-     Returns:
-         str: The response to the message.
-     """
-     # Load the global variables
-     global llm
-     global llm_model
-
-     # Load the model
-     if llm is None or llm_model != model:
-         llm = Llama(
-             model_path=f"models/{model}",
-             flash_attn=False,
-             n_gpu_layers=0,
-             n_batch=32,
-             n_ctx=8192,
-         )
-         llm_model = model
-     provider = LlamaCppPythonProvider(llm)
-
-     # Create the agent
-     agent = LlamaCppAgent(
-         provider,
-         system_prompt=f"{system_message}",
-         predefined_messages_formatter_type=MessagesFormatterType.CHATML,
-         debug_output=True,
-     )
-
-     # Set the settings like temperature, top-k, top-p, max tokens, etc.
-     settings = provider.get_provider_default_settings()
-     settings.temperature = temperature
-     settings.top_k = top_k
-     settings.top_p = top_p
-     settings.max_tokens = max_tokens
-     settings.repeat_penalty = repeat_penalty
-     settings.stream = True
-
-     messages = BasicChatHistory()
-
-     # Add the chat history
-     for msn in history:
-         user = {"role": Roles.user, "content": msn[0]}
-         assistant = {"role": Roles.assistant, "content": msn[1]}
-         messages.add_message(user)
-         messages.add_message(assistant)
-
-     # Get the response stream
-     stream = agent.get_chat_response(
-         message,
-         llm_sampling_settings=settings,
-         chat_history=messages,
-         returns_streaming_generator=True,
-         print_output=False,
-     )
-
-     # Generate the response
-     outputs = ""
-     for output in stream:
-         outputs += output
-         yield outputs
-
-
- # Create a chat interface
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs_accordion=gr.Accordion(
-         label="⚙️ Parameters", open=False, render=False
-     ),
-     additional_inputs=[
-         gr.Dropdown(
-             choices=[
-                 "Dolphin3.0-Llama3.2-1B-Q6_K.gguf",
-                 "Dolphin3.0-Llama3.2-3B-Q4_K_M.gguf",
-             ],
-             value="Dolphin3.0-Llama3.2-1B-Q6_K.gguf",
-             label="Model",
-             info="Select the AI model to use for chat"
-         ),
-         gr.Textbox(
-             value="You are Dolphin, a helpful AI assistant focused on accurate and ethical responses.",
-             label="System Prompt",
-             info="Define the AI assistant's personality and behavior",
-             lines=2
-         ),
-         gr.Slider(
-             minimum=512,
-             maximum=4096,
-             value=2048,
-             step=512,
-             label="Max Tokens",
-             info="Maximum length of response (higher = longer replies)"
-         ),
-         gr.Slider(
-             minimum=0.1,
-             maximum=2.0,
-             value=0.7,
-             step=0.1,
-             label="Temperature",
-             info="Creativity level (higher = more creative, lower = more focused)"
-         ),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p",
-             info="Nucleus sampling threshold"
-         ),
-         gr.Slider(
-             minimum=1,
-             maximum=100,
-             value=40,
-             step=1,
-             label="Top-k",
-             info="Limit vocabulary choices to top K tokens"
-         ),
-         gr.Slider(
-             minimum=1.0,
-             maximum=2.0,
-             value=1.1,
-             step=0.1,
-             label="Repetition Penalty",
-             info="Penalize repeated words (higher = less repetition)"
-         )
-     ],
-     theme="Ocean",
-     submit_btn="Send",
-     stop_btn="Stop",
-     title=title,
-     description=description,
-     chatbot=gr.Chatbot(scale=1, show_copy_button=True),
-     flagging_mode="never",
- )
-
-
- # Launch the chat interface
- if __name__ == "__main__":
-     demo.launch(debug=False)
+ # Importing required libraries
+ import warnings
+ warnings.filterwarnings("ignore")
+
+ import json
+ import subprocess
+ import sys
+ from llama_cpp import Llama
+ from llama_cpp_agent import LlamaCppAgent
+ from llama_cpp_agent import MessagesFormatterType
+ from llama_cpp_agent.providers import LlamaCppPythonProvider
+ from llama_cpp_agent.chat_history import BasicChatHistory
+ from llama_cpp_agent.chat_history.messages import Roles
+ import gradio as gr
+ from huggingface_hub import hf_hub_download
+ from typing import List, Tuple
+ from logger import logging
+ from exception import CustomExceptionHandling
+
+
+ # Download gguf model files
+ llm = None
+ llm_model = None
+
+ hf_hub_download(
+     repo_id="bartowski/Dolphin3.0-Llama3.2-1B-GGUF",
+     filename="Dolphin3.0-Llama3.2-1B-Q6_K.gguf",
+     local_dir="./models",
+ )
+ hf_hub_download(
+     repo_id="bartowski/Dolphin3.0-Llama3.2-3B-GGUF",
+     filename="Dolphin3.0-Llama3.2-3B-Q4_K_M.gguf",
+     local_dir="./models",
+ )
+
+ # Set the title and description
+ title = "Dolphin🐬 Llamacpp"
+ description = """Dolphin 3.0 is a powerful, general-purpose local AI model designed for coding, math, and various other tasks, aiming to be similar to models like ChatGPT and Claude."""
+
+
+ def respond(
+     message: str,
+     history: List[Tuple[str, str]],
+     model: str,
+     system_message: str,
+     max_tokens: int,
+     temperature: float,
+     top_p: float,
+     top_k: int,
+     repeat_penalty: float,
+ ):
+     """
+     Respond to a message using the Dolphin-3 model via Llama.cpp.
+
+     Args:
+         - message (str): The message to respond to.
+         - history (List[Tuple[str, str]]): The chat history.
+         - model (str): The model to use.
+         - system_message (str): The system message to use.
+         - max_tokens (int): The maximum number of tokens to generate.
+         - temperature (float): The temperature of the model.
+         - top_p (float): The top-p of the model.
+         - top_k (int): The top-k of the model.
+         - repeat_penalty (float): The repetition penalty of the model.
+
+     Returns:
+         str: The response to the message.
+     """
+     try:
+         # Load the global variables
+         global llm
+         global llm_model
+
+         # Load the model
+         if llm is None or llm_model != model:
+             llm = Llama(
+                 model_path=f"models/{model}",
+                 flash_attn=False,
+                 n_gpu_layers=0,
+                 n_batch=32,
+                 n_ctx=8192,
+             )
+             llm_model = model
+         provider = LlamaCppPythonProvider(llm)
+
+         # Create the agent
+         agent = LlamaCppAgent(
+             provider,
+             system_prompt=f"{system_message}",
+             predefined_messages_formatter_type=MessagesFormatterType.CHATML,
+             debug_output=True,
+         )
+
+         # Set the settings like temperature, top-k, top-p, max tokens, etc.
+         settings = provider.get_provider_default_settings()
+         settings.temperature = temperature
+         settings.top_k = top_k
+         settings.top_p = top_p
+         settings.max_tokens = max_tokens
+         settings.repeat_penalty = repeat_penalty
+         settings.stream = True
+
+         messages = BasicChatHistory()
+
+         # Add the chat history
+         for msn in history:
+             user = {"role": Roles.user, "content": msn[0]}
+             assistant = {"role": Roles.assistant, "content": msn[1]}
+             messages.add_message(user)
+             messages.add_message(assistant)
+
+         # Get the response stream
+         stream = agent.get_chat_response(
+             message,
+             llm_sampling_settings=settings,
+             chat_history=messages,
+             returns_streaming_generator=True,
+             print_output=False,
+         )
+
+         # Log the success
+         logging.info("Response stream generated successfully")
+
+         # Generate the response
+         outputs = ""
+         for output in stream:
+             outputs += output
+             yield outputs
+
+     # Handle exceptions that may occur during the process
+     except Exception as e:
+         # Custom exception handling
+         raise CustomExceptionHandling(e, sys) from e
+
+
+ # Create a chat interface
+ demo = gr.ChatInterface(
+     respond,
+     additional_inputs_accordion=gr.Accordion(
+         label="⚙️ Parameters", open=False, render=False
+     ),
+     additional_inputs=[
+         gr.Dropdown(
+             choices=[
+                 "Dolphin3.0-Llama3.2-1B-Q6_K.gguf",
+                 "Dolphin3.0-Llama3.2-3B-Q4_K_M.gguf",
+             ],
+             value="Dolphin3.0-Llama3.2-1B-Q6_K.gguf",
+             label="Model",
+             info="Select the AI model to use for chat",
+         ),
+         gr.Textbox(
+             value="You are Dolphin, a helpful AI assistant focused on accurate and ethical responses.",
+             label="System Prompt",
+             info="Define the AI assistant's personality and behavior",
+             lines=2,
+         ),
+         gr.Slider(
+             minimum=512,
+             maximum=4096,
+             value=2048,
+             step=512,
+             label="Max Tokens",
+             info="Maximum length of response (higher = longer replies)",
+         ),
+         gr.Slider(
+             minimum=0.1,
+             maximum=2.0,
+             value=0.7,
+             step=0.1,
+             label="Temperature",
+             info="Creativity level (higher = more creative, lower = more focused)",
+         ),
+         gr.Slider(
+             minimum=0.1,
+             maximum=1.0,
+             value=0.95,
+             step=0.05,
+             label="Top-p",
+             info="Nucleus sampling threshold",
+         ),
+         gr.Slider(
+             minimum=1,
+             maximum=100,
+             value=40,
+             step=1,
+             label="Top-k",
+             info="Limit vocabulary choices to top K tokens",
+         ),
+         gr.Slider(
+             minimum=1.0,
+             maximum=2.0,
+             value=1.1,
+             step=0.1,
+             label="Repetition Penalty",
+             info="Penalize repeated words (higher = less repetition)",
+         ),
+     ],
+     theme="Ocean",
+     submit_btn="Send",
+     stop_btn="Stop",
+     title=title,
+     description=description,
+     chatbot=gr.Chatbot(scale=1, show_copy_button=True),
+     flagging_mode="never",
+ )
+
+
+ # Launch the chat interface
+ if __name__ == "__main__":
+     demo.launch(debug=False)
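
Because respond() is an ordinary Python generator, the streaming path can be exercised without launching the Gradio UI. The sketch below is illustrative only (it is not part of this commit) and assumes the Q6_K model file has already been downloaded to ./models; note that each yielded value is the accumulated response text so far.

# Hypothetical smoke test for the streaming generator (not part of the commit).
from app import respond  # importing app also triggers the hf_hub_download calls

last = ""
for partial in respond(
    message="Write a haiku about dolphins.",
    history=[],  # no previous turns
    model="Dolphin3.0-Llama3.2-1B-Q6_K.gguf",
    system_message="You are Dolphin, a helpful AI assistant.",
    max_tokens=256,
    temperature=0.7,
    top_p=0.95,
    top_k=40,
    repeat_penalty=1.1,
):
    last = partial  # each yield is the full text generated so far

print(last)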
exception.py ADDED
@@ -0,0 +1,50 @@
+ """
+ This module defines a custom exception handling class and a function to get an error message with details of the error.
+ """
+
+ # Standard Library
+ import sys
+
+ # Local imports
+ from logger import logging
+
+
+ # Function Definition to get error message with details of the error (file name and line number) when an error occurs in the program
+ def get_error_message(error, error_detail: sys):
+     """
+     Get error message with details of the error.
+
+     Args:
+         - error (Exception): The error that occurred.
+         - error_detail (sys): The details of the error.
+
+     Returns:
+         str: A string containing the error message along with the file name and line number where the error occurred.
+     """
+     _, _, exc_tb = error_detail.exc_info()
+
+     # Get error details
+     file_name = exc_tb.tb_frame.f_code.co_filename
+     return "Error occurred in python script name [{0}] line number [{1}] error message [{2}]".format(
+         file_name, exc_tb.tb_lineno, str(error)
+     )
+
+
+ # Custom Exception Handling Class Definition
+ class CustomExceptionHandling(Exception):
+     """
+     Custom Exception Handling:
+     This class defines a custom exception that can be raised when an error occurs in the program.
+     It takes an error message and an error detail as input and returns a formatted error message when the exception is raised.
+     """
+
+     # Constructor
+     def __init__(self, error_message, error_detail: sys):
+         """Initialize the exception"""
+         super().__init__(error_message)
+
+         self.error_message = get_error_message(error_message, error_detail=error_detail)
+
+     def __str__(self):
+         """String representation of the exception"""
+         return self.error_message
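
For reference, the idiom app.py now follows when wrapping failures is shown below. This is an illustrative sketch, not part of the commit, and the divide() helper is hypothetical.

# Hypothetical usage of CustomExceptionHandling (mirrors the pattern in app.py).
import sys
from exception import CustomExceptionHandling


def divide(a, b):
    try:
        return a / b
    except Exception as e:
        # Passing sys lets get_error_message() pull the traceback via sys.exc_info()
        raise CustomExceptionHandling(e, sys) from e


# divide(1, 0) raises CustomExceptionHandling; str(exc) includes the file name and line number.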
logger.py ADDED
@@ -0,0 +1,21 @@
+ # Importing the required modules
+ import os
+ import logging
+ from datetime import datetime
+
+ # Creating a log file with the current date and time as the name of the file
+ LOG_FILE = f"{datetime.now().strftime('%m_%d_%Y_%H_%M_%S')}.log"
+
+ # Creating a logs folder if it does not exist
+ logs_path = os.path.join(os.getcwd(), "logs")
+ os.makedirs(logs_path, exist_ok=True)
+
+ # Setting the log file path and the log level
+ LOG_FILE_PATH = os.path.join(logs_path, LOG_FILE)
+
+ # Configuring the logger
+ logging.basicConfig(
+     filename=LOG_FILE_PATH,
+     format="[ %(asctime)s ] %(lineno)d %(name)s - %(levelname)s - %(message)s",
+     level=logging.INFO,
+ )
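
Since logger.py configures the root logger at import time, importing it once is enough for every module in the Space. A minimal sketch of how app.py and exception.py consume it, illustrative only and not part of the commit:

# Hypothetical usage: importing the module applies basicConfig and creates the logs/ folder.
from logger import logging

logging.info("Response stream generated successfully")  # appended to logs/<timestamp>.log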