Update app.py
app.py
CHANGED
@@ -49,25 +49,27 @@ MAX_SEC = 30
 tokenizer = AutoTokenizer.from_pretrained("MediaTek-Research/Breeze-7B-Instruct-v0_1")
 
 def insert_to_db(prompt, response, temperature, top_p):
-
-
-
-
-
-
+    try:
+        #Establishing the connection
+        conn = psycopg2.connect(
+            database=os.environ.get("DB"), user=os.environ.get("USER"), password=os.environ.get("DB_PASS"), host=os.environ.get("DB_HOST"), port= '5432'
+        )
+        #Setting auto commit false
+        conn.autocommit = True
+
+        #Creating a cursor object using the cursor() method
+        cursor = conn.cursor()
 
-
-
-
-
-
-
-
-
-
-
-    conn.close()
-
+        # Preparing SQL queries to INSERT a record into the database.
+        cursor.execute(f"INSERT INTO breezedata(prompt, response, temperature, top_p) VALUES ('{prompt}', '{response}', {temperature}, {top_p})")
+
+        # Commit your changes in the database
+        conn.commit()
+
+        # Closing the connection
+        conn.close()
+    except:
+        pass
 
 
 def refusal_condition(query):

@@ -159,7 +161,7 @@ with gr.Blocks() as demo:
         message = tokenizer.apply_chat_template(chat_data, tokenize=False)
         message = message[3:] # remove SOT token
 
-        response =
+        response = ''
         if refusal_condition(history[-1][0]):
             history = [['[安全拒答啟動]', '[安全拒答啟動] 請清除再開啟對話']]
             response = '[REFUSAL]'

@@ -180,8 +182,8 @@ with gr.Blocks() as demo:
         keep_streaming = True
         s = requests.Session()
         with s.post(API_URL, headers=HEADERS, json=data, stream=True, timeout=30) as r:
-            time.sleep(0.1)
             for line in r.iter_lines():
+                time.sleep(0.05)
                 if time.time() - start_time > MAX_SEC:
                     keep_streaming = False
                     break

@@ -191,23 +193,22 @@ with gr.Blocks() as demo:
                     continue
                 json_response = json.loads(line)
 
-
-
-
-
-
-
-                except Exception as e:
-                    raise e
+                if "fragment" not in json_response["result"]:
+                    keep_streaming = False
+                    break
+
+                delta = json_response["result"]["fragment"]["data"]["text"]
+
 
                 history[-1][1] += delta
                 yield history
 
-        response = history[-1][1]
         if history[-1][1].endswith('</s>'):
            history[-1][1] = history[-1][1][:-4]
         yield history
 
+        response = history[-1][1]
+
         if refusal_condition(history[-1][1]):
             history[-1][1] = history[-1][1] + '\n\n**[免責聲明: Breeze-7B-Instruct 和 Breeze-7B-Instruct-64k 並未針對問答進行安全保護,因此語言模型的任何回應不代表 MediaTek Research 立場。]**'
         yield history
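
A side note on the new insert_to_db: the INSERT statement is built with an f-string, so any single quote in prompt or response will break the query, and untrusted text can inject SQL. The following is not part of this commit, only a minimal sketch of the same insert using psycopg2 parameter binding; the breezedata table, column names, and environment variables are taken from the diff above, and the function name is hypothetical.

import os
import psycopg2

def insert_to_db_parameterized(prompt, response, temperature, top_p):
    # Sketch only: same breezedata table as in the commit, but the values are
    # passed as bind parameters so quoting and escaping are handled by the driver.
    try:
        conn = psycopg2.connect(
            database=os.environ.get("DB"), user=os.environ.get("USER"),
            password=os.environ.get("DB_PASS"), host=os.environ.get("DB_HOST"), port='5432'
        )
        with conn, conn.cursor() as cursor:
            # The connection context manager commits on success and rolls back on error.
            cursor.execute(
                "INSERT INTO breezedata(prompt, response, temperature, top_p) VALUES (%s, %s, %s, %s)",
                (prompt, response, temperature, top_p),
            )
        conn.close()
    except Exception:
        # Mirrors the commit's choice of never letting logging failures break the app.
        pass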