frimelle HF Staff committed on
Commit
707ab5a
·
1 Parent(s): f763ade

add writing to dataset

Browse files
Files changed (2) hide show
  1. app.py +26 -30
  2. requirements.txt +3 -1
app.py CHANGED
@@ -8,42 +8,38 @@ import uuid
8
  with open("system_prompt.txt", "r") as f:
9
  SYSTEM_PROMPT = f.read()
10
 
 
11
  MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
 
 
12
  client = InferenceClient(MODEL_NAME)
13
 
14
- # ---- Setup logging ----
15
- LOG_DIR = "chat_logs"
16
- os.makedirs(LOG_DIR, exist_ok=True)
17
- session_id = str(uuid.uuid4())
 
 
 
 
 
18
 
19
- def log_chat(session_id, user_msg, bot_msg):
20
- timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
21
- log_path = os.path.join(LOG_DIR, f"{session_id}.txt")
22
- with open(log_path, "a", encoding="utf-8") as f:
23
- f.write(f"[{timestamp}] User: {user_msg}\n")
24
- f.write(f"[{timestamp}] Bot: {bot_msg}\n\n")
25
 
26
- # ---- Respond Function with Logging ----
27
- def respond(
28
- message,
29
- history: list[tuple[str, str]],
30
- system_message,
31
- max_tokens,
32
- temperature,
33
- top_p,
34
- ):
35
  messages = [{"role": "system", "content": system_message}]
36
 
37
- for val in history:
38
- if val[0]:
39
- messages.append({"role": "user", "content": val[0]})
40
- if val[1]:
41
- messages.append({"role": "assistant", "content": val[1]})
42
 
43
  messages.append({"role": "user", "content": message})
44
 
45
  response = ""
46
-
47
  for chunk in client.chat_completion(
48
  messages,
49
  max_tokens=max_tokens,
@@ -56,19 +52,19 @@ def respond(
56
  response += token
57
  yield response
58
 
59
- # Save full message after stream ends
60
- log_chat(session_id, message, response)
61
 
62
- # ---- Gradio Interface ----
63
  demo = gr.ChatInterface(
64
- respond,
65
  additional_inputs=[
66
  gr.Textbox(value=SYSTEM_PROMPT, label="System message"),
67
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
68
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
69
  gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
70
  ],
71
- title="BoundrAI"
72
  )
73
 
74
  if __name__ == "__main__":
 
8
  with open("system_prompt.txt", "r") as f:
9
  SYSTEM_PROMPT = f.read()
10
 
11
+ # ---- Constants ----
12
  MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
13
+ DATASET_REPO = "frimelle/companion-chat-logs"
14
+ HF_TOKEN = os.environ.get("HF_TOKEN") # set in Space secrets
15
  client = InferenceClient(MODEL_NAME)
16
 
17
# ---- Upload to Dataset ----
def upload_chat_to_dataset(user_message, assistant_message, system_prompt):
    """Persist one chat exchange to the Hugging Face dataset repo.

    Builds a single-row dataset from the exchange and pushes it to
    ``DATASET_REPO`` as a private dataset, authenticated with ``HF_TOKEN``.

    Args:
        user_message: Raw text the user sent.
        assistant_message: The model's full (post-stream) reply.
        system_prompt: The system prompt in effect for this exchange.
    """
    row = {
        "timestamp": datetime.now().isoformat(),
        # NOTE(review): a fresh uuid4 per call gives every row a unique
        # "session_id", so rows from the same conversation cannot be grouped.
        # Consider generating the id once per session instead — confirm intent.
        "session_id": str(uuid.uuid4()),
        "user": user_message,
        "assistant": assistant_message,
        "system_prompt": system_prompt,
    }

    new_rows = Dataset.from_dict({k: [v] for k, v in row.items()})

    # Bug fix: push_to_hub replaces the dataset's data files, so pushing only
    # the new row would overwrite — and thus discard — every previously logged
    # turn. Append to the existing split when the repo already has data; fall
    # back to a fresh one-row push on the first write.
    try:
        from datasets import concatenate_datasets, load_dataset

        existing = load_dataset(DATASET_REPO, split="train", token=HF_TOKEN)
        new_rows = concatenate_datasets([existing, new_rows])
    except Exception:
        # Best-effort: empty/nonexistent repo on first write, or a transient
        # read failure — still log the current turn rather than crash the chat.
        pass

    new_rows.push_to_hub(DATASET_REPO, private=True, token=HF_TOKEN)
 
 
 
 
29
 
30
+ # ---- Chat Function ----
31
+ def respond(message, history, system_message, max_tokens, temperature, top_p):
 
 
 
 
 
 
 
32
  messages = [{"role": "system", "content": system_message}]
33
 
34
+ for user_msg, bot_msg in history:
35
+ if user_msg:
36
+ messages.append({"role": "user", "content": user_msg})
37
+ if bot_msg:
38
+ messages.append({"role": "assistant", "content": bot_msg})
39
 
40
  messages.append({"role": "user", "content": message})
41
 
42
  response = ""
 
43
  for chunk in client.chat_completion(
44
  messages,
45
  max_tokens=max_tokens,
 
52
  response += token
53
  yield response
54
 
55
+ # Log the final full message to the dataset
56
+ upload_chat_to_dataset(message, response, system_message)
57
 
58
# ---- Gradio UI ----
# Extra controls rendered alongside the chat box; Gradio passes their values
# to `respond` after the message and history arguments, in this order.
_extra_controls = [
    gr.Textbox(value=SYSTEM_PROMPT, label="System message"),
    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
    gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
]

demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=_extra_controls,
    title="BoundrAI",
)
69
 
70
  if __name__ == "__main__":
requirements.txt CHANGED
@@ -1 +1,3 @@
1
- huggingface_hub==0.25.2
 
 
 
1
+ huggingface_hub==0.25.2
2
+ gradio
3
+ datasets