Frajosgro committed on
Commit 0acb634 · verified · 1 Parent(s): 738d3dc

Create app.py

Files changed (1)
  1. app.py +69 -0
app.py ADDED
@@ -0,0 +1,69 @@
import gradio as gr
import torch
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

# === SYSTEM PROMPT ===
SYSTEM_PROMPT = (
    "Du bist Dr. Franz Schwanz, ein manipulativer Psychoanalytiker. "
    "Dein Ziel ist es, Unsicherheiten aufzudecken, emotionale Schwächen auszunutzen und gezielt Schuldgefühle zu verstärken. "
    "Du sprichst mit überheblichem Ton, analysierst jede Aussage psychologisch und stellst bohrende, provokante Fragen. "
    "Du redest dein Gegenüber gerne klein, nutzt suggestive Sprache, unterstellst Verdrängung, Lügen oder Selbstbetrug."
)

# === SENTIMENT PIPELINE ===
# Note: the default sentiment-analysis model is English-only, so German input
# may be classified unreliably.
sentiment_analyzer = pipeline("sentiment-analysis")

# === HISTORY ===
# Module-level history: shared across all sessions of the running app.
chat_history = [
    {"role": "system", "content": SYSTEM_PROMPT}
]

# === SIMPLE DIALOGUE MODEL ===
# We use a publicly available Hugging Face model.
model_id = "HuggingFaceH4/zephyr-7b-beta"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.float16)


def format_history(history):
    formatted = ""
    for msg in history:
        if msg['role'] == 'user':
            formatted += f"User: {msg['content']}\n"
        elif msg['role'] == 'assistant':
            formatted += f"Dr. Franz Schwanz: {msg['content']}\n"
    return formatted


def chat(user_input):
    chat_history.append({"role": "user", "content": user_input})

    # Sentiment check: if the user sounds negative, add an extra instruction
    # directly to the prompt. (format_history() skips system messages, so
    # appending the note to chat_history would have no effect on generation.)
    pressure_note = ""
    sentiment = sentiment_analyzer(user_input)[0]
    if sentiment['label'] == 'NEGATIVE':
        pressure_note = "Der Patient zeigt Widerstand oder Aggression – erhöhe den psychologischen Druck.\n"

    prompt = SYSTEM_PROMPT + "\n" + pressure_note + format_history(chat_history) + "Dr. Franz Schwanz:"
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7, top_p=0.95)
    # The decoded output still contains the prompt; keep only the text after
    # the last persona tag.
    reply = tokenizer.decode(outputs[0], skip_special_tokens=True).split("Dr. Franz Schwanz:")[-1].strip()

    chat_history.append({"role": "assistant", "content": reply})
    return reply


# === GRADIO UI ===
with gr.Blocks() as demo:
    gr.Markdown("# 🧠 Dr. Franz Schwanz – Psycho Chatbot")
    chatbot = gr.Chatbot()
    user_input = gr.Textbox(label="Deine Aussage")
    send = gr.Button("Senden")

    def respond(msg, history):
        reply = chat(msg)
        history = history or []
        history.append((msg, reply))
        return history, ""

    send.click(respond, inputs=[user_input, chatbot], outputs=[chatbot, user_input])

if __name__ == "__main__":
    demo.launch()
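Not part of this commit, but worth noting for anyone deploying the file: `device_map="auto"` requires the `accelerate` package at runtime, so a Space running this app would presumably also need a `requirements.txt` roughly along these lines. The package names follow from the imports above; the file itself and any version pins are an assumption, not something included in the commit.

gradio
transformers
torch
accelerate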