Tonic committed on
Commit
a94de3f
·
verified ·
1 Parent(s): 40413ed

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +155 -0
app.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import transformers
3
+
4
# Heading rendered at the top of the Gradio page.
title = """🙋🏻‍♂️Welcome to 🌟Tonic's 🤳🏻Phi-4 Demo"""

# Short usage guide rendered beneath the title; mirrors the UI controls below.
description = """
This demo uses Microsoft's Phi-4 model for text generation.
- System Prompt: Sets the context/role for the AI
- User Prompt: Your specific question or request
- Max Tokens: Maximum length of the generated response
- Temperature: Controls randomness (higher = more creative, lower = more focused)
"""


# Community/credits blurb (markdown) shown on the page.
join_us = """
## Join us:
🌟TeamTonic🌟 is always making cool demos! Join our active builder's 🛠️community 👻
[![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/qdfnvSPcqP)
On 🤗Huggingface: [MultiTransformer](https://huggingface.co/MultiTransformer)
On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to🌟 [Dark Thoughts](https://github.com/MultiTonic/thinking-dataset)
🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
"""
23
+
24
+
25
# Module-level cache so the (very expensive) model load happens once per
# process instead of on every button click.
_pipeline = None


def _get_pipeline():
    """Return the shared Phi-4 text-generation pipeline, creating it on first use."""
    global _pipeline
    if _pipeline is None:
        _pipeline = transformers.pipeline(
            "text-generation",
            model="microsoft/phi-4",
            model_kwargs={"torch_dtype": "auto"},
            device_map="auto",
        )
    return _pipeline


def generate_response(system_prompt, user_prompt, max_tokens, temperature):
    """Generate a chat completion from Phi-4.

    Args:
        system_prompt: Context/role instruction for the model.
        user_prompt: The user's question or request.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature (higher = more random). Sampling is
            always enabled (``do_sample=True``), so this value takes effect.

    Returns:
        The assistant's reply as plain text.
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]

    outputs = _get_pipeline()(
        messages,
        max_new_tokens=max_tokens,
        temperature=temperature,
        do_sample=True,
    )

    generated = outputs[0]["generated_text"]
    # With chat-style (messages) input, the pipeline returns the full message
    # list (system + user + assistant); the reply text is the last message's
    # content. A plain string (prompt-style input) is passed through as-is.
    if isinstance(generated, list):
        return generated[-1]["content"]
    return generated
46
+
47
# Preset [system prompt, user prompt, max tokens, temperature] combinations
# offered as one-click examples in the UI.
examples = [
    ["You are a medieval knight and must provide explanations to modern people.",
     "How should I explain the Internet?", 128, 0.7],
    ["You are a wise wizard from ancient times.",
     "What would you call a smartphone?", 256, 0.8],
    ["You are a time-traveling merchant from the year 1400.",
     "How would you describe modern cars?", 200, 0.6],
    ["You are a medieval monk who specializes in manuscripts.",
     "What do you think about e-books?", 150, 0.7],
    ["You are a castle guard from the Middle Ages.",
     "What do you think about modern security systems?", 180, 0.9],
]
80
+
81
# Create the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    # Bug fix: the original referenced an undefined name `joinus`, which
    # raises NameError at import time; the constant defined above is `join_us`.
    gr.Markdown(join_us)

    with gr.Row():
        with gr.Column():
            # Left column: generation inputs.
            system_prompt = gr.Textbox(
                label="System Prompt",
                placeholder="Enter system prompt...",
                value="You are a medieval knight and must provide explanations to modern people."
            )
            user_prompt = gr.Textbox(
                label="User Prompt",
                placeholder="Enter your question...",
                value="How should I explain the Internet?"
            )

            with gr.Row():
                max_tokens = gr.Slider(
                    minimum=1,
                    maximum=512,
                    value=128,
                    step=1,
                    label="Maximum Tokens"
                )
                temperature = gr.Slider(
                    minimum=0.1,
                    maximum=1.0,
                    value=0.7,
                    step=0.1,
                    label="Temperature"
                )

            submit_btn = gr.Button("🚀 Generate Response")

        with gr.Column():
            # Right column: model output.
            output = gr.Textbox(
                label="Generated Response",
                lines=10
            )

    # NOTE(review): cache_examples=True runs `generate_response` for every
    # example at startup, which loads the model and performs 5 generations
    # before launch — confirm this is intended for the deployment hardware.
    gr.Examples(
        examples=examples,
        inputs=[system_prompt, user_prompt, max_tokens, temperature],
        outputs=output,
        fn=generate_response,
        cache_examples=True,
        label="Example Prompts"
    )

    submit_btn.click(
        fn=generate_response,
        inputs=[system_prompt, user_prompt, max_tokens, temperature],
        outputs=output
    )

    gr.Markdown("""
    ### 📝 Parameters:
    - **System Prompt**: Sets the behavior/role of the AI (e.g., medieval knight, wizard, merchant)
    - **User Prompt**: Your question or input about modern concepts
    - **Maximum Tokens**: Controls the maximum length of the generated response
    - **Temperature**: Controls randomness (higher = more creative, lower = more focused)

    ### 💡 Tips:
    1. Try different historical personas in the system prompt
    2. Ask about modern technology from a historical perspective
    3. Adjust temperature for more varied or consistent responses
    4. Use the examples below for inspiration
    """)
152
+
153
# Launch the demo
if __name__ == "__main__":
    # Start the Gradio server only when run as a script (not when imported).
    demo.launch()