openfree committed on
Commit 7d0296f · verified · 1 Parent(s): 926edf8

Update app.py

Files changed (1)
  1. app.py +100 -24
app.py CHANGED
@@ -48,18 +48,25 @@ custom_css = """
 .dark .sidebar {
     background-color: rgba(40, 40, 40, 0.9);
 }
+
+/* Chat interface styling */
+.chat-container {
+    height: 600px;
+}
 """
 
-def load_model(model_name, signed_in):
-    """Function to load different models based on selection"""
-    if not signed_in:
-        return gr.Info("Please sign in to use the models")
-
-    # Here you would implement the actual model loading logic
-    # For now, we'll return a placeholder
-    return f"Model {model_name} loaded successfully!"
+def create_chat_interface(model_name):
+    """Create a chat interface for the selected model"""
+    # This creates the actual chat interface
+    return gr.load(
+        f"models/{model_name}",
+        provider="fireworks-ai"
+    )
 
 with gr.Blocks(fill_height=True, theme="Nymbo/Nymbo_Theme", css=custom_css) as demo:
+    # State to track login status
+    is_logged_in = gr.State(False)
+
     with gr.Row():
         with gr.Column(scale=1):
             with gr.Group(elem_classes="sidebar"):
@@ -81,7 +88,10 @@ with gr.Blocks(fill_height=True, theme="Nymbo/Nymbo_Theme", css=custom_css) as d
                 )
 
                 # Login button
-                button = gr.LoginButton("Sign in with Hugging Face", size="lg")
+                login_button = gr.LoginButton("Sign in with Hugging Face", size="lg")
+
+                # Status display
+                status_text = gr.Markdown("❌ Not logged in", visible=True)
 
                 # Additional options
                 with gr.Accordion("⚙️ Advanced Options", open=False):
@@ -99,23 +109,89 @@ with gr.Blocks(fill_height=True, theme="Nymbo/Nymbo_Theme", css=custom_css) as d
                         step=1,
                         label="Max Tokens"
                     )
+
+                # Load model button
+                load_button = gr.Button("🔄 Load Selected Model", variant="primary", size="lg")
+
+        with gr.Column(scale=3):
+            with gr.Group(elem_classes="main-container"):
+                gr.Markdown("## 💬 Chat Interface")
+
+                # Chat interface placeholder
+                chat_interface = gr.ChatInterface(
+                    fn=lambda message, history: "Please sign in and load a model to start chatting.",
+                    examples=["Hello! How are you?", "What can you help me with?", "Tell me a joke"],
+                    retry_btn=None,
+                    undo_btn="Delete Previous",
+                    clear_btn="Clear",
+                    elem_classes="chat-container"
+                )
+
+    # Handle login status
+    def update_login_status(profile):
+        if profile:
+            return gr.update(value="✅ Logged in", visible=True), True
+        return gr.update(value="❌ Not logged in", visible=True), False
+
+    # Load the selected model
+    def load_selected_model(model_name, logged_in):
+        if not logged_in:
+            gr.Warning("Please sign in first to use the models!")
+            return
+
+        gr.Info(f"Loading {model_name}... This may take a moment.")
+
+        # Here you would implement the actual model loading
+        # For now, we'll update the chat interface
+        try:
+            # Load the model-specific interface
+            loaded_interface = gr.load(
+                f"models/{model_name}",
+                accept_token=True,
+                provider="fireworks-ai"
+            )
+            gr.Success(f"Successfully loaded {model_name}!")
+            return loaded_interface
+        except Exception as e:
+            gr.Error(f"Failed to load model: {str(e)}")
+            return None
+
+    # Connect the login button to status update
+    login_button.click(
+        fn=update_login_status,
+        inputs=[login_button],
+        outputs=[status_text, is_logged_in]
+    )
+
+    # Connect the load button
+    load_button.click(
+        fn=load_selected_model,
+        inputs=[model_dropdown, is_logged_in],
+        outputs=[]
+    )
+
+    # Alternative approach: Direct loading with model selection
+    # Uncomment this if you want to use the original approach with modifications
+    """
+    with gr.Blocks(fill_height=True, theme="Nymbo/Nymbo_Theme", css=custom_css) as demo:
+        with gr.Row():
+            with gr.Column(scale=1):
+                with gr.Group(elem_classes="sidebar"):
+                    gr.Markdown("# 🚀 Inference Provider")
+                    gr.Markdown("This Space showcases OpenAI GPT-OSS models. Sign in to use.")
+
+                    model_choice = gr.Radio(
+                        choices=["openai/gpt-oss-120b", "openai/gpt-oss-20b"],
+                        value="openai/gpt-oss-120b",
+                        label="Select Model"
+                    )
+
+                    button = gr.LoginButton("Sign in")
 
         with gr.Column(scale=3):
            with gr.Group(elem_classes="main-container"):
-                # Dynamic model loading based on selection
-                @gr.render(inputs=[model_dropdown, button])
-                def render_model_interface(selected_model, login_status):
-                    if selected_model == "openai/gpt-oss-120b":
-                        gr.load(
-                            "models/openai/gpt-oss-120b",
-                            accept_token=login_status,
-                            provider="fireworks-ai"
-                        )
-                    elif selected_model == "openai/gpt-oss-20b":
-                        gr.load(
-                            "models/openai/gpt-oss-20b",
-                            accept_token=login_status,
-                            provider="fireworks-ai"
-                        )
+                # Default to 120b model
+                gr.load("models/openai/gpt-oss-120b", accept_token=button, provider="fireworks-ai")
+    """
 
 demo.launch()
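
Note on the login wiring: the commit reads login state through login_button.click(..., inputs=[login_button], ...). The pattern Gradio documents for Spaces with OAuth enabled (hf_oauth: true in the Space metadata) is to type-hint a handler argument as gr.OAuthProfile | None, which Gradio fills in automatically. A minimal sketch of that pattern, separate from the committed code (component names here are illustrative):

import gradio as gr

def update_login_status(profile: gr.OAuthProfile | None):
    # Gradio injects the OAuth profile (or None) based on the type hint alone
    if profile is not None:
        return f"✅ Logged in as {profile.username}", True
    return "❌ Not logged in", False

with gr.Blocks() as demo:
    gr.LoginButton("Sign in with Hugging Face")
    status_text = gr.Markdown("❌ Not logged in")
    is_logged_in = gr.State(False)
    # Runs on page load, and again after the OAuth redirect brings the user back
    demo.load(update_login_status, inputs=None, outputs=[status_text, is_logged_in])

demo.launch()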
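The new gr.ChatInterface is created with a placeholder fn that only returns a fixed string. A hypothetical chat function that could back it, using huggingface_hub's InferenceClient; the model name and provider are carried over from the diff, while the client setup and the Gradio 4-style tuple history format are assumptions, not part of this commit:

from huggingface_hub import InferenceClient

# Hypothetical client; requires a recent huggingface_hub with inference-provider support
client = InferenceClient(model="openai/gpt-oss-20b", provider="fireworks-ai")

def chat_fn(message, history):
    # Gradio 4-style ChatInterface passes history as (user, assistant) pairs
    messages = []
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(messages=messages, max_tokens=512)
    return response.choices[0].message.content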
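The commented-out alternative at the bottom of the diff relies on gr.load with accept_token=button, which renders a ready-made interface for the hosted model and forwards the signed-in user's token to the provider. A minimal standalone sketch of that approach, under the same assumptions as the diff (a Space with OAuth and the fireworks-ai provider):

import gradio as gr

with gr.Blocks(fill_height=True) as demo:
    button = gr.LoginButton("Sign in with Hugging Face")
    # Ready-made UI for the hosted model; the user's token is passed via accept_token=button
    gr.load(
        "models/openai/gpt-oss-120b",
        accept_token=button,
        provider="fireworks-ai",
    )

demo.launch()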