charliebaby2023 committed
Commit 746d932 · verified · 1 Parent(s): 288b3a4

Update app.py

Files changed (1): app.py +221 -8
app.py CHANGED
@@ -9,12 +9,17 @@ from datetime import datetime
 from threading import RLock
 lock = RLock()
 HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
-# --- Step 2: Authenticate and fetch your models

+
+stop_event = asyncio.Event()
 default_models = models[:howManyModelsToUse]
 api = HfApi()
 user_info = whoami(token=HF_TOKEN)
 username = user_info["name"]
+print(f"{username}")
+print(f"{username}")
+print(f"{username}")
+print(f"{username}")
 from handle_models import load_fn,infer,gen_fn
 from externalmod import gr_Interface_load, save_image, randomize_seed
 def extend_choices(choices):
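The added `stop_event = asyncio.Event()` runs at module level, so it needs `asyncio` imported near the top of app.py (the import is not visible in this diff). The event also only does anything if the generation path polls it; a minimal sketch of that cooperative-stop pattern (the worker below is illustrative, not part of the commit):

import asyncio

stop_event = asyncio.Event()

async def run_batch(prompts):
    # Cooperative stop: stop_all_tasks() (added later in this commit) calls
    # stop_event.set(); the loop checks the flag between work items.
    results = []
    for p in prompts:
        if stop_event.is_set():
            break  # skip the remaining generations
        results.append(await fake_generate(p))
    return results

async def fake_generate(prompt):
    await asyncio.sleep(0.1)  # stand-in for a real inference call
    return f"image for {prompt!r}"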
@@ -37,7 +42,206 @@ load_fn(models,HF_TOKEN)
 
 
 
+
+
+
+# === CONFIG ===
+host = "api-inference.huggingface.co"
+endpoint = "/models/charliebaby2023/cybrony"
+token = HF_TOKEN
+prompt = "a futuristic city on Mars at sunset"
+
+# === REQUEST SETUP ===
+body = json.dumps({
+    "inputs": prompt
+})
+headers = {
+    "Authorization": f"Bearer {token}",
+    "Content-Type": "application/json",
+    "User-Agent": "PythonRawClient/1.0"
+}
+
+# === CONNECTION ===
+context = ssl.create_default_context()
+conn = http.client.HTTPSConnection(host, context=context)
+
+# === RAW REQUEST ===
+print("🔸 REQUEST LINE:")
+print(f"POST {endpoint} HTTP/1.1")
+print(f"Host: {host}")
+for key, value in headers.items():
+    print(f"{key}: {value}")
+print(f"\n{body}\n")
+
+# Send request
+conn.request("POST", endpoint, body=body, headers=headers)
+
+# === RAW RESPONSE ===
+response = conn.getresponse()
+print("🔹 STATUS:", response.status, response.reason)
+print("🔹 RESPONSE HEADERS:")
+for hdr in response.getheaders():
+    print(f"{hdr[0]}: {hdr[1]}")
+print("\n🔹 RESPONSE BODY (raw):")
+raw = response.read()
+try:
+    print(raw.decode("utf-8")[:1000])  # print first 1k chars
+except UnicodeDecodeError:
+    print("[binary data]")
+
+
+def query_model(model_name, prompt):
+    logs = []
+    img_out = None
+
+    host = "api-inference.huggingface.co"
+    endpoint = f"/models/{model_name}"
+    # Prepare request
+    body = json.dumps({"inputs": prompt})
+    headers = {
+        "Authorization": f"Bearer {token}",
+        "Content-Type": "application/json",
+        "User-Agent": "PythonRawClient/1.0"
+    }
+
+    # Connect
+    context = ssl.create_default_context()
+    conn = http.client.HTTPSConnection(host, context=context)
+
+    logs.append(f"📤 POST {endpoint}")
+    logs.append(f"Headers: {headers}")
+    logs.append(f"Body: {body}\n")
+
+    try:
+        conn.request("POST", endpoint, body=body, headers=headers)
+        response = conn.getresponse()
+
+        logs.append(f"📥 Status: {response.status} {response.reason}")
+        logs.append("Headers:")
+        for k, v in response.getheaders():
+            logs.append(f"{k}: {v}")
+
+        raw = response.read()
+
+        try:
+            text = raw.decode("utf-8")
+            result = json.loads(text)
+            logs.append("\nBody:\n" + text[:1000])
+        except Exception:
+            result = raw
+            logs.append("\n⚠️ Binary response.")
+
+        # === HANDLE RESPONSE ===
+        def show(img_bytes):
+            try:
+                img = Image.open(BytesIO(img_bytes))
+                return img
+            except Exception as e:
+                logs.append(f"❌ Failed to open image: {e}")
+                return None
+
+        if isinstance(result, dict):
+            if "image" in result:
+                logs.append("🧠 Found base64 image in 'image'")
+                return show(base64.b64decode(result["image"])), "\n".join(logs)
+            elif "url" in result or "image_url" in result:
+                url = result.get("url") or result.get("image_url")
+                logs.append(f"🌐 Found image URL: {url}")
+                r = requests.get(url)
+                return show(r.content), "\n".join(logs)
+            else:
+                logs.append("⚠️ No image found in response.")
+                return None, "\n".join(logs)
+        elif isinstance(result, bytes):
+            logs.append("🧾 Raw image bytes returned.")
+            return show(result), "\n".join(logs)
+        else:
+            logs.append("❌ Unknown response format.")
+            return None, "\n".join(logs)
+
+    except Exception as e:
+        logs.append(f"💥 Exception: {e}")
+        return None, "\n".join(logs)
+
+
+# === GRADIO UI ===
+
+def query_model2(model_name, prompt):
+    logs = []
+    img_out = None
+    try:
+        model = gr.Interface.load(f"models/{model_name}", token=HF_TOKEN)
+        logs.append(f"Prompt: {prompt}")
+        response = model.predict(prompt)
+        logs.append(f"Model response: {response}")
+        def get_image_from_response(response):
+            if isinstance(response, dict):
+                if "image" in response:
+                    img_data = base64.b64decode(response["image"])
+                    img = Image.open(BytesIO(img_data))
+                    return img
+                elif "url" in response or "image_url" in response:
+                    url = response.get("url") or response.get("image_url")
+                    img_data = requests.get(url).content
+                    img = Image.open(BytesIO(img_data))
+                    return img
+            elif isinstance(response, bytes):
+                img = Image.open(BytesIO(response))
+                return img
+            return None
+        img_out = get_image_from_response(response)
+    except Exception as e:
+        logs.append(f"Error: {e}")
+        response = None
+    return img_out, "\n".join(logs)
+
+
+def debugon():
+    print("DEBUGGING MODE : ON")
+    logging.basicConfig(level=logging.DEBUG, format='%(message)s')
+    error_handler = ErrorCodeLogHandler()
+    print(f"{error_handler}")
+    logging.getLogger().addHandler(error_handler)
+
+def debugoff():
+    print("DEBUGGING MODE : OFF")
+    logging.basicConfig(level=logging.WARNING, format='%(message)s')
+    error_handler = ErrorCodeLogHandler()
+    print(f"{error_handler}")
+    logging.getLogger().addHandler(error_handler)
+
+def handle_debug_mode(selected_option):
+    if selected_option == "debug on":
+        debugon()
+    else:
+        debugoff()
+
+
+def stop_all_tasks():
+    print("Stopping...")
+    stop_event.set()
+
+
 with gr.Blocks(fill_width=True) as demo:
+    with gr.Tab(label="DEBUG"):
+        with gr.Row():
+            radio = gr.Radio(["debug on", "debug off"], value="debug off", label=" Debug mode: activated in output log", interactive=True)
+            radio.change(handle_debug_mode, radio, None)
+
     with gr.Tab(str(num_models) + ' Models'):
         with gr.Column(scale=2):
             with gr.Group():
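The `# === CONFIG ===` block above sits at module level, so the Space fires one raw POST against `api-inference.huggingface.co` every time app.py is imported. The hand-rolled `http.client` call is handy for dumping raw request and response headers; for routine calls the same request is simpler through `huggingface_hub`'s `InferenceClient` (a hedged equivalent, assuming a recent `huggingface_hub`; the model id is taken from the diff):

import os
from huggingface_hub import InferenceClient

# Same request the raw http.client block above makes, via the official helper.
client = InferenceClient(token=os.environ.get("HF_TOKEN"))
image = client.text_to_image(
    "a futuristic city on Mars at sunset",
    model="charliebaby2023/cybrony",
)
image.save("out.png")  # text_to_image returns a PIL.Image.Image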
@@ -65,20 +269,29 @@ with gr.Blocks(fill_width=True) as demo:
                               visible=True) for m in default_models]
             current_models = [gr.Textbox(m, visible=False) for m in default_models]
 
+            #for m, o in zip(current_models, output):
+            #    gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
+            #                      inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o],
+            #                      concurrency_limit=None, queue=False)
             for m, o in zip(current_models, output):
-                gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
-                                  inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o],
-                                  concurrency_limit=None, queue=False)
-
-
+                gen_button.click(fn=gen_fn, inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed, timeout=120], outputs=[o], queue=False)
+                #concurrency_limit=None,
+                txt_input.submit(fn=gen_fn, inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed, timeout=120], outputs=[o], queue=False)
+
         with gr.Column(scale=4):
             with gr.Accordion('Model selection'):
                 model_choice = gr.CheckboxGroup(models, label=f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=models, interactive=True)
                 model_choice.change(update_imgbox, model_choice, output)
                 model_choice.change(extend_choices, model_choice, current_models)
                 random_button.click(random_choices, None, model_choice)
-
-demo.launch(show_api=False, max_threads=400)
+                stop_button = gr.Button("Stop 🛑", variant="stop")
+                stop_button.click(
+                    fn=stop_all_tasks,
+                    inputs=[],
+                    outputs=[]
+                )
+
+demo.launch(show_api=True, max_threads=400)
 
 
 '''
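Note that `timeout=120` inside the `inputs` list in the new `click`/`submit` bindings is a `SyntaxError` in Python (keyword arguments cannot appear inside a list literal), so the file cannot run as committed. If `gen_fn` accepts a `timeout` keyword (an assumption; its signature lives in handle_models.py, which is not shown here), the value can be bound with `functools.partial` and the `inputs` list kept to Gradio components only:

from functools import partial

# Assumption: gen_fn(model, prompt, negative, height, width, steps, cfg, seed, timeout=...)
# takes timeout as a keyword; bind it here instead of inside the Gradio inputs list.
gen_with_timeout = partial(gen_fn, timeout=120)

for m, o in zip(current_models, output):
    gen_button.click(fn=gen_with_timeout,
                     inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed],
                     outputs=[o], queue=False)
    txt_input.submit(fn=gen_with_timeout,
                     inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed],
                     outputs=[o], queue=False)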
 
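On the Stop button: `stop_all_tasks()` only sets the `asyncio.Event`, and nothing visible in this diff ever checks or awaits it, so in-flight generations keep running after a click. Gradio's built-in route is the `cancels=` argument on event listeners, which aborts queued jobs; a sketch, assuming the bindings move back to queued events (with `queue=False`, `cancels` has no effect):

gen_events = []
for m, o in zip(current_models, output):
    # Keep a handle on each generation event so the Stop button can cancel it.
    e = gen_button.click(fn=gen_fn,
                         inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed],
                         outputs=[o])  # queued (the default), so it is cancellable
    gen_events.append(e)

stop_button = gr.Button("Stop 🛑", variant="stop")
# fn=None: no callback needed, the click exists purely to cancel the queued jobs.
stop_button.click(fn=None, inputs=None, outputs=None, cancels=gen_events)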
 