Ephemeral182 committed on
Commit
0cf15c3
·
verified ·
1 Parent(s): 7f26ef5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -25
app.py CHANGED
@@ -8,9 +8,7 @@ import spaces
8
  import torch
9
  from huggingface_hub import login, whoami
10
 
11
- # ─────────────────────────────────────────────
12
- # 全局配置
13
- # ─────────────────────────────────────────────
14
  HF_TOKEN = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACEHUB_API_TOKEN")
15
 
16
  DEFAULT_PIPELINE_PATH = "black-forest-labs/FLUX.1-dev"
@@ -22,7 +20,6 @@ MAX_IMAGE_SIZE = 2048
22
 
23
  logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
24
 
25
- # 认证状态
26
  auth_status = "🔴 Not Authenticated"
27
  if HF_TOKEN:
28
  try:
@@ -34,14 +31,9 @@ if HF_TOKEN:
34
  logging.error(f"HF authentication failed: {e}")
35
  auth_status = f"🔴 Authentication Error: {str(e)}"
36
 
37
- # ─────────────────────────────────────────────
38
- # 在 GPU 子进程 import 阶段就把大模型读进显存
39
- # 只在 GPU 进程执行，CPU 主进程跳过
40
- # ─────────────────────────────────────────────
41
 
42
- # 检测是否在 GPU 环境中运行
43
  def is_gpu_available():
44
- """检测当前环境是否支持 GPU"""
45
  try:
46
  import torch
47
  return torch.cuda.is_available()
@@ -84,11 +76,7 @@ if is_gpu_available():
84
 
85
  print("✅ [GPU init] All models loaded successfully!")
86
 
87
- # ─────────────────────────────────────────────
88
- # Qwen 提示词增强函数
89
- # ─────────────────────────────────────────────
90
  def enhance_prompt_with_qwen(original_prompt):
91
- """使用预加载的 Qwen 模型增强提示词"""
92
  if not is_gpu_available():
93
  return original_prompt
94
 
@@ -141,7 +129,7 @@ Elaborate on each core requirement to create a rich description.
141
  with torch.no_grad():
142
  generated_ids = QWEN_MODEL.generate(
143
  **model_inputs,
144
- max_new_tokens=512, # ← 从 4096 改成 512 够用
145
  temperature=0.6,
146
  top_p=0.9,
147
  do_sample=True,
@@ -165,9 +153,7 @@ Elaborate on each core requirement to create a rich description.
165
  logging.error(f"Qwen enhancement failed: {e}")
166
  return original_prompt
167
 
168
- # ─────────────────────────────────────────────
169
- # 主要生成函数（使用预加载的模型）
170
- # ─────────────────────────────────────────────
171
  @spaces.GPU(duration=300)
172
  def generate_poster(
173
  original_prompt,
@@ -221,9 +207,7 @@ def generate_poster(
221
  logging.error(f"Generation failed: {e}")
222
  return None, f"โŒ Generation failed: {str(e)}", ""
223
 
224
- # ─────────────────────────────────────────────
225
- # Gradio Interface
226
- # ─────────────────────────────────────────────
227
  def create_interface():
228
  """Create Gradio interface"""
229
 
@@ -257,7 +241,7 @@ def create_interface():
257
 
258
  with gr.Column(scale=1):
259
  gr.Markdown("### 2. Results")
260
- image_output = gr.Image(label="Generated Image", type="pil", show_download_button=True, height=340, container=False, elem_classes=["preserve-aspect-ratio"])
261
  recapped_prompt_output = gr.Textbox(label="Final Prompt Used", lines=5, interactive=False)
262
  status_output = gr.Textbox(label="Status Log", lines=4, interactive=False)
263
 
@@ -271,9 +255,6 @@ def create_interface():
271
 
272
  return demo
273
 
274
- # ─────────────────────────────────────────────
275
- # 启动应用
276
- # ─────────────────────────────────────────────
277
  if __name__ == "__main__":
278
  demo = create_interface()
279
  demo.launch(
 
8
  import torch
9
  from huggingface_hub import login, whoami
10
 
11
+
 
 
12
  HF_TOKEN = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACEHUB_API_TOKEN")
13
 
14
  DEFAULT_PIPELINE_PATH = "black-forest-labs/FLUX.1-dev"
 
20
 
21
  logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
22
 
 
23
  auth_status = "🔴 Not Authenticated"
24
  if HF_TOKEN:
25
  try:
 
31
  logging.error(f"HF authentication failed: {e}")
32
  auth_status = f"🔴 Authentication Error: {str(e)}"
33
 
 
 
 
 
34
 
35
+
36
  def is_gpu_available():
 
37
  try:
38
  import torch
39
  return torch.cuda.is_available()
 
76
 
77
  print("✅ [GPU init] All models loaded successfully!")
78
 
 
 
 
79
  def enhance_prompt_with_qwen(original_prompt):
 
80
  if not is_gpu_available():
81
  return original_prompt
82
 
 
129
  with torch.no_grad():
130
  generated_ids = QWEN_MODEL.generate(
131
  **model_inputs,
132
+ max_new_tokens=512,
133
  temperature=0.6,
134
  top_p=0.9,
135
  do_sample=True,
 
153
  logging.error(f"Qwen enhancement failed: {e}")
154
  return original_prompt
155
 
156
+
 
 
157
  @spaces.GPU(duration=300)
158
  def generate_poster(
159
  original_prompt,
 
207
  logging.error(f"Generation failed: {e}")
208
  return None, f"โŒ Generation failed: {str(e)}", ""
209
 
210
+
 
 
211
  def create_interface():
212
  """Create Gradio interface"""
213
 
 
241
 
242
  with gr.Column(scale=1):
243
  gr.Markdown("### 2. Results")
244
+ image_output = gr.Image(label="Generated Image", type="pil", show_download_button=True, height=350, container=False, elem_classes=["preserve-aspect-ratio"])
245
  recapped_prompt_output = gr.Textbox(label="Final Prompt Used", lines=5, interactive=False)
246
  status_output = gr.Textbox(label="Status Log", lines=4, interactive=False)
247
 
 
255
 
256
  return demo
257
 
 
 
 
258
  if __name__ == "__main__":
259
  demo = create_interface()
260
  demo.launch(