Update app.py
app.py
CHANGED
@@ -4,11 +4,14 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 import gc
 import os
 import datetime
+import time

+# --- Configuration ---
 MODEL_ID = "naver-hyperclovax/HyperCLOVAX-SEED-Text-Instruct-0.5B"
 MAX_NEW_TOKENS = 512
 CPU_THREAD_COUNT = 4  # adjust if needed

+# --- Optional: Set CPU Threads ---
 # torch.set_num_threads(CPU_THREAD_COUNT)
 # os.environ["OMP_NUM_THREADS"] = str(CPU_THREAD_COUNT)
 # os.environ["MKL_NUM_THREADS"] = str(CPU_THREAD_COUNT)
@@ -18,60 +21,62 @@ print(f"PyTorch version: {torch.__version__}")
 print(f"Running on device: cpu")
 print(f"Torch Threads: {torch.get_num_threads()}")

+# --- Model and Tokenizer Loading ---
 print(f"--- Loading Model: {MODEL_ID} ---")
 print("This might take a few minutes, especially on the first launch...")

 model = None
 tokenizer = None
+load_successful = False

 try:
+    start_load_time = time.time()
     model = AutoModelForCausalLM.from_pretrained(
         MODEL_ID,
         torch_dtype=torch.float32,
         device_map="cpu",
-        force_download=True #
+        # force_download=True  # commented out; not needed unless there are cache issues
     )
     tokenizer = AutoTokenizer.from_pretrained(
         MODEL_ID,
-        force_download=True #
+        # force_download=True  # commented out
     )
     model.eval()
-
+    load_time = time.time() - start_load_time
+    print(f"--- Model and Tokenizer Loaded Successfully on CPU in {load_time:.2f} seconds ---")
+    load_successful = True

+    # --- Stop Token Configuration ---
     stop_token_strings = ["<|endofturn|>", "<|stop|>"]
     stop_token_ids_list = [tokenizer.convert_tokens_to_ids(token) for token in stop_token_strings]

-
-
-
-
-            stop_token_ids_list.append(tokenizer.eos_token_id)
-        else:
-            print("Warning: tokenizer.eos_token_id is None. Cannot add to stop tokens.")
-    elif tokenizer.eos_token is None:
-        print("Warning: tokenizer.eos_token is not defined.")
-
+    if tokenizer.eos_token_id is not None and tokenizer.eos_token_id not in stop_token_ids_list:
+        stop_token_ids_list.append(tokenizer.eos_token_id)
+    elif tokenizer.eos_token_id is None:
+        print("Warning: tokenizer.eos_token_id is None. Cannot add to stop tokens.")

     stop_token_ids_list = [tid for tid in stop_token_ids_list if tid is not None]

     if not stop_token_ids_list:
-        print("Warning: Could not find any stop token IDs.
-
-
-
+        print("Warning: Could not find any stop token IDs. Using default EOS if available, otherwise generation might not stop correctly.")
+        if tokenizer.eos_token_id is not None:
+            stop_token_ids_list = [tokenizer.eos_token_id]
+        else:
+            print("Error: No stop tokens found, including default EOS. Generation may run indefinitely.")
+            # handle the error or set a default value here if needed

     print(f"Using Stop Token IDs: {stop_token_ids_list}")

 except Exception as e:
     print(f"!!! Error loading model: {e}")
-    if 'model' in locals() and model is not None:
-        del model
-    if 'tokenizer' in locals() and tokenizer is not None:
-        del tokenizer
+    if 'model' in locals() and model is not None: del model
+    if 'tokenizer' in locals() and tokenizer is not None: del tokenizer
     gc.collect()
-
+    # if loading fails before the app starts, consider exiting the process (or other handling) instead of only raising a Gradio error
+    raise gr.Error(f"Failed to load the model {MODEL_ID}. Cannot start the application. Error: {e}")


+# --- System Prompt Definition ---
 def get_system_prompt():
     current_date = datetime.datetime.now().strftime("%Y-%m-%d (%A)")
     return (
@@ -80,72 +85,143 @@ def get_system_prompt():
         f"- Answer the user's questions kindly and in detail, in Korean."
     )

+# --- Warm-up Function ---
+def warmup_model():
+    if not load_successful or model is None or tokenizer is None:
+        print("Skipping warmup: Model not loaded successfully.")
+        return
+
+    print("--- Starting Model Warm-up ---")
+    try:
+        start_warmup_time = time.time()
+        warmup_message = "Hello"
+        system_prompt = get_system_prompt()
+        warmup_chat = [
+            {"role": "tool_list", "content": ""},
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": warmup_message}
+        ]
+
+        inputs = tokenizer.apply_chat_template(
+            warmup_chat,
+            add_generation_prompt=True,
+            return_dict=True,
+            return_tensors="pt"
+        ).to("cpu")
+
+        with torch.no_grad():
+            output_ids = model.generate(
+                **inputs,
+                max_new_tokens=10,  # generate only a few tokens to save time
+                eos_token_id=stop_token_ids_list,
+                pad_token_id=tokenizer.eos_token_id if tokenizer.eos_token_id is not None else tokenizer.pad_token_id,
+                do_sample=False  # sampling is unnecessary for warm-up
+            )
+
+        # Decode the result (optional, for verification)
+        # response = tokenizer.decode(output_ids[0, inputs['input_ids'].shape[1]:], skip_special_tokens=True)
+        # print(f"Warm-up response (decoded): {response}")
+
+        del inputs
+        del output_ids
+        gc.collect()
+        warmup_time = time.time() - start_warmup_time
+        print(f"--- Model Warm-up Completed in {warmup_time:.2f} seconds ---")
+
+    except Exception as e:
+        print(f"!!! Error during model warm-up: {e}")
+        # handled so that a warm-up failure does not block the app from running
+    finally:
+        gc.collect()  # Ensure cleanup even if warmup fails
+
+
+# --- Inference Function ---
 def predict(message, history):
+    """
+    Generates response using HyperCLOVAX based on user message and chat history.
+    Handles chat formatting, generation, decoding, and memory management.
+    Assumes 'history' is in the Gradio 'messages' format: List[List[str | None | Tuple]] or List[Dict]
+    """
+    if model is None or tokenizer is None:
+        return "Error: The model is not loaded."
+
     system_prompt = get_system_prompt()

+    # assume 'history' arrives in the List[Dict] ('messages') format
     chat_history_formatted = [
         {"role": "tool_list", "content": ""},
         {"role": "system", "content": system_prompt}
     ]
-    # history
-    for
-
-
+    # history looks like [{'role': 'user', 'content': '...'}, {'role': 'assistant', 'content': '...'}]
+    for turn in history:
+        # check that each history item is a dict (safer)
+        if isinstance(turn, dict) and "role" in turn and "content" in turn:
+            chat_history_formatted.append(turn)
+        else:
+            # warn about unexpected formats (for debugging)
+            print(f"Warning: Unexpected history format item: {turn}")
+            # add error handling or conversion logic here if needed
+

+    # Add the latest user message
     chat_history_formatted.append({"role": "user", "content": message})

     inputs = None
     output_ids = None

     try:
-        # the model was loaded with device_map="cpu", so the inputs are sent to cpu as well
         inputs = tokenizer.apply_chat_template(
             chat_history_formatted,
             add_generation_prompt=True,
             return_dict=True,
             return_tensors="pt"
-        ).to("cpu") #
+        ).to("cpu")  # Explicitly send to CPU
         input_length = inputs['input_ids'].shape[1]
         print(f"\nInput tokens: {input_length}")

     except Exception as e:
         print(f"!!! Error applying chat template: {e}")
+        # Provide feedback to the user
         return f"Error: A problem occurred while processing the input format. ({e})"

     try:
         print("Generating response...")
+        generation_start_time = time.time()
         with torch.no_grad():
-            # it is common to pass a list to eos_token_id
             output_ids = model.generate(
                 **inputs,
                 max_new_tokens=MAX_NEW_TOKENS,
-                eos_token_id=stop_token_ids_list,
-                pad_token_id=tokenizer.eos_token_id if tokenizer.eos_token_id is not None else tokenizer.pad_token_id,
+                eos_token_id=stop_token_ids_list,
+                pad_token_id=tokenizer.eos_token_id if tokenizer.eos_token_id is not None else tokenizer.pad_token_id,
                 do_sample=True,
                 temperature=0.7,
                 top_p=0.9,
             )
-
+        generation_time = time.time() - generation_start_time
+        print(f"Generation complete in {generation_time:.2f} seconds.")

     except Exception as e:
         print(f"!!! Error during model generation: {e}")
+        # Clean up potentially large tensors in case of error
         if inputs is not None: del inputs
         if output_ids is not None: del output_ids
         gc.collect()
         return f"Error: A problem occurred while generating the response. ({e})"

-    #
+    # Decode the response
+    response = "Error: Failed to generate a response."  # default value
     if output_ids is not None:
-
-
-
-
-
-
-
+        try:
+            new_tokens = output_ids[0, input_length:]
+            response = tokenizer.decode(new_tokens, skip_special_tokens=True)
+            print(f"Output tokens: {len(new_tokens)}")
+            del new_tokens
+        except Exception as e:
+            print(f"!!! Error decoding response: {e}")
+            response = "Error: A problem occurred while decoding the response."


-    #
+    # Clean up memory
     if inputs is not None: del inputs
     if output_ids is not None: del output_ids
     gc.collect()
@@ -153,15 +229,15 @@ def predict(message, history):

     return response

-# --- Gradio Interface ---
+# --- Gradio Interface Setup ---
 print("--- Setting up Gradio Interface ---")

-# resolve the UserWarning and use the latest format
+# specify type='messages' to resolve the UserWarning and use the latest format
 chatbot_component = gr.Chatbot(
     label="HyperCLOVA X SEED (0.5B) Chat",
     bubble_full_width=False,
     height=600,
-    type='messages' #
+    type='messages'  # set explicitly to ensure compatibility with ChatInterface
 )

 examples = [
@@ -171,23 +247,34 @@ examples = [
     ["I'm planning a trip to Jeju Island. Could you suggest a 3-night, 4-day itinerary?"],
 ]

-#
+# unnecessary arguments were removed when creating the ChatInterface
 demo = gr.ChatInterface(
-    fn=predict,
-    chatbot=chatbot_component,
+    fn=predict,  # connect the prediction function
+    chatbot=chatbot_component,  # use the Chatbot component (type='messages' is set)
     title="🇰🇷 Naver HyperCLOVA X SEED (0.5B) Demo",
     description=(
         f"**Model:** {MODEL_ID}\n"
        f"**Environment:** Hugging Face free CPU (16GB RAM)\n"
-        f"**Note:** Because this runs on CPU, responses may take quite a while to generate.\n"
+        f"**Note:** Because this runs on CPU, responses may take quite a while to generate. (Warm-up performed)\n"
         f"The maximum number of generated tokens is limited to {MAX_NEW_TOKENS}."
     ),
     examples=examples,
-    cache_examples=False,
+    cache_examples=False,  # disable example caching on the free tier
     theme="soft",
+    # retry_btn, undo_btn, clear_btn, etc. are not directly supported in recent versions
 )

-# --- Launch
+# --- Application Launch ---
 if __name__ == "__main__":
+    # run the warm-up only if the model loaded successfully
+    if load_successful:
+        warmup_model()
+    else:
+        print("Skipping warm-up because model loading failed.")
+
     print("--- Launching Gradio App ---")
-
+    # queue() is useful for handling multiple users and long-running requests
+    demo.queue().launch(
+        # share=True  # needed when creating a public link (login may be required)
+        # server_name="0.0.0.0"  # to allow access from the local network
+    )
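Note on the history format the updated predict() expects: with type='messages', Gradio passes the history as a list of role/content dictionaries, and predict() prepends the "tool_list" and "system" entries before calling tokenizer.apply_chat_template. The following is a minimal illustrative sketch only; the conversation content and system prompt are invented, and only the structure mirrors app.py.

    # Illustrative sketch: the 'messages'-format history assumed by predict()
    # and how the chat list is assembled from it (values below are made up).
    history = [
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi, how can I help you?"},
    ]
    message = "Recommend a 3-night, 4-day itinerary for Jeju Island."

    chat_history_formatted = [
        {"role": "tool_list", "content": ""},
        {"role": "system", "content": "You are a helpful assistant."},  # app.py uses get_system_prompt()
    ]
    for turn in history:
        # keep only well-formed role/content dicts, as predict() does
        if isinstance(turn, dict) and "role" in turn and "content" in turn:
            chat_history_formatted.append(turn)
    chat_history_formatted.append({"role": "user", "content": message})

    # In app.py this list is then passed to:
    # tokenizer.apply_chat_template(chat_history_formatted, add_generation_prompt=True,
    #                               return_dict=True, return_tensors="pt")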