Update app.py
app.py (CHANGED)
@@ -87,15 +87,13 @@ import torch
 import re
 import time
 
-#
+# Limit CPU thread
 torch.set_num_threads(1)
 
 app = FastAPI()
 
 @app.on_event("startup")
 def startup_event():
-    print("🔁 Loading model...")
-
     global tokenizer, model, device
 
     model_path = "longvnhue1/facebook-m2m100_418M-fine_tuning"
@@ -105,7 +103,7 @@ def startup_event():
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     model.to(device)
 
-    print("
+    print("Model loaded and ready.")
 
 def split_by_words_and_dot(text, min_words=125, max_words=160, fallback_words=150):
     words = re.findall(r'\S+|\n', text)  # keep \n as a "word"
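For context, the lines that actually load the checkpoint sit in the unchanged code between the two hunks and are not shown in this commit. A minimal sketch of what that startup handler plausibly looks like, assuming the Space loads the model with the standard transformers from_pretrained() API (M2M100Tokenizer / M2M100ForConditionalGeneration are assumptions, not confirmed by the diff):

# Hypothetical sketch; only the commented/print lines are known from this commit.
import torch
from fastapi import FastAPI
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

# Limit CPU thread
torch.set_num_threads(1)

app = FastAPI()

@app.on_event("startup")
def startup_event():
    global tokenizer, model, device

    model_path = "longvnhue1/facebook-m2m100_418M-fine_tuning"
    # Assumption: tokenizer and model are loaded with from_pretrained();
    # the loading lines are elided from the diff context.
    tokenizer = M2M100Tokenizer.from_pretrained(model_path)
    model = M2M100ForConditionalGeneration.from_pretrained(model_path)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    print("Model loaded and ready.")

Under that reading, the commit itself only changes the comment above torch.set_num_threads(1) and replaces the emoji log messages around model loading with plain text.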