Update app.py
app.py CHANGED
@@ -25,6 +25,14 @@ torch.set_num_threads(4)
 torch.backends.mkldnn.enabled = True
 torch.backends.cuda.matmul.allow_tf32 = True
 
+
+def extract_assistant_response(text):
+    # Split the text on the "<|assistant|>" marker
+    parts = text.split("<|assistant|>")
+
+    # If the "<|assistant|>" marker is present, return what follows it
+    return parts[1].strip() if len(parts) > 1 else ""
+
 @app.post("/process")
 async def transcribe_audio(file: UploadFile = File(...)):
     # Create a temporary file
@@ -53,9 +61,9 @@ async def transcribe_audio(file: UploadFile = File(...)):
 
 
     logging.info(f"Response: {response}")
-
 
-
+
+    generated_text = extract_assistant_response(response[0]['generated_text'])
 
     #parts = generated_text.split("\n", 1)
 
@@ -88,8 +96,7 @@ async def get_message(request: Request):
 
 
 
-    generated_text_msg = response_msg[0]['generated_text']
-
+    generated_text_msg = extract_assistant_response(response_msg[0]['generated_text'])
 
     return generated_text_msg
 
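For reference, a minimal usage sketch of the new helper. The function body is exactly what this commit adds; the sample strings below are hypothetical and not taken from this Space's actual model output:

# Helper added in this commit: keep only the text that follows the
# chat template's "<|assistant|>" marker.
def extract_assistant_response(text):
    # Split the text on the "<|assistant|>" marker
    parts = text.split("<|assistant|>")
    # If the marker is present, return what follows it; otherwise an empty string
    return parts[1].strip() if len(parts) > 1 else ""

# Hypothetical generated_text value containing the marker:
sample = "<|user|>\nTranscribe this audio.\n<|assistant|>\nHere is the transcription."
print(extract_assistant_response(sample))        # -> "Here is the transcription."
print(extract_assistant_response("no marker"))   # -> ""

The same helper is now shared by transcribe_audio and get_message, so both endpoints return only the assistant portion of the pipeline output rather than the full generated text.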