Update app.py
Browse files
app.py
CHANGED
@@ -16,7 +16,7 @@ from urllib.parse import urlparse
|
|
16 |
from sklearn.cluster import AgglomerativeClustering
|
17 |
|
18 |
# Step 1: Set AssemblyAI API Key
|
19 |
-
aai.settings.api_key = "
|
20 |
transcriber = aai.Transcriber()
|
21 |
|
22 |
def transcribe_audio(audio_file_path):
|
@@ -52,7 +52,16 @@ def summarize_text(text, source_language, target_language):
|
|
52 |
return summary
|
53 |
|
54 |
# Step 4: Key Points Extraction with spaCy
|
55 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
56 |
|
57 |
def extract_key_points(text):
|
58 |
doc = nlp(text)
|
@@ -198,28 +207,4 @@ def process_meeting(file, url, language):
|
|
198 |
|
199 |
return transcription, translated_text, key_points, summary, speaker_details, sentiment
|
200 |
|
201 |
-
# Step 9: Launch Gradio
# Assemble the input/output widget lists up front so the Interface call stays short.
_input_widgets = [
    gr.File(label="Upload Meeting Recording"),
    gr.Textbox(label="Enter Meeting URL"),
    gr.Radio(["english", "urdu"], label="Select Summary Language"),
]
_output_widgets = [
    gr.Textbox(label="Transcription", lines=20),
    gr.Textbox(label="Translated Text", lines=20),
    gr.Textbox(label="Key Points", lines=20),
    gr.Textbox(label="Summary", lines=20),
    gr.Textbox(label="Speakers", lines=20),
    gr.Textbox(label="Sentiment", lines=1),
]

# Wire process_meeting to the web UI; the description renders as HTML in Gradio.
iface = gr.Interface(
    fn=process_meeting,
    inputs=_input_widgets,
    outputs=_output_widgets,
    title="Smart AI Meeting Assistant",
    description="""
    <div style='text-align: center;'>by Ayesha Ameen & Sana Sadiq</div>
    <br>Upload your meeting recording or enter a publicly accessible URL and choose the summary language (English or Urdu).
    """,
)

if __name__ == "__main__":
    # share=True publishes a temporary public URL; debug=True surfaces tracebacks in the UI.
    iface.launch(share=True, debug=True)
|
|
|
16 |
from sklearn.cluster import AgglomerativeClustering
|
17 |
|
18 |
# Step 1: Set AssemblyAI API Key
# SECURITY: never hard-code an API key in source control — the key previously
# committed here was exposed publicly and must be revoked in the AssemblyAI
# dashboard. Read the replacement key from the environment instead.
import os

aai.settings.api_key = os.environ.get("ASSEMBLYAI_API_KEY", "")
if not aai.settings.api_key:
    # Fail loudly at startup rather than with an opaque auth error mid-request.
    print("Warning: ASSEMBLYAI_API_KEY is not set; transcription requests will fail.")
transcriber = aai.Transcriber()
|
21 |
|
22 |
def transcribe_audio(audio_file_path):
|
|
|
52 |
return summary
|
53 |
|
54 |
# Step 4: Key Points Extraction with spaCy
def ensure_spacy_model():
    """Return the small English spaCy pipeline, downloading it on first use.

    spacy.load raises OSError when the model package is absent (e.g. on a
    fresh deployment), so we fetch it once and retry the load.
    """
    model_name = "en_core_web_sm"
    try:
        return spacy.load(model_name)
    except OSError:
        # Model not installed in this environment yet: download, then load.
        from spacy.cli import download
        download(model_name)
        return spacy.load(model_name)

# Shared pipeline instance used by the extraction helpers below.
nlp = ensure_spacy_model()
|
65 |
|
66 |
def extract_key_points(text):
|
67 |
doc = nlp(text)
|
|
|
207 |
|
208 |
return transcription, translated_text, key_points, summary, speaker_details, sentiment
|
209 |
|
210 |
+
# Step 9: Launch Gradio
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|