Update app.py
app.py CHANGED
@@ -1,90 +1,90 @@
 import nltk
 nltk.download('punkt_tab')
 nltk.download('averaged_perceptron_tagger_eng')
 import streamlit as st
 from streamlit_lottie import st_lottie
 import requests
 import time
 from utils import (
     get_bing_news_articles,
     analyze_sentiment,
     extract_topics,
     comparative_analysis,
     convert_text_to_hindi_tts,
 )
 from collections import Counter
 # Load Lottie Animation
 def load_lottie_url(url):
     r = requests.get(url)
     if r.status_code != 200:
         return None
     return r.json()
 
 lottie_animation = load_lottie_url("https://lottie.host/d02e4bd8-cd9c-401e-b143-17fc0ad924a8/o2dLZzU9oO.json")
 
 # UI Layout
 st_lottie(lottie_animation, height=200)
 st.markdown("<h1 style='text-align: center; color: #4CAF50;'>Sentiment Analysis Dashboard</h1>", unsafe_allow_html=True)
 
 
 st.title("News Summarization & Sentiment Analysis with Hindi TTS")
 st.write("Enter a company name to fetch news articles, analyze sentiment, and generate a Hindi summary.")
 
-company = st.text_input("Company Name"
+company = st.text_input("Company Name")
 
 if st.button("Generate Report"):
     with st.spinner("Fetching news articles..."):
         articles = get_bing_news_articles(company, num_articles=10)
 
     if not articles:
         st.error("No articles found or there was an error fetching the articles.")
     else:
         # Process each article: perform sentiment analysis.
         for article in articles:
             combined_text = article["title"]
             if article["summary"]:
                 combined_text += ". " + article["summary"]
             sentiment, scores = analyze_sentiment(combined_text)
             article["sentiment"] = sentiment
             article["sentiment_scores"] = scores
             # Topics are still extracted but not used in the final summary.
             article["topics"] = extract_topics(combined_text)
             time.sleep(0.5)
 
         # Display individual article details.
         st.subheader("Extracted Articles")
         for idx, article in enumerate(articles, start=1):
             st.markdown(f"**Article {idx}:**")
             st.write("Title:", article["title"])
             st.write("Summary:", article["summary"])
             st.write("Source:", article["source"])
             st.write("URL:", article["url"])
             st.write("Sentiment:", article["sentiment"])
             st.markdown("---")
 
         # Perform comparative analysis for internal metrics (sentiment distribution, coverage insights)
         analysis = comparative_analysis(articles)
         st.subheader("Comparative Analysis")
         st.write("**Sentiment Distribution:**", analysis["Sentiment Distribution"])
         st.write("**Coverage Differences:**", analysis["Coverage Differences"])
 
         # Create a final Hindi summary report that aggregates all the articles.
         total_articles = len(articles)
         dist = analysis["Sentiment Distribution"]
         final_summary = (
             f"कुल {total_articles} लेखों में से, {dist.get('Positive', 0)} लेख सकारात्मक, "
             f"{dist.get('Negative', 0)} लेख नकारात्मक, और {dist.get('Neutral', 0)} लेख तटस्थ हैं।\n"
             "कई लेखों में विक्रय में वृद्धि और आर्थिक विकास पर जोर दिया गया है, जबकि कुछ लेखों में नियामकीय चुनौतियाँ और कानूनी मुद्दों पर चर्चा की गई है।\n"
             "संपूर्ण रूप से, यह रिपोर्ट दर्शाती है कि कंपनी का समाचार कवरेज मुख्य रूप से सकारात्मक है, "
             "जो संभावित आर्थिक विकास के संकेत देता है।"
         )
 
         st.subheader("Final Summary Report")
         st.markdown(final_summary)
 
         # Convert the final summary into Hindi speech.
         with st.spinner("Generating Hindi TTS audio..."):
             audio_file = convert_text_to_hindi_tts(final_summary, output_file="tesla_summary_hi.mp3")
 
         st.success("Audio summary generated!")
         st.audio(audio_file)
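The only functional change in this commit is the closing parenthesis added to the st.text_input("Company Name") call on line 33; the previous revision left the call unclosed, so Python raised a SyntaxError as soon as the Space imported app.py and the app never started. For reference, the hard-coded Hindi final_summary reads roughly: "Out of a total of {total_articles} articles, {Positive} are positive, {Negative} negative, and {Neutral} neutral. Many articles emphasize sales growth and economic development, while some discuss regulatory challenges and legal issues. Overall, this report shows that the company's news coverage is mainly positive, which signals potential economic growth." A quick way to catch this class of error before redeploying is a standalone syntax check; the sketch below is an assumption about local tooling (it is not part of the Space) and uses only the Python standard library:

# check_syntax.py -- hypothetical helper script, not part of the Space.
# py_compile parses app.py without executing it, so the unclosed
# parenthesis in the old revision of line 33 is reported immediately.
import py_compile
import sys

try:
    py_compile.compile("app.py", doraise=True)  # raises PyCompileError on a SyntaxError
    print("app.py parses cleanly")
except py_compile.PyCompileError as err:
    print(f"Syntax error in app.py:\n{err}")
    sys.exit(1)

Running this against the old revision prints the SyntaxError for line 33, while the committed revision passes; the one-liner python -m py_compile app.py does the same check from the command line.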