adding files

- .DS_Store +0 -0
- Dockerfile +11 -0
- LICENSE +21 -0
- README.md +54 -5
- app.py +186 -0
- data/Instagram_transurban_phantombuster.csv +18 -0
- data/LinkedIn_transurban_phantombuster.csv +50 -0
- data/Twitter_transurban_phantombuster.csv +19 -0
- requirements.txt +23 -0
- tools/.DS_Store +0 -0
- tools/__pycache__/sentiment_analysis_util.cpython-311.pyc +0 -0
- tools/sentiment_analysis_util.py +306 -0
.DS_Store
ADDED
Binary file (6.15 kB)
Dockerfile
ADDED
@@ -0,0 +1,11 @@
FROM python:3.9
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH
WORKDIR $HOME/app
COPY --chown=user . $HOME/app
COPY ./requirements.txt $HOME/app/requirements.txt
RUN pip install -r requirements.txt
COPY . .
CMD ["streamlit", "run", "app.py", "--server.port", "7860"]
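To exercise the image locally before pushing, a build-and-run along these lines should work (the image tag and the env file are illustrative; Spaces builds the image itself and serves the app on port 7860):

docker build -t newssearch .
docker run -p 7860:7860 --env-file .env newssearch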
LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 Katerina Gawthorpe

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md
CHANGED
@@ -1,10 +1,59 @@
 ---
 title: NewsSearch
-emoji:
-colorFrom:
+emoji: 📰
+colorFrom: blue
 colorTo: green
-sdk:
-
+sdk: streamlit
+sdk_version: 1.37.1
 ---
 
-
+# 🔍 NewsSearch
+----
+NewsSearch is a comprehensive news aggregation and analysis tool that helps you gather and analyze news from multiple sources. This app enables you to search for any topic and get relevant news articles, discussions, and sentiment analysis from various platforms.
+
+### 🚀 Features
+
+- **Multi-Source News Aggregation**: Collect news from various sources including:
+  - News websites and articles
+  - Reddit discussions
+  - Social media mentions
+
+- **Smart Analysis**:
+  - Sentiment analysis of news coverage
+  - Topic relevance scoring
+  - Trending topics identification
+
+- **User-Friendly Interface**:
+  - Clean, intuitive design
+  - Easy-to-read article summaries
+  - Quick access to original sources
+
+### 🧑‍💻 Usage
+
+1. Enter your search topic in the sidebar
+2. Choose your preferred news sources
+3. View aggregated results including:
+   - Latest news articles
+   - Reddit discussions
+   - Sentiment analysis
+   - Related topics
+
+### 📊 Results
+
+For each search, you'll get:
+- Direct links to news articles
+- Summary of key points
+- Sentiment indicators
+- Source credibility ratings
+- Related discussions
+
+### 💬 Feedback
+
+For questions, suggestions, or feedback, please contact [Your Contact Information].
+
+### 🔄 Updates
+
+Check back regularly for new features and improvements to the news search and analysis capabilities.
+
+---
+📰 Start exploring news now!
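The app and its tools read several variables from the environment at startup (names taken from app.py and tools/sentiment_analysis_util.py). A sample .env for local runs; every value below is a placeholder:

OPENAI_API_KEY=sk-...
TAVILY_API_KEY=tvly-...
REDDIT_CLIENT_ID=...
REDDIT_CLIENT_SECRET=...
REDDIT_USER_AGENT=newssearch-local
DAYS_TO_FETCH_NEWS=7d
NO_OF_NEWS_ARTICLES_TO_FETCH=5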
app.py
ADDED
@@ -0,0 +1,186 @@
from openai import OpenAI
import streamlit as st
from langchain_openai import ChatOpenAI
from tools import sentiment_analysis_util
import numpy as np
import pandas as pd
from dotenv import load_dotenv
import os

st.set_page_config(page_title="LangChain Agent", layout="wide")
load_dotenv()
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]

from langchain_core.runnables import RunnableConfig

st.title("💬 News Search")
st.image('el_pic.png')

#@st.cache_resource
if "messages" not in st.session_state:
    st.session_state["messages"] = [{"role": "system", "content": """💬 How can I help you?"""}]

# Display all previous messages
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

#initialize_session_state()


sideb = st.sidebar
with st.sidebar:
    prompt = st.text_input("Enter topic for sentiment analysis: ")

check1 = sideb.button(f"analyze {prompt}")

if check1:
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    # ========================== Sentiment analysis
    # Perform sentiment analysis on the news & predict dominant sentiment along with plotting the sentiment breakdown chart
    # Downloading from reddit

    # Downloading from alpaca
    if len(prompt.split(' ')) < 3:
        st.write('I am analyzing Google News ...')
        news_articles = sentiment_analysis_util.fetch_news(str(prompt))
    st.write('Now, I am analyzing Reddit ...')
    reddit_news_articles = sentiment_analysis_util.fetch_reddit_news(prompt)
    # Fetch news articles
    tavily_news_articles = sentiment_analysis_util.fetch_tavily_news(prompt)

    # Handle empty results
    if not tavily_news_articles:
        print("No news articles found. Try adjusting your search terms.")
    else:
        # Process the articles
        for url in tavily_news_articles:
            try:
                # Your existing article processing code
                st.write(f"Article URL: {url}")
                # ... rest of your processing ...
            except Exception as e:
                st.error(f"Error processing article {url}: {e}")
                continue
    analysis_results = []

    # Perform sentiment analysis for each Google News article
    if len(prompt.split(' ')) < 3:
        for article in news_articles:
            if prompt.lower()[0:6] in article['News_Article'].lower():
                sentiment_analysis_result = sentiment_analysis_util.analyze_sentiment(article['News_Article'])

                # Display sentiment analysis results
                #print(f'News Article: {sentiment_analysis_result["News_Article"]} : Sentiment: {sentiment_analysis_result["Sentiment"]}', '\n')

                result = {
                    'News_Article': sentiment_analysis_result["News_Article"],
                    'Sentiment': sentiment_analysis_result["Sentiment"][0]['label'],
                    'Index': sentiment_analysis_result["Sentiment"][0]['score'],
                    'URL': article['URL']
                }

                analysis_results.append(result)

    articles_url = []
    for article in reddit_news_articles:
        if prompt.lower()[0:6] in article.lower():
            sentiment_analysis_result_reddit = sentiment_analysis_util.analyze_sentiment(article)

            # Display sentiment analysis results
            #print(f'News Article: {sentiment_analysis_result_reddit["News_Article"]} : Sentiment: {sentiment_analysis_result_reddit["Sentiment"]}', '\n')

            result = {
                'News_Article': sentiment_analysis_result_reddit["News_Article"],
                'Index': np.round(sentiment_analysis_result_reddit["Sentiment"][0]['score'], 2)
            }
            analysis_results.append(np.append(result, np.append(article.split('URL:')[-1:], ((article.split('Date: ')[-1:])[0][0:10]))))

    for article in tavily_news_articles:
        if prompt.lower()[0:5] in article:
            sentiment_analysis_result_tavily = sentiment_analysis_util.analyze_sentiment(article)

            # Display sentiment analysis results
            #print(f'News Article: {sentiment_analysis_result_tavily["News_Article"]} : Sentiment: {sentiment_analysis_result_tavily["Sentiment"]}', '\n')

            result = {
                'News_Article': sentiment_analysis_result_tavily["News_Article"],
                'Index': np.round(sentiment_analysis_result_tavily["Sentiment"][0]['score'], 2)
            }
            analysis_results.append(np.append(result, np.append(article.split('URL:')[-1:], ((article.split('Date: ')[-1:])[0][0:10]))))
    print('is_present tavily 2', analysis_results)

    # # LinkedIn and Twitter previously downloaded from phantombuster
    # st.write('Now I am analyzing LinkedIn and Twitter data ...')
    # df = pd.read_csv('./data/LinkedIn_transurban_phantombuster.csv', index_col='postTimestamp', parse_dates=True, infer_datetime_format=True)
    # df = df.sort_index(ascending=False)
    # df = df.dropna()
    # from tools import sentiment_analysis_util
    # for linkedin_news in df['postContent']:
    #     print(linkedin_news)
    #     news_article = {
    #         'News_Article': linkedin_news,
    #         'URL': df.loc[df['postContent'] == linkedin_news]['postUrl'][0],
    #         'date': df.loc[df['postContent'] == linkedin_news].index[0]}
    #     if prompt.lower()[0:6] in linkedin_news.lower():
    #         sentiment_analysis_result = sentiment_analysis_util.analyze_sentiment(news_article)

    #         news_article["Sentiment"] = sentiment_analysis_result["Sentiment"][0]['label']
    #         news_article["Index"] = sentiment_analysis_result["Sentiment"][0]['score']

    #         analysis_results.append(news_article)

    # count = 0
    # df = pd.read_csv('./data/Twitter_transurban_phantombuster.csv', index_col='tweetDate', parse_dates=True, infer_datetime_format=True)
    # df = df.sort_index(ascending=False)
    # df = df.dropna()
    # from tools import sentiment_analysis_util
    # for twitter_news in df['text']:
    #     print(twitter_news)
    #     news_article = {
    #         'News_Article': twitter_news,
    #         'URL': df['tweetLink'][count],
    #         'date': df.iloc[count:count+1, :].index[0]}
    #     if prompt.lower()[0:6] in twitter_news.lower():
    #         sentiment_analysis_result = sentiment_analysis_util.analyze_sentiment(news_article)
    #         news_article["Sentiment"] = sentiment_analysis_result["Sentiment"][0]['label']
    #         news_article["Index"] = sentiment_analysis_result["Sentiment"][0]['score']

    #         analysis_results.append(news_article)
    #     count += 1


    # Generate summarized message rationalizing the dominant sentiment
    #st.write(analysis_results)
    summary = sentiment_analysis_util.generate_summary_of_sentiment(analysis_results)  #, dominant_sentiment)
    st.chat_message("assistant").write(summary)
    st.session_state.messages.append({"role": "assistant", "content": summary})
    #answers=np.append(res["messages"][-1].content,summary)

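# Sketch (hypothetical, for illustration only): analysis_results mixes plain
# dicts (Google News branch) with numpy object arrays built via np.append
# (Reddit and Tavily branches), so downstream code sees mixed types. A helper
# like the one below would keep every entry a dict with the same fields;
# make_result is not defined anywhere in this repo.
#
# def make_result(text, sentiment, url='', date=''):
#     return {
#         'News_Article': text,
#         'Sentiment': sentiment[0]['label'],   # analyze_sentiment(...)["Sentiment"] is [{'label', 'score'}]
#         'Index': np.round(sentiment[0]['score'], 2),
#         'URL': url,
#         'Date': date,
#     }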
client = OpenAI(api_key=OPENAI_API_KEY)  # OpenAI SDK client; the streaming call below uses client.chat.completions.create

if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-4o"

if prompt := st.chat_input("Any other questions? "):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        stream = client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            stream=True,
        )
        response = st.write_stream(stream)
    st.session_state.messages.append({"role": "assistant", "content": response})
data/Instagram_transurban_phantombuster.csv
ADDED
@@ -0,0 +1,18 @@
postUrl,description,commentCount,likeCount,location,locationId,pubDate,likedByViewer,isSidecar,type,caption,profileUrl,username,fullName,imgUrl,postId,query,timestamp,sidecarMedias
https://www.instagram.com/p/DCBe77wPFVs/,Just taking in the view.,1,11,,,2024-11-06T08:27:06.000Z,false,false,Photo,"Photo by Transurban on November 06, 2024. Kan een afbeelding zijn van tekst.",https://www.instagram.com/transurban,transurban,Transurban,https://scontent-ams4-1.cdninstagram.com/v/t51.2885-15/465804089_573051211874695_7956160915238081737_n.jpg?stp=dst-jpg_e35&efg=eyJ2ZW5jb2RlX3RhZyI6ImltYWdlX3VybGdlbi4xMDgweDEzNTAuc2RyLmYyODg1LmRlZmF1bHRfaW1hZ2UifQ&_nc_ht=scontent-ams4-1.cdninstagram.com&_nc_cat=107&_nc_ohc=Msbz53hR6-QQ7kNvgEoVR3p&_nc_gid=6d24b235055a416a9388d12a22409a86&edm=ACWDqb8BAAAA&ccb=7-5&ig_cache_key=MzQ5NTIxMDg0NTgyMDcwNDEwOA%3D%3D.3-ccb7-5&oh=00_AYDILlXQ00qBFFtWj5y8FtDlNIT89EDfy49zqN2USPAX8w&oe=67327CFF&_nc_sid=ee9879,3495210845820704108,https://www.instagram.com/transurban/,2024-11-07T11:57:36.575Z,
https://www.instagram.com/p/DByGveBsO9C/,"Blue trees x NorthConnex
Skipping 21 sets of lights for 4 years.",1,9,,,2024-10-31T09:07:04.000Z,false,false,Photo,"Photo by Transurban on October 31, 2024. Kan een afbeelding zijn van terminal en tekst.",https://www.instagram.com/transurban,transurban,Transurban,https://scontent-ams4-1.cdninstagram.com/v/t51.2885-15/465067084_1090410276013819_5453122601792700045_n.jpg?stp=dst-jpg_e35&efg=eyJ2ZW5jb2RlX3RhZyI6ImltYWdlX3VybGdlbi4xMDgweDEzNTAuc2RyLmYyODg1LmRlZmF1bHRfaW1hZ2UifQ&_nc_ht=scontent-ams4-1.cdninstagram.com&_nc_cat=107&_nc_ohc=UcSFIJhANk0Q7kNvgHUxrX5&_nc_gid=6d24b235055a416a9388d12a22409a86&edm=ACWDqb8BAAAA&ccb=7-5&ig_cache_key=MzQ5MDg4MjMxMTUwMDY1NjQ1MA%3D%3D.3-ccb7-5&oh=00_AYAWxg-aNNAtWP1ckWsYpz5CAc3e-XbFUVYTRANCE0oZug&oe=67328E58&_nc_sid=ee9879,3490882311500656450,https://www.instagram.com/transurban/,2024-11-07T11:57:36.575Z,
https://www.instagram.com/p/DBvcAduNB5R/,'Tis greener on the other side.,1,14,Gateway Bridge,1660123460959642,2024-10-30T08:15:11.000Z,false,true,Photo,"Photo by Transurban on October 30, 2024. Kan een afbeelding zijn van 1 persoon, brug en tekst.",https://www.instagram.com/transurban,transurban,Transurban,https://scontent-ams2-1.cdninstagram.com/v/t51.2885-15/465091223_394115483773302_4945461272489960419_n.jpg?stp=dst-jpg_e35&efg=eyJ2ZW5jb2RlX3RhZyI6ImltYWdlX3VybGdlbi4xMDgweDEzNTAuc2RyLmYyODg1LmRlZmF1bHRfaW1hZ2UifQ&_nc_ht=scontent-ams2-1.cdninstagram.com&_nc_cat=106&_nc_ohc=fKzT32BWEIMQ7kNvgHKcMM3&_nc_gid=6d24b235055a416a9388d12a22409a86&edm=ACWDqb8BAAAA&ccb=7-5&ig_cache_key=MzQ5MDEzMTM2MjgzNDY1NDAzNg%3D%3D.3-ccb7-5&oh=00_AYAGtm-VzmfWZtyqWslaXbVOJ9FkcTz6teYBAiMczrEudg&oe=67328344&_nc_sid=ee9879,3490131362834654036,https://www.instagram.com/transurban/,2024-11-07T11:57:36.575Z,2
https://www.instagram.com/p/DBvcAduNB5R/,'Tis greener on the other side.,1,14,Gateway Bridge,1660123460959642,2024-10-30T08:15:11.000Z,false,true,Photo,"Photo by Transurban on October 30, 2024. Kan een afbeelding zijn van 1 persoon, brug en tekst.",https://www.instagram.com/transurban,transurban,Transurban,https://scontent-ams4-1.cdninstagram.com/v/t51.2885-15/465004252_570230218855023_772004561391457879_n.jpg?stp=dst-jpg_e35&efg=eyJ2ZW5jb2RlX3RhZyI6ImltYWdlX3VybGdlbi4xMDgweDEzNTAuc2RyLmYyODg1LmRlZmF1bHRfaW1hZ2UifQ&_nc_ht=scontent-ams4-1.cdninstagram.com&_nc_cat=103&_nc_ohc=vZ_7Ws5AoLoQ7kNvgEZKVWw&_nc_gid=6d24b235055a416a9388d12a22409a86&edm=ACWDqb8BAAAA&ccb=7-5&ig_cache_key=MzQ5MDEzMTM5NDE1NzY4MTM0MA%3D%3D.3-ccb7-5&oh=00_AYCHCUfFiMMowNq1jxPc9MM2pHNToUoTDhhit8sYcd2LOw&oe=67328141&_nc_sid=ee9879,3490131394157681340,https://www.instagram.com/transurban/,2024-11-07T11:57:36.575Z,2
https://www.instagram.com/p/DBs87BKtBTp/,We're watching the road 24/7.,1,9,,,2024-10-29T09:05:04.000Z,false,false,Photo,"Photo by Transurban on October 29, 2024. Kan een afbeelding zijn van 1 persoon, scherm, radar en tekst.",https://www.instagram.com/transurban,transurban,Transurban,https://scontent-ams2-1.cdninstagram.com/v/t51.2885-15/464984369_1062477795519716_2450852666003621794_n.jpg?stp=dst-jpg_e35&efg=eyJ2ZW5jb2RlX3RhZyI6ImltYWdlX3VybGdlbi4xMDgweDEzNTAuc2RyLmYyODg1LmRlZmF1bHRfaW1hZ2UifQ&_nc_ht=scontent-ams2-1.cdninstagram.com&_nc_cat=108&_nc_ohc=Om3r0wA6epUQ7kNvgH3V7eo&_nc_gid=6d24b235055a416a9388d12a22409a86&edm=ACWDqb8BAAAA&ccb=7-5&ig_cache_key=MzQ4OTQzMTc0OTc5ODQwMTI1Nw%3D%3D.3-ccb7-5&oh=00_AYAZI7gZ9E52sGC2uUVvVoh1T9lKUaW2h0Ov64lK8EPQcA&oe=673270C9&_nc_sid=ee9879,3489431749798401257,https://www.instagram.com/transurban/,2024-11-07T11:57:36.575Z,
https://www.instagram.com/p/DBgEZwFJs_N/,Stop feeding the birds.,3,14,The Big Cheese stick,287680039,2024-10-24T09:00:19.000Z,false,false,Photo,"Photo by Transurban in The Big Cheese stick. Kan een afbeelding zijn van weg, straat, schemering, lantaarnpaal en tekst.",https://www.instagram.com/transurban,transurban,Transurban,https://scontent-ams2-1.cdninstagram.com/v/t51.2885-15/463998702_28204362599162672_5721384923876325116_n.jpg?stp=dst-jpg_e35&efg=eyJ2ZW5jb2RlX3RhZyI6ImltYWdlX3VybGdlbi4xMDgweDEzNTAuc2RyLmYyODg1LmRlZmF1bHRfaW1hZ2UifQ&_nc_ht=scontent-ams2-1.cdninstagram.com&_nc_cat=108&_nc_ohc=_84FiPCUyX4Q7kNvgERwIgy&_nc_gid=6d24b235055a416a9388d12a22409a86&edm=ACWDqb8BAAAA&ccb=7-5&ig_cache_key=MzQ4NTgwNTQ3MzM4Mzc2MzkxNw%3D%3D.3-ccb7-5&oh=00_AYAVr0Pk6tSas8tKcaMaA9iTjN3Xz9MAPJWiuQnn4_IoWQ&oe=6732780C&_nc_sid=ee9879,3485805473383763917,https://www.instagram.com/transurban/,2024-11-07T11:57:36.575Z,
https://www.instagram.com/p/DA-qK-rtxUR/,Iconic drive.,4,11,Sound Tube,10224061,2024-10-11T09:36:09.000Z,false,true,Photo,"Photo by Transurban on October 11, 2024. Kan een afbeelding zijn van limousine, nacht en tekst.",https://www.instagram.com/transurban,transurban,Transurban,https://scontent-ams4-1.cdninstagram.com/v/t51.2885-15/462862786_536696355768739_8931578429193952708_n.jpg?stp=dst-jpg_e35&efg=eyJ2ZW5jb2RlX3RhZyI6ImltYWdlX3VybGdlbi4xMDgweDEzNTAuc2RyLmYyODg1LmRlZmF1bHRfaW1hZ2UifQ&_nc_ht=scontent-ams4-1.cdninstagram.com&_nc_cat=107&_nc_ohc=5jiISu3ghyIQ7kNvgGqP7kl&_nc_gid=6d24b235055a416a9388d12a22409a86&edm=ACWDqb8BAAAA&ccb=7-5&ig_cache_key=MzQ3NjQwMTM4OTk3NjI2MjA3Mw%3D%3D.3-ccb7-5&oh=00_AYBGed8tHXTcW95RWDnADsG1Mis4MwsWZso75hEVO21a0A&oe=673267AA&_nc_sid=ee9879,3476401389976262073,https://www.instagram.com/transurban/,2024-11-07T11:57:36.575Z,2
https://www.instagram.com/p/DA-qK-rtxUR/,Iconic drive.,4,11,Sound Tube,10224061,2024-10-11T09:36:09.000Z,false,true,Photo,"Photo by Transurban on October 11, 2024. Kan een afbeelding zijn van limousine, nacht en tekst.",https://www.instagram.com/transurban,transurban,Transurban,https://scontent-ams4-1.cdninstagram.com/v/t51.2885-15/462744719_539689995352838_3635766472495674316_n.jpg?stp=dst-jpg_e35&efg=eyJ2ZW5jb2RlX3RhZyI6ImltYWdlX3VybGdlbi4xMDgweDEzNTAuc2RyLmYyODg1LmRlZmF1bHRfaW1hZ2UifQ&_nc_ht=scontent-ams4-1.cdninstagram.com&_nc_cat=102&_nc_ohc=AmclIxdvmIwQ7kNvgFS0brq&_nc_gid=6d24b235055a416a9388d12a22409a86&edm=ACWDqb8BAAAA&ccb=7-5&ig_cache_key=MzQ3NjQwMTQxNTE1ODc5NDk5OA%3D%3D.3-ccb7-5&oh=00_AYDJOVWUK1ruzJGRKOmLg7CN2-oc9yprtIv4rt44_FudbA&oe=6732754D&_nc_sid=ee9879,3476401415158794998,https://www.instagram.com/transurban/,2024-11-07T11:57:36.575Z,2
https://www.instagram.com/p/DA22wFRtIbW/,That morning light 🙌,1,10,Gateway Bridge,1660123460959642,2024-10-08T08:52:08.000Z,false,true,Photo,"Photo by Transurban on October 08, 2024. Kan een afbeelding zijn van water, brug en tekst.",https://www.instagram.com/transurban,transurban,Transurban,https://scontent-ams2-1.cdninstagram.com/v/t51.2885-15/462172235_1720153952116048_4988073154950750775_n.jpg?stp=dst-jpg_e35&efg=eyJ2ZW5jb2RlX3RhZyI6ImltYWdlX3VybGdlbi4xMDgweDEzNTAuc2RyLmYyODg1LmRlZmF1bHRfaW1hZ2UifQ&_nc_ht=scontent-ams2-1.cdninstagram.com&_nc_cat=104&_nc_ohc=35S-UYn1bmQQ7kNvgGHi-UM&_nc_gid=6d24b235055a416a9388d12a22409a86&edm=ACWDqb8BAAAA&ccb=7-5&ig_cache_key=MzQ3NDIwNDkxNDUwMDM2MzI0NA%3D%3D.3-ccb7-5&oh=00_AYCh6nV8CnrFpWJt9uzU8wRhPcPEIizJVMfHwmxw_yOVYA&oe=67329432&_nc_sid=ee9879,3474204914500363244,https://www.instagram.com/transurban/,2024-11-07T11:57:36.575Z,2
https://www.instagram.com/p/DA22wFRtIbW/,That morning light 🙌,1,10,Gateway Bridge,1660123460959642,2024-10-08T08:52:08.000Z,false,true,Photo,"Photo by Transurban on October 08, 2024. Kan een afbeelding zijn van water, brug en tekst.",https://www.instagram.com/transurban,transurban,Transurban,https://scontent-ams2-1.cdninstagram.com/v/t51.2885-15/462404718_1049335426668352_5535199890794569290_n.jpg?stp=dst-jpg_e35&efg=eyJ2ZW5jb2RlX3RhZyI6ImltYWdlX3VybGdlbi4xMDgweDEzNTAuc2RyLmYyODg1LmRlZmF1bHRfaW1hZ2UifQ&_nc_ht=scontent-ams2-1.cdninstagram.com&_nc_cat=104&_nc_ohc=t39tSyWqt3EQ7kNvgEftZci&_nc_gid=6d24b235055a416a9388d12a22409a86&edm=ACWDqb8BAAAA&ccb=7-5&ig_cache_key=MzQ3NDIwNDkzNDgzNDM1MTY0MA%3D%3D.3-ccb7-5&oh=00_AYDKmK8kfSMDZx7DikcyS2YH9BOyjGmcfdIDIP5490rN4A&oe=67328BBD&_nc_sid=ee9879,3474204934834351640,https://www.instagram.com/transurban/,2024-11-07T11:57:36.575Z,2
,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,
Export limit reached - Get more with our premium plans,,,,,,,,,,,,,,,,,,
Upgrade to export all your data with the link below:,,,,,,,,,,,,,,,,,,
https://phbuster.io/upgrade,,,,,,,,,,,,,,,,,,
data/LinkedIn_transurban_phantombuster.csv
ADDED
@@ -0,0 +1,50 @@
postUrl,imgUrl,type,postContent,likeCount,commentCount,repostCount,postDate,action,profileUrl,timestamp,viewCount,postTimestamp,videoUrl
https://www.linkedin.com/feed/update/urn:li:activity:7259735903592460288,https://media.licdn.com/dms/image/v2/D5622AQF_MrHXGrVCnQ/feedshare-shrink_800/feedshare-shrink_800/0/1730855917881?e=1733961600&v=beta&t=FvOT6iYbI0BwfLj-HHTdkknzi--k9bd3n9bQqcHQwWk,Image,"According to the Back to the Future movies, flying cars were meant to be everywhere by 2015! While that prediction didn't come true, we will have the XPENG X2 flying car on display at our EV Drive Day later this month.
You can also take XPeng's G6 for a test drive.
Come down and check them out: https://lnkd.in/gDuhQyvf",48,1,2,1d,Post,https://www.linkedin.com/company/transurban,2024-11-07T10:18:41.056Z,,2024-11-06T01:18:38.787Z,
https://www.linkedin.com/feed/update/urn:li:activity:7256514521815474177,https://media.licdn.com/dms/image/v2/D5610AQGaFh6j9QC4bQ/image-shrink_800/image-shrink_800/0/1730087881323?e=1731582000&v=beta&t=NTYb4lDoqfCL4bQ5XDEzPBhtq3zbaB8GUKqbPbfuxRA,Image,"This new, sporty little Volvo SUV is electric-ly quick and just one of the many EVs we'll have at Sydney Motorsport Park on 17 November as part of our EV Drive Day.
Learn more about #EV ownership, talk to the experts and see a range of electric cars up close.
Get your free ticket here: https://lnkd.in/gDuhQyvf",35,0,2,1w,Post,https://www.linkedin.com/company/transurban,2024-11-07T10:18:41.057Z,,2024-10-28T03:58:01.521Z,
https://www.linkedin.com/feed/update/urn:li:activity:7257164308491481090,https://media.licdn.com/dms/image/v2/D5622AQGgCDitxftvGw/feedshare-shrink_800/feedshare-shrink_800/0/1730152107518?e=1733961600&v=beta&t=bioRCBoksd_C5pdgUGudJn549UsCwtqbsX7bRy5Tlrw,Image,"Samantha from our Customer and Technology team has a simple but effective wellbeing tip:
""Taking a break from the screen and looking outside is a great way to reset and for my eyes to catch a break. Who knows, you might even see something interesting out the window like a double rainbow.""",73,2,0,1w,Post,https://www.linkedin.com/company/transurban,2024-11-07T10:18:41.057Z,,2024-10-29T23:00:02.737Z,
https://www.linkedin.com/feed/update/urn:li:activity:7253944739832504320,https://media.licdn.com/dms/image/v2/D5622AQFwRdDMUtAj6Q/feedshare-shrink_800/feedshare-shrink_800/0/1729475186566?e=1733961600&v=beta&t=6upLFJSp7V8K_leaydR_Z0tHHcbtDYXtCO5FVWlhHHk,Image,"How do we harness the power of AI whilst mitigating the risks?
We were delighted to have the opportunity to chat about this topic with AWSN - Australian Women in Security Network and Data#3 in our Melbourne HQ!
AI should be treated like any other software – whilst it can help us deliver at speed it should never be used without human oversight.
Partnering with AWSN helps us to increase the representation of women in tech, as we believe diverse perspectives are what drive innovation and creativity.
#WomenInTech #CyberAwarenessMonth",126,1,1,2w,Post,https://www.linkedin.com/company/transurban,2024-11-07T10:18:46.635Z,,2024-10-21T01:46:37.752Z,
https://www.linkedin.com/feed/update/urn:li:activity:7254536001191043076,https://media.licdn.com/dms/image/v2/D5610AQEAzsI9Wg3tng/image-shrink_800/image-shrink_800/0/1729616163011?e=1731582000&v=beta&t=FV0uoNv66dIKPW_uN0Kt5u0QJqBLIq_gYoVnW-Rh4Ow,Image,Fresh air meeting vibes.,56,1,0,2w,Post,https://www.linkedin.com/company/transurban,2024-11-07T10:18:46.635Z,,2024-10-22T16:56:05.445Z,
https://www.linkedin.com/feed/update/urn:li:activity:7254281556205215744,https://media.licdn.com/dms/image/v2/D5622AQETlhYcjIolwQ/feedshare-shrink_800/feedshare-shrink_800/0/1729483408087?e=1733961600&v=beta&t=WSugkyoUAKUoPtaq9tcILPU1xjXqh44M75BbHelu-gs,Image,"Need some inspo? Matthew, our Traffic Manager on the M7-M12 Integration Project shares his healthy habit:
Powerlifting has been a game-changer for my overall wellbeing and work performance. By committing to this discipline, I’ve built physical strength and mental resilience, which translates to increased energy and focus at work.
Setting this example for my kids not only instils valuable life skills in them but also reinforces my own commitment to personal growth and wellbeing.
This holistic approach boosts my confidence and fosters a positive, productive mindset, enabling me to bring my best self to work every day. 💪👨👩👧👦",109,9,1,2w,Post,https://www.linkedin.com/company/transurban,2024-11-07T10:18:46.636Z,,2024-10-22T00:05:01.033Z,
https://www.linkedin.com/feed/update/urn:li:activity:7257535078552018944,https://media.licdn.com/dms/image/v2/D5610AQEjrJ13AYXINQ/ads-video-thumbnail_720_1280/ads-video-thumbnail_720_1280/0/1730331185304?e=1731582000&v=beta&t=vsE1rgddwxJ6qFEs02GQBED9HtbiwtRh429RJI1cn7k,Video (LinkedIn Source),"A trip through NorthConnex in 40 seconds.
Skipping 21 sets of lights for 4 years.",151,7,3,1w,Post,https://www.linkedin.com/company/transurban,2024-11-07T10:18:46.636Z,,2024-10-30T23:33:21.208Z,blob:https://www.linkedin.com/f650d67b-6647-4e24-8cd5-fd0529106260
https://www.linkedin.com/feed/update/urn:li:activity:7251366127283380224,https://media.licdn.com/dms/image/v2/D5622AQH1bCsSSOOFmQ/feedshare-shrink_800/feedshare-shrink_800/0/1728850318117?e=1733961600&v=beta&t=YDdXkRYJIpgv5twDckNODOmBrrahciSbRhwK3dX7f8I,Image,"Meet Sonal (and Coco) – a Senior Structures Engineer and avid dog lover.
""My wellbeing tip is simple: Eat, Sleep, Cuddle and Repeat.""",124,7,0,3w,Post,https://www.linkedin.com/company/transurban,2024-11-07T10:18:46.637Z,,2024-10-13T23:00:08.612Z,
https://www.linkedin.com/feed/update/urn:li:activity:7247032590015029250,https://media.licdn.com/dms/image/v2/D5610AQHECX1Qdp12xw/image-shrink_800/image-shrink_800/0/1727827210898?e=1731582000&v=beta&t=YV5XwFJvYqAxTx0Zi2iDS_u7mjx0p8GPoOS1Lgh4ZZI,Image,"We loved seeing our Ignite Mentoring participants exploring various assets, bridges and tunnels across Melbourne, Brisbane and in our WestConnex Control Centre in Sydney.
What a better way to ignite your passion than seeing it all for yourself!
#WomenInSTEM",170,0,3,1mo,Post,https://www.linkedin.com/company/transurban,2024-11-07T10:18:46.637Z,,2024-10-02T00:00:12.814Z,
https://www.linkedin.com/feed/update/urn:li:activity:7246307808109830145,https://media.licdn.com/dms/image/v2/D5610AQEsvAO6mWY8Qg/image-shrink_800/image-shrink_800/0/1727654409678?e=1731582000&v=beta&t=pkJT6kXcO6pzNq3aYtfOjH2hkI5M8Y_iY_QOsfnme6w,Image,"An astonishing 200 tonnes of rubbish was collected from our NSW roads through street sweeping activities over the past year.
Over 90% of roadside waste collected is recycled at purpose-built recycling plants such as this one – the Downer Reconomy Centre in Western Sydney. Here, gravel and debris is turned into Recycled Asphalt Product (RAP) which is then re-used as bitumen for paving roads.",107,1,0,1mo,Post,https://www.linkedin.com/company/transurban,2024-11-07T10:18:46.638Z,,2024-09-30T00:00:11.342Z,
,,,,,,,,,,,,,
,,,,,,,,,,,,,
Export limit reached - Get more with our premium plans,,,,,,,,,,,,,
Upgrade to export all your data with the link below:,,,,,,,,,,,,,
https://phbuster.io/upgrade,,,,,,,,,,,,,
data/Twitter_transurban_phantombuster.csv
ADDED
@@ -0,0 +1,19 @@
tweetDate,twitterId,handle,text,profileUrl,name,tweetLink,timestamp,query
Sat Nov 02 10:09:06 +0000 2024,2424176252,VirginiaTechCEE,Robert Ridgell was instrumental in the I-95 Fredericksburg Express Lanes Extension project. Read more in our latest alumni spotlight! https://t.co/eJYZaF3HQ3,https://twitter.com/VirginiaTechCEE,Virginia Tech Civil and Environmental Engineering,https://twitter.com/VirginiaTechCEE/status/1852654156583931932,2024-11-07T11:06:42.582Z,https://x.com/search?q=virginia+express+lanes
Tue Nov 05 16:14:46 +0000 2024,39762255,ARTBA,.@Transurban and @VaDOT continue to provide connectivity and mobility solutions to growing communities along the I-95 Corridor. Read about their latest project – the $70 million 95 Express Lanes Opitz Boulevard project – in #TransportationBuilder. https://t.co/ehPyW7aeFa #IReadTB https://t.co/1RnjXng0h5,https://twitter.com/ARTBA,ARTBA,https://twitter.com/ARTBA/status/1853833344590475745,2024-11-07T11:06:42.582Z,https://x.com/search?q=virginia+express+lanes
Wed Nov 06 21:52:20 +0000 2024,22731335,babesandballers,Pack your patience if your going beyond 289 toward Manassas on 66 lanes blocked in @VAExpressLanes and main lanes due to large crash @Newsguy41 @alanhenney @CordellTraffic @WTOPtraffic @DCNewsLive @DCCelebrity https://t.co/EBWI1Z2PyN,https://twitter.com/babesandballers,Solomon Tucker,https://twitter.com/babesandballers/status/1854280683952226607,2024-11-07T11:06:42.582Z,https://x.com/search?q=virginia+express+lanes
Tue Jun 11 11:30:03 +0000 2024,294682376,Itsjoeco,"As @mattyglesias wrote, time to reframe congestion pricing as express lanes like they do in Virginia.
Though of course the real express lane is riding a bus, which skips this traffic altogether. https://t.co/Ep7uV1V9rl",https://twitter.com/Itsjoeco,Joe Colangelo,https://twitter.com/Itsjoeco/status/1800490677861773750,2024-11-07T11:06:42.582Z,https://x.com/search?q=virginia+express+lanes
Tue Nov 29 19:03:57 +0000 2022,1481701998273650692,GovernorVA,"Congratulations to everyone who played a role in building the 66 Express Lanes project from the ground up! This was one of Virginia’s largest public-private partnership projects, a cornerstone of nearly decade long initiative that will serve as a model for the Nation. https://t.co/YE6uBxgass",https://twitter.com/GovernorVA,Governor Glenn Youngkin,https://twitter.com/GovernorVA/status/1597667697999118336,2024-11-07T11:06:42.582Z,https://x.com/search?q=virginia+express+lanes
Mon Nov 04 19:23:25 +0000 2024,175476068,VAExpressLanes,🚧 ⚠️ #TrafficAlert: Work continues on the Opitz Boulevard Project with overnight #ExpressLane closures starting tonight until Nov. 7 from 9:30PM - 4:30AM. View the full schedule: https://t.co/PlECpCAONy @VaDOT https://t.co/CXHrKdzrjz,https://twitter.com/VAExpressLanes,VA Express Lanes,https://twitter.com/VAExpressLanes/status/1853518430248333352,2024-11-07T11:06:42.582Z,https://x.com/search?q=virginia+express+lanes
Thu Oct 31 17:16:30 +0000 2024,175476068,VAExpressLanes,#TrafficAlert: Both the left and right lanes are blocked on the southbound 95 Express Lanes after route 123 due to a crash. The center lane is open to traffic. @VaDOTNOVA https://t.co/x8CRxHMl6i,https://twitter.com/VAExpressLanes,VA Express Lanes,https://twitter.com/VAExpressLanes/status/1852036942168215623,2024-11-07T11:06:42.582Z,https://x.com/search?q=virginia+express+lanes
Mon Nov 04 00:30:13 +0000 2024,16259594,WAVY_News,"As construction on the Hampton Roads Express Lanes (HREL) project continues, the Virginia Department of Transportation (VDOT) are scheduled to implement another long-term on-ramp closure beginning on Nov. 3. https://t.co/uNfbeE1kP2 https://t.co/Fwy6WbRrhg",https://twitter.com/WAVY_News,WAVY TV 10,https://twitter.com/WAVY_News/status/1853233252347048175,2024-11-07T11:06:42.582Z,https://x.com/search?q=virginia+express+lanes
Wed Nov 06 20:52:30 +0000 2024,1392368760,VaDOTHR,"@CityofVaBeach motorists...Crews have completed all of the required major roadway work for the expedited sections of the project on Laskin Rd from Republic Rd to Hilltop North Shopping Center, plus on First Colonial Rd from Laurel Lane to I-264.
For more: https://t.co/O8qoCfzUc4 https://t.co/u3OJZW7rFG",https://twitter.com/VaDOTHR,VDOT Hampton Roads,https://twitter.com/VaDOTHR/status/1854265627319382074,2024-11-07T11:06:42.582Z,https://x.com/search?q=virginia+express+lanes
Thu Apr 25 14:07:48 +0000 2024,88953642,Conduent,"We're excited to announce the launch of an innovative new Express Lanes tolling solution in partnership with the VDOT. Using an overhead #vehicle classification system, this service improves #traffic flow for an easier drive. Read more: https://t.co/aV3xDoEqor https://t.co/yJtt0U2N7v",https://twitter.com/Conduent,Conduent,https://twitter.com/Conduent/status/1783498147668324445,2024-11-07T11:06:42.582Z,https://x.com/search?q=virginia+express+lanes
,,,,,,,,
,,,,,,,,
Export limit reached - Get more with our premium plans,,,,,,,,
Upgrade to export all your data with the link below:,,,,,,,,
https://phbuster.io/upgrade,,,,,,,,
requirements.txt
ADDED
@@ -0,0 +1,23 @@
torch
python-dotenv==1.0.1
tavily-python==0.5.0
beautifulsoup4==4.12.3


fastapi==0.110.3

GoogleNews==1.6.15

langchain
langchain-community
langchain-core
langchain-experimental
langchain-openai

openai
transformers==4.44.0

pandas==2.2.2
praw==7.7.1
streamlit==1.37.1

tools/.DS_Store
ADDED
Binary file (6.15 kB)
tools/__pycache__/sentiment_analysis_util.cpython-311.pyc
ADDED
Binary file (12 kB)
tools/sentiment_analysis_util.py
ADDED
@@ -0,0 +1,306 @@

import os
from dotenv import load_dotenv
from transformers import pipeline
import pandas as pd
from GoogleNews import GoogleNews
from langchain_openai import ChatOpenAI
import praw
from datetime import datetime
import numpy as np
from tavily import TavilyClient

load_dotenv()
TAVILY_API_KEY = os.environ["TAVILY_API_KEY"]

def fetch_news(topic):

    """ Fetches news articles within a specified date range.

    Args:
    - topic (str): Topic of interest

    Returns:
    - list: A list of dictionaries containing news. """

    load_dotenv()
    days_to_fetch_news = os.environ["DAYS_TO_FETCH_NEWS"]

    googlenews = GoogleNews()
    googlenews.set_period(days_to_fetch_news)
    googlenews.get_news(topic)
    news_json = googlenews.get_texts()
    urls = googlenews.get_links()

    no_of_news_articles_to_fetch = os.environ["NO_OF_NEWS_ARTICLES_TO_FETCH"]
    news_article_list = []
    counter = 0
    for article in news_json:

        if counter >= int(no_of_news_articles_to_fetch):
            break

        relevant_info = {
            'News_Article': article,
            'URL': urls[counter]
        }
        news_article_list.append(relevant_info)
        counter += 1
    return news_article_list

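# Usage sketch (hypothetical values, not part of the commit): fetch_news expects
# DAYS_TO_FETCH_NEWS and NO_OF_NEWS_ARTICLES_TO_FETCH in the environment, and
# GoogleNews periods are strings such as '7d'.
#
# os.environ.setdefault("DAYS_TO_FETCH_NEWS", "7d")
# os.environ.setdefault("NO_OF_NEWS_ARTICLES_TO_FETCH", "5")
# articles = fetch_news("express lanes")   # -> [{'News_Article': ..., 'URL': ...}, ...]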
# def fetch_tavily_news(topic):
#     """ Fetches news articles.
#
#     Args:
#     - topic (str): Topic of interest
#
#     Returns:
#     - list: A list of dictionaries containing news. """
#
#     # Step 1. Instantiating your TavilyClient
#     tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
#
#     #response = tavily_client.search(topic)
#     # Step 2.1. Executing a context search query
#     answer = tavily_client.get_search_context(query=f"Give me news on {topic}")
#
#     line = []
#     tavily_news = []
#
#     for i in range(len(answer.split("url")))[1:]:
#         https_link = (answer.split("url")[i].split("\\\\\\")[2]).split('"')[1]
#         topic_answer = answer.split("url")[i].split("\\\\\\")[-3]
#         tavily_news = np.append(tavily_news, {'https': https_link, 'topic_answer': topic_answer})
#
#     return tavily_news

def fetch_tavily_news(prompt):
    try:
        # Fetch the Tavily search context for the prompt
        tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
        answer = tavily_client.get_search_context(query=f"Give me news on {prompt}")

        # First, let's make the URL extraction more robust
        urls = []

        # Method 1: Using string manipulation with error handling
        try:
            parts = answer.split("url")
            for part in parts[1:]:  # Skip the first part before 'url'
                try:
                    # Try different splitting patterns
                    if '\\\\' in part:
                        url = part.split('\\\\')[2].split('"')[1]
                    elif '"' in part:
                        url = part.split('"')[1]
                    else:
                        continue

                    if url.startswith('http'):  # Validate URL
                        urls.append(url)
                except (IndexError, AttributeError):
                    continue
        except Exception as e:
            print(f"Error extracting URLs: {e}")

        # If no URLs found, try alternative parsing
        if not urls:
            # Method 2: Try JSON parsing if the response is JSON formatted
            try:
                import json
                data = json.loads(answer)
                if isinstance(data, list):
                    for item in data:
                        if isinstance(item, dict) and 'url' in item:
                            urls.append(item['url'])
            except json.JSONDecodeError:
                pass

        # If still no URLs found, try regex
        if not urls:
            import re
            url_pattern = r'https?://[^\s<>"]+|www\.[^\s<>"]+|http?://[^\s<>"]+'
            urls = re.findall(url_pattern, answer)

        # Remove duplicates while preserving order
        urls = list(dict.fromkeys(urls))

        return urls

    except Exception as e:
        print(f"Error in fetch_tavily_news: {e}")
        return []  # Return empty list on error

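# Alternative sketch (hypothetical, not part of the commit): tavily-python also
# exposes a structured search endpoint, which would avoid string-parsing the
# context blob from get_search_context. The response shape is assumed to be
# {'results': [{'url': ...}, ...]}.
#
# def fetch_tavily_urls(topic):
#     tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
#     response = tavily_client.search(query=f"Give me news on {topic}")
#     return [r["url"] for r in response.get("results", [])]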
def fetch_reddit_news(topic):
    load_dotenv()
    REDDIT_USER_AGENT = os.environ["REDDIT_USER_AGENT"]
    REDDIT_CLIENT_ID = os.environ["REDDIT_CLIENT_ID"]
    REDDIT_CLIENT_SECRET = os.environ["REDDIT_CLIENT_SECRET"]
    # https://medium.com/geekculture/a-complete-guide-to-web-scraping-reddit-with-python-16e292317a52
    user_agent = REDDIT_USER_AGENT
    reddit = praw.Reddit(
        client_id=REDDIT_CLIENT_ID,
        client_secret=REDDIT_CLIENT_SECRET,
        user_agent=user_agent
    )

    headlines = set()
    for submission in reddit.subreddit('nova').search(topic, time_filter='day'):
        headlines.add(submission.title + ', Date: ' + datetime.utcfromtimestamp(int(submission.created_utc)).strftime('%Y-%m-%d %H:%M:%S') + ', URL:' + submission.url)

    for submission in reddit.subreddit('fednews').search(topic, time_filter='day'):
        headlines.add(submission.title + ', Date: ' + datetime.utcfromtimestamp(int(submission.created_utc)).strftime('%Y-%m-%d %H:%M:%S') + ', URL:' + submission.url)

    for submission in reddit.subreddit('washingtondc').search(topic, time_filter='day'):
        headlines.add(submission.title + ', Date: ' + datetime.utcfromtimestamp(int(submission.created_utc)).strftime('%Y-%m-%d %H:%M:%S') + ', URL:' + submission.url)

    if len(headlines) < 10:
        for submission in reddit.subreddit('washingtondc').search(topic, time_filter='year'):
            headlines.add(submission.title + ', Date: ' + datetime.utcfromtimestamp(int(submission.created_utc)).strftime('%Y-%m-%d %H:%M:%S') + ', URL:' + submission.url)
    if len(headlines) < 10:
        for submission in reddit.subreddit('washingtondc').search(topic):  #,time_filter='week'):
            headlines.add(submission.title + ', Date: ' + datetime.utcfromtimestamp(int(submission.created_utc)).strftime('%Y-%m-%d %H:%M:%S') + ', URL:' + submission.url)

    return headlines

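# Consolidation sketch (hypothetical helper, not part of the commit): the five
# search loops above repeat the same formatting and could share one function
# that produces the identical 'title, Date: ..., URL:...' strings.
#
# def _collect_headlines(reddit, headlines, subreddit, topic, time_filter=None):
#     kwargs = {'time_filter': time_filter} if time_filter else {}
#     for submission in reddit.subreddit(subreddit).search(topic, **kwargs):
#         created = datetime.utcfromtimestamp(int(submission.created_utc))
#         headlines.add(f"{submission.title}, Date: {created:%Y-%m-%d %H:%M:%S}, URL:{submission.url}")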
def analyze_sentiment(article):
    """
    Analyzes the sentiment of a given news article.

    Args:
    - article (str): The news article text.

    Returns:
    - dict: A dictionary containing sentiment analysis results.
    """

    # Analyze sentiment using default model
    #classifier = pipeline('sentiment-analysis')

    # Analyze sentiment using specific model
    classifier = pipeline(model='tabularisai/robust-sentiment-analysis')  #mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis')
    sentiment_result = classifier(str(article))

    analysis_result = {
        'News_Article': article,
        'Sentiment': sentiment_result
    }

    return analysis_result


def generate_summary_of_sentiment(sentiment_analysis_results):  #, dominant_sentiment):

    news_article_sentiment = str(sentiment_analysis_results)
    print("News article sentiment : " + news_article_sentiment)

    OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
    model = ChatOpenAI(
        model="gpt-4o",
        temperature=0,
        max_tokens=None,
        timeout=None,
        max_retries=2,
        api_key=OPENAI_API_KEY,  # if you prefer to pass the API key in directly instead of using env vars
        # base_url="...",
        # organization="...",
        # other params...
    )

    messages = [
        {"role": "system", "content": "You are a helpful assistant that looks at all news articles with their sentiment, hyperlink and date in front of the article text, the articles MUST be ordered by date!, and generate a summary rationalizing dominant sentiment. At the end of the summary, add URL links for all the articles in the markdown format for streamlit. Make sure the articles as well as the links are ordered descending by Date!!!!!!! Example of adding the URLs: The Check out the links: [link](%s) % url. "},
        {"role": "user", "content": f"News articles and their sentiments: {news_article_sentiment}"}  #, and dominant sentiment is: {dominant_sentiment}"}
    ]
    response = model.invoke(messages)

    summary = response.content
    print("+++++++++++++++++++++++++++++++++++++++++++++++")
    print(summary)
    print("+++++++++++++++++++++++++++++++++++++++++++++++")
    return summary


def plot_sentiment_graph(sentiment_analysis_results):
    """
    Plots a sentiment analysis graph

    Args:
    - sentiment_analysis_results (dict): Dictionary containing 'Review Title : Summary', 'Rating', and 'Sentiment' keys.

    Returns:
    - Series: Counts of each sentiment label.
    """
    df = pd.DataFrame(sentiment_analysis_results)
    #print(df)

    # Group by sentiment value count
    grouped = df['Sentiment'].value_counts()

    sentiment_counts = df['Sentiment'].value_counts()

    # Plotting pie chart
    # fig = plt.figure(figsize=(5, 3))
    # plt.pie(sentiment_counts, labels=sentiment_counts.index, autopct='%1.1f%%', startangle=140)
    # plt.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.

    # Uncomment below when running this program locally to display the chart
    #plt.show()

    return sentiment_counts


def get_dominant_sentiment(sentiment_analysis_results):
    """
    Returns the overall sentiment (negative, positive or neutral) depending on the count of negative vs positive sentiment

    Args:
    - sentiment_analysis_results (dict): Dictionary containing sentiment analysis results.

    Returns:
    - str: The sentiment label with the highest count.
    """
    df = pd.DataFrame(sentiment_analysis_results)

    # Group by the 'sentiment' column and count the occurrences of each sentiment value
    #print(df)
    #print(df['Sentiment'])
    sentiment_counts = df['Sentiment'].value_counts().reset_index()
    sentiment_counts.columns = ['sentiment', 'count']
    print(sentiment_counts)

    # Find the sentiment with the highest count
    dominant_sentiment = sentiment_counts.loc[sentiment_counts['count'].idxmax()]

    return dominant_sentiment['sentiment']

# starting point of the program
if __name__ == '__main__':

    # fetch news
    news_articles = fetch_news('AAPL')

    analysis_results = []

    # Perform sentiment analysis for each news article
    for article in news_articles:
        sentiment_analysis_result = analyze_sentiment(article['News_Article'])

        # Display sentiment analysis results
        print(f'News Article: {sentiment_analysis_result["News_Article"]} : Sentiment: {sentiment_analysis_result["Sentiment"]}', '\n')

        result = {
            'News_Article': sentiment_analysis_result["News_Article"],
            'Sentiment': sentiment_analysis_result["Sentiment"][0]['label']
        }

        analysis_results.append(result)

    # Determine dominant sentiment based on the sentiment analysis data
    dominant_sentiment = get_dominant_sentiment(analysis_results)
    print(dominant_sentiment)

    # Plot graph
    plot_sentiment_graph(analysis_results)
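One note on analyze_sentiment: it constructs a new transformers pipeline on every call, so the model weights are re-loaded each time it runs. A minimal caching sketch, assuming the same tabularisai/robust-sentiment-analysis checkpoint (get_classifier and analyze_sentiment_cached are hypothetical, not part of this commit):

from functools import lru_cache
from transformers import pipeline

@lru_cache(maxsize=1)
def get_classifier():
    # Load the sentiment model once and reuse it across calls.
    return pipeline(model='tabularisai/robust-sentiment-analysis')

def analyze_sentiment_cached(article):
    # Same return shape as analyze_sentiment above.
    return {'News_Article': article, 'Sentiment': get_classifier()(str(article))}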