Dooratre committed on
Commit 10e8373 · verified · 1 Parent(s): 7d7d0d5

Upload 4 files

Files changed (4)
  1. ai_api.py +45 -0
  2. db_news.py +125 -0
  3. fetch_news.py +187 -0
  4. get_price.py +46 -0
ai_api.py ADDED
@@ -0,0 +1,45 @@
+ import requests
+ import json
+ import time
+
+ def thinking_animation():
+     """
+     A simple helper used while waiting (not strictly necessary).
+     """
+     for _ in range(3):
+         print(".", end="", flush=True)
+         time.sleep(0.5)
+
+ def call_o1_ai_api(formatted_chat_history):
+     """
+     Send the current conversation to the o1 AI API and
+     return the reply along with the updated chat history.
+     """
+     url = "https://corvo-ai-xx-gpt-5.hf.space/chat"
+     headers = {"Content-Type": "application/json"}
+     payload = {
+         "chat_history": formatted_chat_history
+     }
+     max_retries = 5
+     retry_delay = 10
+     timeout = 600
+
+     for attempt in range(max_retries):
+         try:
+             print("AI THINKING", end="", flush=True)
+             thinking_animation()
+             response = requests.post(url, headers=headers, data=json.dumps(payload), timeout=timeout)
+             response.raise_for_status()
+             assistant_response = response.json().get("assistant_response", "No response received.")
+             # Append the assistant response to chat history
+             formatted_chat_history.append({"role": "assistant", "content": assistant_response})
+             return assistant_response, formatted_chat_history
+         except requests.exceptions.Timeout:
+             print(f"Timeout on attempt {attempt + 1}, retrying...")
+             time.sleep(retry_delay)
+         except Exception as e:
+             print(f"Error on attempt {attempt + 1}: {e}, retrying...")
+             time.sleep(retry_delay)
+
+     return "Error processing request. Please try again.", formatted_chat_history
+
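
For reference, a minimal sketch of how call_o1_ai_api could be driven from a caller. The chat history uses the {"role": ..., "content": ...} format the function itself appends to; the system prompt and user message below are illustrative placeholders, not part of this commit:

    from ai_api import call_o1_ai_api

    # Hypothetical starting conversation (placeholder messages).
    chat_history = [
        {"role": "system", "content": "You are a trading assistant."},
        {"role": "user", "content": "Summarize today's gold news."},
    ]

    reply, chat_history = call_o1_ai_api(chat_history)
    print(reply)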
db_news.py ADDED
@@ -0,0 +1,125 @@
+ import requests
+ from bs4 import BeautifulSoup
+ import json
+
+ TOKEN = "_device_id=0038e28d4f7d4f9baf8f76b6b9fb8980; GHCC=Required:1-Analytics:1-SocialMedia:1-Advertising:1; MicrosoftApplicationsTelemetryDeviceId=c58113b4-9acb-4ba8-b9f2-4217bdef379a; MSFPC=GUID=79b87b010d464a8783fbf43e19eccddf&HASH=79b8&LV=202408&V=4&LU=1723654762596; _octo=GH1.1.1517954811.1753352111; cpu_bucket=lg; preferred_color_mode=dark; tz=Africa%2FTripoli; ai_session=v+6N2XPmhlxugZWyisQ+ZD|1753646990500|1753646990500; saved_user_sessions=155741452%3ASnvRnrrf0nAjVGrTz3q28Oda2y6wCt6rCADoDwrCx2M6pORt; user_session=SnvRnrrf0nAjVGrTz3q28Oda2y6wCt6rCADoDwrCx2M6pORt; __Host-user_session_same_site=SnvRnrrf0nAjVGrTz3q28Oda2y6wCt6rCADoDwrCx2M6pORt; tz=Africa%2FTripoli; color_mode=%7B%22color_mode%22%3A%22auto%22%2C%22light_theme%22%3A%7B%22name%22%3A%22light%22%2C%22color_mode%22%3A%22light%22%7D%2C%22dark_theme%22%3A%7B%22name%22%3A%22dark%22%2C%22color_mode%22%3A%22dark%22%7D%7D; logged_in=yes; dotcom_user=omarnuwrar; _gh_sess=7FzMK5K8ffnmyyh5LKKv%2FOXiqZJR4qLXxTdaV66E844ZCPq5qw%2FClaVmXHNfu8oc61N461wsjEr7d8vhEwrs0N0X7ITUed9Zj01RnwHGT8mMRUn6oYSv94LpIh2FwmotPwp8jkSQkZ%2BotdEpdYtp3ZoJZKfiZOcpHBtT7g2VwIPgoW2Qx5RpnKNdI3Hq31C6IIPaSzAqqny7O7c6L8nWv1nfx%2FAbF4UFSo7UfW%2F9JLUYF5lVJ2kXdYoesKOL7c2KItGDTaZCwjYr9cHKlHWD4E9wLo22GjFveVKxrEz5dgIrNdAj8WxWXuY5Ou4eYmxaBn2ovIhvnFz8%2F6qLURX81YxLLZbymGERA3MaRzDDzY3yE76U8y8lLPve0Duqc0lr34R3XUiMKE5A3%2FNPQ273e36yNlLsgBGDyuYIEcsQ84XLq2IQygBxX4y%2B6WSPwXAgOku6MiEP8Ro9ihF6scOhbJRrVCPp0toSY3RmJToUy6XRmBF2B0oyJstKbqLPfmZI8p%2B2bQo8DBKARHWWUzTJdjF%2BfgZtm%2Flb3qijcKT5I6SPU%2BiLMH%2Fl2GwHw73d1OFGUNy4tdLT5SO5vCFrf1GIiV7qUhhQdA21dXsAeQ4qoF5LHiGftyhOUBHto3ZZB%2FJ87uqACflXOfbbTCQCAYNa2u4o8I9iKQp9r2ripVxqQF1oyVu12FSIN%2BS%2Fd4Rm%2FN7E1tOw3tcVgYcsFEcbsOViUZBXXmo1Qfd9H%2B4IGnbv3hZe%2FPeJqb33SxWeQpamEWhLjVJL2hMCbZ8v79azeUL93QzkLXuryStKTXOdoyrbD2n93V36z5Sxhzi9Ku6OxVK1PCZW0R7JiYtQOWoeMAMd4oe3Bqrxyc%2BdAdb0sW3L%2FOD8J2nbvJ5gGA%3D%3D--Ngvrt5zzlDZazWNi--k%2F8wjhX57aMmLOJc8i6L7w%3D%3D"
+ # Step 1: Fetch the authenticity_token and commitOid from the GitHub edit page
+ def fetch_authenticity_token_and_commit_oid():
+     url = "https://github.com/omarnuwrar/Trading/edit/main/news.json"
+
+     headers = {
+         "cookie": TOKEN,
+         "if-none-match": 'W/"2ff86bd1792cfee5ed79ee070b3b46de"',
+         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
+         "x-github-target": "dotcom",
+         "x-react-router": "json",
+         "x-requested-with": "XMLHttpRequest",
+     }
+
+     response = requests.get(url, headers=headers)
+
+     if response.status_code == 200:
+         soup = BeautifulSoup(response.text, 'html.parser')
+         script_tag = soup.find("script", {"type": "application/json", "data-target": "react-app.embeddedData"})
+
+         if script_tag:
+             try:
+                 json_data = json.loads(script_tag.string.strip())
+                 authenticity_token = json_data["payload"]["csrf_tokens"]["/omarnuwrar/Trading/tree-save/main/news.json"]["post"]
+                 commit_oid = json_data["payload"]["webCommitInfo"]["commitOid"]
+                 return authenticity_token, commit_oid
+             except (KeyError, json.JSONDecodeError) as e:
+                 print(f"Error: Failed to extract data. Details: {str(e)}")
+                 return None, None
+         else:
+             print("Error: Could not find the required <script> tag.")
+             return None, None
+     else:
+         print(f"Error: Failed to fetch the page. Status code: {response.status_code}")
+         return None, None
+
+ # Step 2: Send the POST request to update the news.json file
+ def update_user_json_file(authenticity_token, commit_oid, new_content):
+     url = "https://github.com/omarnuwrar/Trading/tree-save/main/news.json"
+
+     headers = {
+         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
+         "x-requested-with": "XMLHttpRequest",
+         "github-verified-fetch": "true",
+         "content-type": "application/x-www-form-urlencoded",
+         "cookie": TOKEN,
+     }
+
+     payload = {
+         "message": "Update news.json",
+         "placeholder_message": "Update news.json",
+         "description": "",
+         "commit-choice": "direct",
+         "target_branch": "main",
+         "quick_pull": "",
+         "guidance_task": "",
+         "commit": commit_oid,
+         "same_repo": "1",
+         "pr": "",
+         "content_changed": "true",
+         "filename": "news.json",
+         "new_filename": "news.json",
+         "value": new_content,
+         "authenticity_token": authenticity_token,
+     }
+
+     response = requests.post(url, headers=headers, data=payload)
+
+     if response.status_code == 200:
+         return {"success": True, "message": "news.json has been updated!"}
+     else:
+         return {"success": False, "message": f"Request failed with status code {response.status_code}", "details": response.text}
+
+
+
+ # Function to fetch and extract the JSON data
+ def fetch_json_from_github():
+     # URL of the GitHub page
+     url = "https://github.com/omarnuwrar/Trading/blob/main/news.json"
+
+     # Custom headers
+     headers = {
+         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
+         "Cookie": TOKEN
+     }
+
+     try:
+         # Fetch the HTML content of the page
+         response = requests.get(url, headers=headers)
+         response.raise_for_status()  # Raise an exception for HTTP errors
+
+         # Parse the HTML using BeautifulSoup
+         soup = BeautifulSoup(response.text, 'html.parser')
+
+         # Find the <script> tag with type="application/json" and `data-target="react-app.embeddedData"`
+         script_tag = soup.find('script', {'type': 'application/json', 'data-target': 'react-app.embeddedData'})
+         if script_tag:
+             # Load the JSON content from the <script> tag
+             embedded_data = json.loads(script_tag.string)
+
+             # Navigate to the "blob" > "rawLines" key for the JSON in the file
+             raw_lines = embedded_data.get("payload", {}).get("blob", {}).get("rawLines", [])
+             if raw_lines:
+                 # The JSON content is in the first element of the rawLines list
+                 json_content = raw_lines[0]
+
+                 # Parse the JSON content
+                 data = json.loads(json_content)
+
+                 # Return the extracted JSON data
+                 return {"success": True, "data": data}
+             else:
+                 return {"success": False, "message": "JSON data not found in the 'rawLines' key."}
+         else:
+             return {"success": False, "message": "Could not find the <script> tag with embedded JSON data."}
+     except requests.exceptions.RequestException as e:
+         return {"success": False, "message": f"Error fetching data: {e}"}
+     except json.JSONDecodeError as je:
+         return {"success": False, "message": f"Error parsing JSON: {je}"}
+
+
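
A minimal sketch of the intended read-then-write flow through these helpers: fetch the current news.json, obtain the CSRF token and commit OID, then push new content. The replacement payload below is a hypothetical placeholder; the real content and error handling depend on the caller:

    import json
    from db_news import (
        fetch_authenticity_token_and_commit_oid,
        update_user_json_file,
        fetch_json_from_github,
    )

    # Read the current contents of news.json.
    current = fetch_json_from_github()
    if current["success"]:
        print("Existing entries:", current["data"])

    # Write new content back using the token and commit OID from the edit page.
    token, commit_oid = fetch_authenticity_token_and_commit_oid()
    if token and commit_oid:
        new_content = json.dumps({"news": []})  # hypothetical replacement content
        result = update_user_json_file(token, commit_oid, new_content)
        print(result["message"])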
fetch_news.py ADDED
@@ -0,0 +1,187 @@
+ import requests
+ from datetime import datetime, timedelta
+
+ def fetch_news_items():
+     """
+     Fetch the main TradingView news feed for XAUUSD and DXY,
+     return it as a Python dictionary (JSON).
+     """
+     url = (
+         "https://news-mediator.tradingview.com/news-flow/v2/news"
+         "?filter=lang%3Aen&filter=symbol%3AOANDA%3AXAUUSD%2CTVC%3ADXY"
+         "&client=screener&streaming=true"
+     )
+     headers = {
+         "User-Agent": (
+             "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
+             "AppleWebKit/537.36 (KHTML, like Gecko) "
+             "Chrome/138.0.0.0 Safari/537.36"
+         )
+     }
+     response = requests.get(url, headers=headers)
+     response.raise_for_status()
+     return response.json()
+
+
+ def fetch_story_by_id(story_id):
+     """
+     Given a story ID from the news feed, fetch its full content JSON
+     (shortDescription, astDescription, etc.) from TradingView.
+     """
+     base_url = "https://news-headlines.tradingview.com/v3/story"
+     params = {
+         "id": story_id,
+         "lang": "en"
+     }
+     headers = {
+         "User-Agent": (
+             "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
+             "AppleWebKit/537.36 (KHTML, like Gecko) "
+             "Chrome/138.0.0.0 Safari/537.36"
+         )
+     }
+     response = requests.get(base_url, headers=headers, params=params)
+     response.raise_for_status()
+     return response.json()
+
+
+ def parse_ast_node(node):
+     """
+     Recursively parse an AST node from the 'astDescription' field:
+     - If it's a string, just return it.
+     - If it's a dictionary with a known 'type', handle it.
+     - Otherwise, parse children recursively.
+     """
+     if isinstance(node, str):
+         # Direct text
+         return node
+
+     if isinstance(node, dict):
+         node_type = node.get("type", "")
+         children = node.get("children", [])
+
+         if node_type == "root":
+             # 'root' typically contains multiple children; parse them all
+             return "".join(parse_ast_node(child) for child in children)
+
+         elif node_type == "p":
+             # Paragraph node: parse children, add a newline
+             paragraph_text = "".join(parse_ast_node(child) for child in children)
+             return paragraph_text + "\n"
+
+         elif node_type == "url":
+             # Format a hyperlink: [text](url)
+             params = node.get("params", {})
+             link_text = params.get("linkText", "")
+             url = params.get("url", "")
+             return f"{link_text} ({url})"
+
+         elif node_type == "symbol":
+             # Format a symbol mention
+             params = node.get("params", {})
+             symbol = params.get("symbol", "")
+             text = params.get("text", "")
+             return text if text else symbol
+
+         else:
+             # Unknown or unhandled node type; parse children anyway
+             return "".join(parse_ast_node(child) for child in children)
+
+     # If not string or dict (e.g., None?), return empty string
+     return ""
+
+
+ def extract_full_content_from_story(story_data):
+     """
+     Extract the full textual content from a story, handling both
+     shortDescription and the more detailed astDescription if available.
+     """
+     # Try to get the AST description first (more detailed)
+     ast_description = story_data.get("astDescription")
+     if ast_description:
+         return parse_ast_node(ast_description)
+
+     # Fall back to short description if AST not available
+     return story_data.get("shortDescription", "No content available")
+
+
+ def get_recent_news_items(hours=100):
+     """
+     Fetch recent news items within the specified number of hours
+     and return detailed information including publish time and full content.
+     """
+     # Get current time and calculate the threshold
+     now_utc = datetime.utcnow()
+     time_threshold = now_utc - timedelta(hours=hours)
+
+     # Fetch news data
+     news_data = fetch_news_items()
+     items = news_data.get("items", [])
+     detailed_news = []
+
+     for item in items:
+         item_id = item.get("id", "")
+         published_ts = item.get("published")
+
+         if not item_id or not published_ts:
+             continue
+
+         published_dt = datetime.utcfromtimestamp(published_ts)
+         if published_dt >= time_threshold:
+             # For each qualifying news item, fetch its full story content
+             try:
+                 story_data = fetch_story_by_id(item_id)
+
+                 # Create a detailed news item
+                 detailed_item = {
+                     "id": item_id,
+                     "title": item.get("title", ""),
+                     "source": item.get("source", {}).get("name", ""),
+                     "published_time": published_dt.strftime("%Y-%m-%d %H:%M:%S UTC"),
+                     "timestamp": published_ts,
+                     "content": extract_full_content_from_story(story_data)
+                 }
+                 detailed_news.append(detailed_item)
+             except Exception as e:
+                 print(f"Error fetching details for story {item_id}: {e}")
+
+     # Sort by timestamp descending (newest first)
+     detailed_news.sort(key=lambda x: x["timestamp"], reverse=True)
+     return detailed_news
+
+
+ def build_news_summary(news_items):
+     """
+     Build a comprehensive summary of news items, including title, source,
+     publication time, and full content.
+     """
+     if not news_items:
+         return "No recent news available."
+
+     summaries = []
+     for item in news_items:
+         summary = (
+             f"📰 {item['title']}\n"
+             f"🔍 Source: {item['source']}\n"
+             f"⏰ Published: {item['published_time']}\n"
+             f"📝 Content:\n{item['content']}\n"
+             f"{'=' * 50}"
+         )
+         summaries.append(summary)
+
+     return "\n\n".join(summaries)
+
+
+ def get_formatted_news_summary(hours=24):
+     """
+     Convenience function to get a formatted news summary for the specified time period.
+     """
+     news_items = get_recent_news_items(hours)
+     return build_news_summary(news_items)
+
+
+ # Example usage:
+ if __name__ == "__main__":
+     print("Fetching recent gold and DXY news...")
+     news_summary = get_formatted_news_summary(48)  # Get news from the last 48 hours
+     print(news_summary)
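
To illustrate what parse_ast_node does with an astDescription tree, here is a small hand-made node in the same root/p/symbol/url shape the parser handles; the text, symbol, and link values are invented for the example, not taken from a real TradingView story:

    from fetch_news import parse_ast_node

    sample_ast = {
        "type": "root",
        "children": [
            {
                "type": "p",
                "children": [
                    "Gold climbed as ",
                    {"type": "symbol", "params": {"symbol": "TVC:DXY", "text": "the dollar index"}},
                    " eased. More at ",
                    {"type": "url", "params": {"linkText": "the report", "url": "https://example.com"}},
                    ".",
                ],
            }
        ],
    }

    print(parse_ast_node(sample_ast))
    # -> Gold climbed as the dollar index eased. More at the report (https://example.com).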
get_price.py ADDED
@@ -0,0 +1,46 @@
+ import requests
+
+ def get_live_rates_for_pair(pair="XAUUSD"):
+     """
+     Get live Bid and Ask prices for a specified forex pair.
+     Returns dict with { "pair": ..., "bid": ..., "ask": ..., "difference": ... }
+     """
+     url = "https://research.titanfx.com/api/live-rate?group=forex"
+     headers = {
+         "referer": "https://research.titanfx.com/instruments/gbpusd",
+         "sec-ch-ua": "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Google Chrome\";v=\"138\"",
+         "sec-ch-ua-mobile": "?0",
+         "sec-ch-ua-platform": "\"Windows\"",
+         "sec-fetch-dest": "empty",
+         "sec-fetch-mode": "cors",
+         "sec-fetch-site": "same-origin",
+         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36"
+     }
+
+     try:
+         response = requests.get(url, headers=headers)
+         response.raise_for_status()
+         data = response.json()
+
+         if pair not in data:
+             return None
+
+         pair_data = data[pair]
+         # Format bid price
+         bid_price = float(f"{pair_data[0]}.{pair_data[1]}")
+         # Format ask price
+         ask_price = float(f"{pair_data[2]}.{pair_data[3]}")
+         # Calculate difference
+         difference = ask_price - bid_price
+
+         return {
+             "pair": pair,
+             "bid": bid_price,
+             "ask": ask_price,
+             "difference": difference
+         }
+
+     except Exception as e:
+         print(f"Error while fetching price: {e}")
+         return None
+
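
A minimal usage sketch for the price helper; it assumes the Titan FX live-rate endpoint keeps returning pair data in the integer/fraction layout the parser above expects (bid whole part, bid fraction, ask whole part, ask fraction):

    from get_price import get_live_rates_for_pair

    rates = get_live_rates_for_pair("XAUUSD")
    if rates:
        print(f"{rates['pair']}: bid={rates['bid']} ask={rates['ask']} spread={rates['difference']:.2f}")
    else:
        print("Price unavailable.")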