Datasets:
Update kraken-data-collection-script
Browse files- kraken-data-collection-script +58 -35
kraken-data-collection-script
CHANGED
@@ -5,6 +5,8 @@ import time
|
|
5 |
import os
|
6 |
from typing import Dict, List, Optional
|
7 |
import logging
|
|
|
|
|
8 |
|
9 |
# Set up logging
|
10 |
logging.basicConfig(
|
@@ -17,16 +19,27 @@ logging.basicConfig(
|
|
17 |
)
|
18 |
logger = logging.getLogger(__name__)
|
19 |
|
20 |
-
class
|
21 |
-
"""Handles data collection from Kraken
|
22 |
|
23 |
-
def __init__(self,
|
24 |
-
|
|
|
25 |
try:
|
26 |
-
self.
|
27 |
logger.info("Successfully loaded Kraken API key")
|
28 |
except Exception as e:
|
29 |
-
logger.error(f"Failed to load API key: {e}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
30 |
raise
|
31 |
|
32 |
# Trading pairs to collect data for
|
@@ -46,7 +59,7 @@ class KrakenDataCollector:
|
|
46 |
def fetch_ticker_data(self, pair: str) -> Optional[Dict]:
|
47 |
"""Fetch ticker data for a single pair"""
|
48 |
try:
|
49 |
-
response = self.
|
50 |
|
51 |
if 'error' in response and response['error']:
|
52 |
logger.error(f"Kraken API error for {pair}: {response['error']}")
|
@@ -72,17 +85,31 @@ class KrakenDataCollector:
|
|
72 |
logger.error(f"Error fetching data for {pair}: {e}")
|
73 |
return None
|
74 |
|
75 |
-
def
|
76 |
-
"""
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
82 |
|
83 |
-
def
|
84 |
"""
|
85 |
-
Collect and
|
86 |
|
87 |
Args:
|
88 |
split: Data split type ('training', 'validation', 'test')
|
@@ -103,16 +130,11 @@ class KrakenDataCollector:
|
|
103 |
if i < num_rows - 1: # Don't sleep after last iteration
|
104 |
time.sleep(delay) # Respect API rate limits
|
105 |
|
106 |
-
# Create DataFrame
|
107 |
df = pd.DataFrame(records)
|
108 |
-
file_path = f"data/{split}/kraken_trades.csv"
|
109 |
-
|
110 |
-
# Create directory if it doesn't exist
|
111 |
-
os.makedirs(os.path.dirname(file_path), exist_ok=True)
|
112 |
|
113 |
-
#
|
114 |
-
|
115 |
-
logger.info(f"Successfully saved {len(records)} records to {file_path}")
|
116 |
|
117 |
# Print data summary
|
118 |
logger.info("\nData Summary:")
|
@@ -121,19 +143,20 @@ class KrakenDataCollector:
|
|
121 |
logger.info(f"Time range: {df['timestamp'].min()} to {df['timestamp'].max()}")
|
122 |
|
123 |
except Exception as e:
|
124 |
-
logger.error(f"Error
|
125 |
raise
|
126 |
|
127 |
def main():
|
128 |
-
"""Main function to run data collection"""
|
129 |
try:
|
130 |
# Initialize collector
|
131 |
-
collector =
|
132 |
-
|
133 |
-
|
134 |
-
|
|
|
135 |
|
136 |
-
# Collect data for each split
|
137 |
splits_config = {
|
138 |
'training': 1000, # 1000 rows for training
|
139 |
'validation': 200, # 200 rows for validation
|
@@ -141,13 +164,13 @@ def main():
|
|
141 |
}
|
142 |
|
143 |
for split, num_rows in splits_config.items():
|
144 |
-
logger.info(f"\nCollecting {split} data...")
|
145 |
-
collector.
|
146 |
|
147 |
-
logger.info("Data collection completed successfully!")
|
148 |
|
149 |
except Exception as e:
|
150 |
-
logger.error(f"Fatal error
|
151 |
raise
|
152 |
|
153 |
if __name__ == "__main__":
|
|
|
5 |
import os
|
6 |
from typing import Dict, List, Optional
|
7 |
import logging
|
8 |
+
from huggingface_hub import HfApi, login
|
9 |
+
from io import StringIO
|
10 |
|
11 |
# Set up logging
|
12 |
logging.basicConfig(
|
|
|
19 |
)
|
20 |
logger = logging.getLogger(__name__)
|
21 |
|
22 |
+
class KrakenHuggingFaceCollector:
|
23 |
+
"""Handles data collection from Kraken and uploading to Hugging Face"""
|
24 |
|
25 |
+
def __init__(self, kraken_key_path: str, hf_token: str, repo_id: str) -> None:
    """Initialise the Kraken client and authenticate against Hugging Face.

    Args:
        kraken_key_path: Path to the file holding the Kraken API key,
            loaded via ``krakenex.API.load_key``.
        hf_token: Hugging Face access token passed to ``login``.
        repo_id: Target Hugging Face dataset repository id.

    Raises:
        Exception: Re-raised (after logging) when loading the Kraken key
            or the Hugging Face login fails.
    """
    # Kraken side: build the client first, then pull credentials from disk.
    self.kraken_api = krakenex.API()
    try:
        self.kraken_api.load_key(kraken_key_path)
        logger.info("Successfully loaded Kraken API key")
    except Exception as err:
        logger.error(f"Failed to load Kraken API key: {err}")
        raise

    # Hugging Face side: authenticate, then keep an API handle and the
    # repo id around for later uploads. Attributes are only set on success.
    try:
        login(token=hf_token)
        self.hf_api = HfApi()
        self.repo_id = repo_id
        logger.info("Successfully logged in to Hugging Face")
    except Exception as err:
        logger.error(f"Failed to login to Hugging Face: {err}")
        raise
|
44 |
|
45 |
# Trading pairs to collect data for
|
|
|
59 |
def fetch_ticker_data(self, pair: str) -> Optional[Dict]:
|
60 |
"""Fetch ticker data for a single pair"""
|
61 |
try:
|
62 |
+
response = self.kraken_api.query_public('Ticker', {'pair': pair})
|
63 |
|
64 |
if 'error' in response and response['error']:
|
65 |
logger.error(f"Kraken API error for {pair}: {response['error']}")
|
|
|
85 |
logger.error(f"Error fetching data for {pair}: {e}")
|
86 |
return None
|
87 |
|
88 |
+
def upload_to_huggingface(self, df: pd.DataFrame, split: str) -> None:
    """Serialize *df* to CSV and upload it to the configured HF dataset repo.

    Args:
        df: Collected ticker records to persist.
        split: Data split name (e.g. 'training', 'validation', 'test');
            used to build the path inside the repository.

    Raises:
        Exception: Re-raised after logging if serialization or upload fails.
    """
    try:
        # BUG FIX: HfApi.upload_file accepts a filesystem path, raw bytes,
        # or a file object opened in *binary* mode. The previous code passed
        # a text-mode StringIO, which huggingface_hub rejects at runtime.
        # Encode the CSV to UTF-8 bytes and hand those over directly.
        csv_bytes = df.to_csv(index=False).encode("utf-8")

        # Destination path inside the dataset repo, one file per split.
        path_in_repo = f"data/{split}/kraken_trades.csv"

        self.hf_api.upload_file(
            path_or_fileobj=csv_bytes,
            path_in_repo=path_in_repo,
            repo_id=self.repo_id,
            repo_type="dataset"
        )

        logger.info(f"Successfully uploaded {split} data to Hugging Face")

    except Exception as e:
        logger.error(f"Error uploading to Hugging Face: {e}")
        raise
|
109 |
|
110 |
+
def collect_and_upload(self, split: str, num_rows: int, delay: int = 2) -> None:
|
111 |
"""
|
112 |
+
Collect data and upload directly to Hugging Face
|
113 |
|
114 |
Args:
|
115 |
split: Data split type ('training', 'validation', 'test')
|
|
|
130 |
if i < num_rows - 1: # Don't sleep after last iteration
|
131 |
time.sleep(delay) # Respect API rate limits
|
132 |
|
133 |
+
# Create DataFrame
|
134 |
df = pd.DataFrame(records)
|
|
|
|
|
|
|
|
|
135 |
|
136 |
+
# Upload to Hugging Face
|
137 |
+
self.upload_to_huggingface(df, split)
|
|
|
138 |
|
139 |
# Print data summary
|
140 |
logger.info("\nData Summary:")
|
|
|
143 |
logger.info(f"Time range: {df['timestamp'].min()} to {df['timestamp'].max()}")
|
144 |
|
145 |
except Exception as e:
|
146 |
+
logger.error(f"Error in data collection and upload: {e}")
|
147 |
raise
|
148 |
|
149 |
def main():
|
150 |
+
"""Main function to run data collection and upload"""
|
151 |
try:
|
152 |
# Initialize collector
|
153 |
+
collector = KrakenHuggingFaceCollector(
|
154 |
+
kraken_key_path="kraken.key",
|
155 |
+
hf_token="your_huggingface_token", # Replace with your token
|
156 |
+
repo_id="GotThatData/kraken-trading-data" # Replace with your repo name
|
157 |
+
)
|
158 |
|
159 |
+
# Collect and upload data for each split
|
160 |
splits_config = {
|
161 |
'training': 1000, # 1000 rows for training
|
162 |
'validation': 200, # 200 rows for validation
|
|
|
164 |
}
|
165 |
|
166 |
for split, num_rows in splits_config.items():
|
167 |
+
logger.info(f"\nCollecting and uploading {split} data...")
|
168 |
+
collector.collect_and_upload(split=split, num_rows=num_rows)
|
169 |
|
170 |
+
logger.info("Data collection and upload completed successfully!")
|
171 |
|
172 |
except Exception as e:
|
173 |
+
logger.error(f"Fatal error: {e}")
|
174 |
raise
|
175 |
|
176 |
if __name__ == "__main__":
|