Datasets: Update
kraken-data-collection-script: +79 -154
CHANGED
@@ -1,177 +1,102 @@

Old version (lines marked with - were removed):

-import os
-from typing import Dict, List, Optional
-import logging
-from huggingface_hub import HfApi, login
-from io import StringIO
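
The first few removed lines are blanked in the diff view. Given the names used later in the script (pd.DataFrame, datetime.utcnow, time.sleep, and a krakenex-style client), they most likely held the remaining imports, roughly:

```python
# Assumed contents of the blanked import lines; the exact statements are not visible in the diff.
import time
from datetime import datetime

import krakenex
import pandas as pd
```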

-# Set up logging
-logging.basicConfig(
-    level=logging.INFO,
-    format='%(asctime)s - %(levelname)s - %(message)s',
-    handlers=[
-        logging.FileHandler('kraken_data_collection.log'),
-        logging.StreamHandler()
-    ]
-)
-logger = logging.getLogger(__name__)

-class KrakenHuggingFaceCollector:
-    """Handles data collection from Kraken and uploading to Hugging Face"""
-    ...
-            login(token=hf_token)
-            self.hf_api = HfApi()
-            self.repo_id = repo_id
-            logger.info("Successfully logged in to Hugging Face")
-        except Exception as e:
-            logger.error(f"Failed to login to Hugging Face: {e}")
-            raise
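
The opening of the removed constructor is not rendered in the diff view. Judging from the surviving pieces (a krakenex-style `query_public` call, the Hugging Face `login`, and the arguments `main()` passes in), it presumably looked roughly like this; the signature, including the `hf_token` parameter, is an assumption:

```python
# Plausible reconstruction of the removed constructor; not quoted from the diff.
import logging

import krakenex
from huggingface_hub import HfApi, login

logger = logging.getLogger(__name__)


class KrakenHuggingFaceCollector:
    """Handles data collection from Kraken and uploading to Hugging Face"""

    def __init__(self, kraken_key_path: str, hf_token: str, repo_id: str):
        # Kraken client (krakenex-style), keyed from a local file such as kraken.key
        self.kraken_api = krakenex.API()
        self.kraken_api.load_key(kraken_key_path)

        try:
            login(token=hf_token)
            self.hf_api = HfApi()
            self.repo_id = repo_id
            logger.info("Successfully logged in to Hugging Face")
        except Exception as e:
            logger.error(f"Failed to login to Hugging Face: {e}")
            raise
        # ... followed by the self.pairs list shown below
```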

-        # Trading pairs to collect data for
-        self.pairs = [
-            "XXBTZUSD",   # Bitcoin
-            "XETHZUSD",   # Ethereum
-            "XXRPZUSD",   # Ripple
-            "ADAUSD",     # Cardano
-            "DOGEUSD",    # Dogecoin
-            "BNBUSD",     # Binance Coin
-            "SOLUSD",     # Solana
-            "DOTUSD",     # Polkadot
-            "MATICUSD",   # Polygon
-            "LTCUSD"      # Litecoin
-        ]
-
-    def fetch_ticker_data(self, pair: str) -> Optional[Dict]:
-        """Fetch ticker data for a single pair"""
-        try:
-            response = self.kraken_api.query_public('Ticker', {'pair': pair})
-
-            if 'error' in response and response['error']:
-                logger.error(f"Kraken API error for {pair}: {response['error']}")
-                return None
-
-            data = response['result']
-            pair_data = list(data.values())[0]
-
-            return {
-                'timestamp': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'),
-                'pair': pair,
-                'price': float(pair_data['c'][0]),   # Last trade closed price
-                'volume': float(pair_data['v'][0]),  # 24h volume
-                'bid': float(pair_data['b'][0]),     # Best bid
-                'ask': float(pair_data['a'][0]),     # Best ask
-                'low': float(pair_data['l'][0]),     # 24h low
-                'high': float(pair_data['h'][0]),    # 24h high
-                'vwap': float(pair_data['p'][0]),    # 24h VWAP
-                'trades': int(pair_data['t'][0])     # Number of trades
-            }
-
-        except Exception as e:
-            logger.error(f"Error fetching data for {pair}: {e}")
-            return None
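
For reference, the parsing above assumes the shape of Kraken's public Ticker response, which keys the result by pair and packs each field as a small array. A trimmed example of what `query_public('Ticker', ...)` returns, with made-up numbers:

```python
# Illustrative Ticker response (values invented); field layout follows Kraken's public API docs.
example_response = {
    "error": [],
    "result": {
        "XXBTZUSD": {
            "a": ["43210.1", "1", "1.000"],   # ask: price, whole-lot volume, lot volume
            "b": ["43210.0", "2", "2.000"],   # bid: price, whole-lot volume, lot volume
            "c": ["43210.0", "0.015"],        # last trade closed: price, lot volume
            "v": ["812.5", "1450.2"],         # volume: today, last 24 hours
            "p": ["43150.7", "43080.3"],      # VWAP: today, last 24 hours
            "t": [10250, 18400],              # number of trades: today, last 24 hours
            "l": ["42800.0", "42500.0"],      # low: today, last 24 hours
            "h": ["43500.0", "43650.0"],      # high: today, last 24 hours
            "o": "42950.0",                   # today's opening price
        }
    },
}

pair_data = list(example_response["result"].values())[0]
print(float(pair_data["c"][0]))  # 43210.0, the value fetch_ticker_data stores as 'price'
```

Note that index 0 of the v/p/t/l/h arrays is the figure since today's UTC open rather than a rolling 24-hour value, so the "24h" comments in the removed code were a slight simplification.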

-    def upload_to_huggingface(self, df: pd.DataFrame, split: str) -> None:
-        """Upload DataFrame to Hugging Face as CSV"""
-        try:
-            # Convert DataFrame to CSV string
-            csv_str = df.to_csv(index=False)
-
-            # Upload to Hugging Face
-            path_in_repo = f"data/{split}/kraken_trades.csv"
-
-            self.hf_api.upload_file(
-                path_or_fileobj=StringIO(csv_str),
-                path_in_repo=path_in_repo,
-                repo_id=self.repo_id,
-                repo_type="dataset"
-            )
-
-            logger.info(f"Successfully uploaded {split} data to Hugging Face")
-
-        except Exception as e:
-            logger.error(f"Error uploading to Hugging Face: {e}")
-            raise
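
A note on the call above: as far as I recall, `HfApi.upload_file` expects `path_or_fileobj` to be a filesystem path, raw `bytes`, or a binary buffer, so a text-mode `StringIO` may be rejected. Encoding the CSV to bytes is the safer pattern; a minimal sketch:

```python
# Sketch of an equivalent upload that passes bytes instead of a text StringIO.
import pandas as pd
from huggingface_hub import HfApi


def upload_csv(api: HfApi, df: pd.DataFrame, repo_id: str, split: str) -> None:
    csv_bytes = df.to_csv(index=False).encode("utf-8")  # bytes are accepted by upload_file
    api.upload_file(
        path_or_fileobj=csv_bytes,
        path_in_repo=f"data/{split}/kraken_trades.csv",
        repo_id=repo_id,
        repo_type="dataset",
    )
```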

-    def collect_and_upload(self, split: str, num_rows: int, delay: int = 2) -> None:
-        """
-        Collect data and upload directly to Hugging Face
-
-        Args:
-            split: Data split type ('training', 'validation', 'test')
-            num_rows: Number of data points to collect per pair
-            delay: Delay between API calls in seconds
-        """
         try:
             records = []
-            for i in range(...
             for pair in self.pairs:
-                    ...
-            ...
-            logger.info(f"Pairs collected: {len(df['pair'].unique())}")
-            logger.info(f"Time range: {df['timestamp'].min()} to {df['timestamp'].max()}")
         except Exception as e:
-            logger.error(f"Error in ...
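
Most of the removed `collect_and_upload` body is not rendered in the diff view. Judging from the surviving lines (the `records` list, the per-pair loop, the `delay` argument, and the summary logging), it plausibly looked roughly like the sketch below; the details are reconstructed, not quoted:

```python
# Hedged reconstruction of the removed method; assumes the module-level imports
# and logger shown earlier, plus `import time` and `import pandas as pd`.
def collect_and_upload(self, split: str, num_rows: int, delay: int = 2) -> None:
    try:
        records = []

        for i in range(num_rows):
            logger.info(f"Collecting row {i + 1}/{num_rows} for the {split} split")

            for pair in self.pairs:
                record = self.fetch_ticker_data(pair)
                if record:
                    records.append(record)
                time.sleep(delay)  # stay under Kraken's public API rate limit

        df = pd.DataFrame(records)
        self.upload_to_huggingface(df, split)

        logger.info(f"Rows collected: {len(df)}")
        logger.info(f"Pairs collected: {len(df['pair'].unique())}")
        logger.info(f"Time range: {df['timestamp'].min()} to {df['timestamp'].max()}")

    except Exception as e:
        logger.error(f"Error in collect_and_upload: {e}")
```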

 def main():
-    """Main function to run data collection and upload"""
     try:
-        # Initialize collector
         collector = KrakenHuggingFaceCollector(
             kraken_key_path="kraken.key",
-            ...
-            repo_id="GotThatData/kraken-trading-data"  # Replace with your repo name
         )

-        # ...
-        splits_config = {
-            ...
-        }
-
-        for split, num_rows in splits_config.items():
-            logger.info(f"\nCollecting and uploading {split} data...")
-            collector.collect_and_upload(split=split, num_rows=num_rows)
-
-        logger.info("Data collection and upload completed successfully!")

     except Exception as e:
         logger.error(f"Fatal error: {e}")
         raise
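
The `splits_config` block above is blanked in the diff view; judging from the loop that consumes it, it presumably mapped split names to per-pair row counts, along these lines (the numbers are purely illustrative):

```python
# Illustrative values only; the real split sizes are not visible in the diff.
splits_config = {
    "training": 1000,    # rows to collect per pair for the training split
    "validation": 200,
    "test": 200,
}
```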

New version (added):

```python
def collect_continuous(self, interval_minutes: int = 3, batch_size: int = 30):
    """
    Enhanced continuous data collection with optimal parameters

    Args:
        interval_minutes: Minutes between each collection (default: 3)
        batch_size: Number of snapshots per batch (default: 30)
    """
    self.collection_start_time = datetime.now()
    logger.info(f"Starting enhanced continuous collection at {self.collection_start_time}")
    logger.info(f"Collecting {batch_size} snapshots every {interval_minutes} minutes")
    logger.info(f"Total API calls per batch: ~{batch_size * len(self.pairs)}")
    logger.info(f"Estimated daily data points: {(24 * 60 // interval_minutes) * batch_size * len(self.pairs)}")
    logger.info("Press CTRL+C to stop collection")

    while self.running:
        try:
            batch_start_time = datetime.now()
            records = []

            for i in range(batch_size):
                if not self.running:
                    break

                snapshot_start = datetime.now()
                logger.info(f"Collecting snapshot {i+1}/{batch_size}")

                for pair in self.pairs:
                    if self.check_api_rate():
                        record = self.fetch_ticker_data(pair)
                        if record:
                            records.append(record)
                    else:
                        time.sleep(1)  # Wait if approaching rate limit

                # Dynamic sleep calculation
                elapsed = (datetime.now() - snapshot_start).total_seconds()
                sleep_time = max(0.5, 1.5 - elapsed)  # Ensure at least 0.5s between snapshots

                if i < batch_size - 1 and self.running:
                    time.sleep(sleep_time)

            if records:
                df = pd.DataFrame(records)
                current_timestamp = datetime.now().strftime('%Y%m%d_%H%M')
                self.upload_to_huggingface(df, current_timestamp)

                self.data_points_collected += len(records)
                collection_duration = (datetime.now() - self.collection_start_time)

                # Enhanced batch summary
                logger.info("\nBatch Summary:")
                logger.info(f"Records in batch: {len(records)}")
                logger.info(f"Pairs collected: {len(df['pair'].unique())}")
                logger.info(f"Total data points: {self.data_points_collected}")
                logger.info(f"Collection duration: {collection_duration}")
                logger.info(f"Data points per hour: {self.data_points_collected / collection_duration.total_seconds() * 3600:.2f}")

            # Adaptive interval timing
            batch_duration = (datetime.now() - batch_start_time).total_seconds()
            sleep_time = max(0, interval_minutes * 60 - batch_duration)

            if self.running and sleep_time > 0:
                logger.info(f"Waiting {sleep_time:.2f} seconds until next batch...")
                time.sleep(sleep_time)

        except Exception as e:
            logger.error(f"Error in continuous collection: {e}")
            logger.info("Waiting 30 seconds before retry...")
            time.sleep(30)
```
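
The snippet relies on a few members that this diff does not show: `self.running`, `self.data_points_collected`, and a `check_api_rate()` helper. A minimal sketch of what that supporting state might look like (attribute names other than those three, and the per-minute cap, are assumptions):

```python
import time


class CollectionStateSketch:
    """Hypothetical sketch of the state collect_continuous() relies on.

    None of this appears in the diff; names other than `running`,
    `data_points_collected`, and `check_api_rate` are assumptions.
    """

    def __init__(self) -> None:
        self.running = True              # flipped to False (e.g. on CTRL+C) to stop the loop
        self.data_points_collected = 0   # running total across all batches
        self._calls_in_window = 0        # naive per-minute call counter
        self._window_start = time.time()

    def check_api_rate(self, max_calls_per_minute: int = 60) -> bool:
        """Return True when another public API call looks safe; the cap is illustrative."""
        now = time.time()
        if now - self._window_start >= 60:
            self._window_start = now
            self._calls_in_window = 0
        if self._calls_in_window < max_calls_per_minute:
            self._calls_in_window += 1
            return True
        return False
```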
And update the main function:
```python
def main():
    try:
        collector = KrakenHuggingFaceCollector(
            kraken_key_path="kraken.key",
            repo_id="GotThatData/kraken-trading-data"
        )

        # Enhanced collection parameters
        collector.collect_continuous(
            interval_minutes=3,  # Collect every 3 minutes
            batch_size=30        # 30 snapshots per batch
        )

    except KeyboardInterrupt:
        logger.info("Stopping collection (CTRL+C pressed)")
        collector.running = False
    except Exception as e:
        logger.error(f"Fatal error: {e}")
        raise
```
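
Not shown in the diff: if the file is still run as a standalone script, the usual entry-point guard would presumably remain at the bottom:

```python
if __name__ == "__main__":
    main()
```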
This enhanced version will give you:
- 30 snapshots × 9 pairs = 270 data points per batch
- One batch every 3 minutes = 20 batches per hour
- 20 batches × 270 points = 5,400 data points per hour
- ~129,600 data points per day
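
For a quick sanity check on those figures (the summary counts 9 pairs even though ten symbols are configured upstream, presumably because one of them returns no data on Kraken), the arithmetic works out as follows:

```python
# Reproduces the back-of-envelope numbers above; the 9-pair count is taken from the text.
pairs = 9                  # assumes 9 of the 10 configured symbols return data
snapshots_per_batch = 30
interval_minutes = 3

points_per_batch = snapshots_per_batch * pairs          # 270
batches_per_hour = 60 // interval_minutes               # 20
points_per_hour = batches_per_hour * points_per_batch   # 5,400
points_per_day = 24 * points_per_hour                   # 129,600

print(points_per_batch, batches_per_hour, points_per_hour, points_per_day)
```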
|