import krakenex
import pandas as pd
from datetime import datetime, timezone
import time
import os
from typing import Dict, List, Optional
import logging

# Set up logging to both a file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('kraken_data_collection.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


class KrakenDataCollector:
    """Handles data collection from Kraken API"""

    def __init__(self, api_key_path: str):
        """Create an authenticated Kraken API client.

        Args:
            api_key_path: Path to the Kraken key file loaded by krakenex.

        Raises:
            Exception: Re-raised if the key file cannot be loaded.
        """
        self.api = krakenex.API()
        try:
            self.api.load_key(api_key_path)
            logger.info("Successfully loaded Kraken API key")
        except Exception as e:
            logger.error("Failed to load API key: %s", e)
            raise

        # Trading pairs to collect data for (Kraken pair codes).
        self.pairs = [
            "XXBTZUSD",  # Bitcoin
            "XETHZUSD",  # Ethereum
            "XXRPZUSD",  # Ripple
            "ADAUSD",    # Cardano
            "DOGEUSD",   # Dogecoin
            "BNBUSD",    # Binance Coin
            "SOLUSD",    # Solana
            "DOTUSD",    # Polkadot
            "MATICUSD",  # Polygon
            "LTCUSD"     # Litecoin
        ]

    def fetch_ticker_data(self, pair: str) -> Optional[Dict]:
        """Fetch ticker data for a single pair.

        Args:
            pair: Kraken pair code, e.g. "XXBTZUSD".

        Returns:
            A dict of ticker fields (price, volume, bid/ask, etc.) with a
            UTC timestamp, or None if the API call failed or returned an
            error payload.
        """
        try:
            response = self.api.query_public('Ticker', {'pair': pair})

            if 'error' in response and response['error']:
                logger.error("Kraken API error for %s: %s", pair, response['error'])
                return None

            data = response['result']
            # Kraken may echo a slightly different pair key; take the
            # single entry in the result dict rather than indexing by name.
            pair_data = list(data.values())[0]

            # datetime.utcnow() is deprecated; use an aware UTC datetime.
            # The formatted string is identical to the previous output.
            return {
                'timestamp': datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S'),
                'pair': pair,
                'price': float(pair_data['c'][0]),   # Last trade closed price
                'volume': float(pair_data['v'][0]),  # 24h volume
                'bid': float(pair_data['b'][0]),     # Best bid
                'ask': float(pair_data['a'][0]),     # Best ask
                'low': float(pair_data['l'][0]),     # 24h low
                'high': float(pair_data['h'][0]),    # 24h high
                'vwap': float(pair_data['p'][0]),    # 24h VWAP
                'trades': int(pair_data['t'][0])     # Number of trades
            }
        except Exception as e:
            # Best-effort: a single failed pair should not abort the run.
            logger.error("Error fetching data for %s: %s", pair, e)
            return None

    def create_data_directories(self) -> None:
        """Create directory structure for data storage."""
        for split in ['training', 'validation', 'test']:
            directory = f'data/{split}'
            # exist_ok avoids the check-then-create race of the old
            # os.path.exists() guard and matches save_data_to_csv's usage.
            os.makedirs(directory, exist_ok=True)
            logger.info("Created directory: %s", directory)

    def save_data_to_csv(self, split: str, num_rows: int, delay: int = 2) -> None:
        """
        Collect and save data for all pairs

        Args:
            split: Data split type ('training', 'validation', 'test')
            num_rows: Number of data points to collect per pair
            delay: Delay between API calls in seconds
        """
        try:
            records = []

            for i in range(num_rows):
                logger.info("Collecting row %d/%d", i + 1, num_rows)

                for pair in self.pairs:
                    record = self.fetch_ticker_data(pair)
                    if record:
                        records.append(record)

                if i < num_rows - 1:  # Don't sleep after last iteration
                    time.sleep(delay)  # Respect API rate limits

            # Create DataFrame and save to CSV
            df = pd.DataFrame(records)
            file_path = f"data/{split}/kraken_trades.csv"

            # Create directory if it doesn't exist
            os.makedirs(os.path.dirname(file_path), exist_ok=True)

            # Save data
            df.to_csv(file_path, index=False)
            logger.info("Successfully saved %d records to %s", len(records), file_path)

            # Print data summary. Guard against an empty collection run
            # (all API calls failed): df would have no 'pair'/'timestamp'
            # columns and the summary lines would raise KeyError.
            logger.info("\nData Summary:")
            logger.info("Total records: %d", len(records))
            if records:
                logger.info("Pairs collected: %d", len(df['pair'].unique()))
                logger.info("Time range: %s to %s", df['timestamp'].min(), df['timestamp'].max())
            else:
                logger.warning("No records collected for split '%s'", split)

        except Exception as e:
            logger.error("Error saving data: %s", e)
            raise


def main():
    """Main function to run data collection"""
    try:
        # Initialize collector
        collector = KrakenDataCollector("kraken.key")

        # Create directory structure
        collector.create_data_directories()

        # Collect data for each split
        splits_config = {
            'training': 1000,   # 1000 rows for training
            'validation': 200,  # 200 rows for validation
            'test': 200         # 200 rows for test
        }

        for split, num_rows in splits_config.items():
            logger.info("\nCollecting %s data...", split)
            collector.save_data_to_csv(split=split, num_rows=num_rows)

        logger.info("Data collection completed successfully!")

    except Exception as e:
        logger.error("Fatal error in data collection: %s", e)
        raise


if __name__ == "__main__":
    main()