Edwin Salguero committed
Commit
f35bff2
·
0 Parent(s):

Initial commit: FRED_ML project without binary files

Files changed (43)
  1. .gitattributes +35 -0
  2. .gitignore +78 -0
  3. LICENSE +21 -0
  4. README.md +150 -0
  5. __pycache__/config.cpython-39.pyc +0 -0
  6. __pycache__/fred_data_collector_v2.cpython-39.pyc +0 -0
  7. config/__pycache__/settings.cpython-39.pyc +0 -0
  8. config/pipeline.yaml +16 -0
  9. config/settings.py +10 -0
  10. data/exports/fred_data_20250710_221702.csv +0 -0
  11. data/exports/fred_data_20250710_223022.csv +0 -0
  12. data/exports/fred_data_20250710_223149.csv +0 -0
  13. data/exports/regression_summary.txt +28 -0
  14. requirements.txt +13 -0
  15. scripts/run_advanced_analytics.py +56 -0
  16. scripts/run_eda.py +80 -0
  17. scripts/run_fred_pipeline.py +10 -0
  18. scripts/run_segmentation.py +91 -0
  19. scripts/run_statistical_modeling.py +104 -0
  20. scripts/run_time_series.py +80 -0
  21. src/__init__.py +21 -0
  22. src/__pycache__/__init__.cpython-39.pyc +0 -0
  23. src/analysis/__init__.py +7 -0
  24. src/analysis/__pycache__/__init__.cpython-39.pyc +0 -0
  25. src/analysis/__pycache__/advanced_analytics.cpython-39.pyc +0 -0
  26. src/analysis/__pycache__/economic_analyzer.cpython-39.pyc +0 -0
  27. src/analysis/advanced_analytics.py +517 -0
  28. src/analysis/economic_analyzer.py +201 -0
  29. src/core/__init__.py +7 -0
  30. src/core/__pycache__/__init__.cpython-39.pyc +0 -0
  31. src/core/__pycache__/base_pipeline.cpython-39.pyc +0 -0
  32. src/core/__pycache__/fred_client.cpython-39.pyc +0 -0
  33. src/core/__pycache__/fred_pipeline.cpython-39.pyc +0 -0
  34. src/core/base_pipeline.py +38 -0
  35. src/core/fred_client.py +288 -0
  36. src/core/fred_pipeline.py +88 -0
  37. src/utils/__init__.py +7 -0
  38. src/utils/examples.py +139 -0
  39. src/visualization/__init__.py +5 -0
  40. tests/__pycache__/test_fred_api.cpython-39-pytest-7.4.0.pyc +0 -0
  41. tests/__pycache__/test_fredapi_library.cpython-39-pytest-7.4.0.pyc +0 -0
  42. tests/test_fred_api.py +115 -0
  43. tests/test_fredapi_library.py +84 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,78 @@
+ # Python
+ __pycache__/
+ *.py[cod]
+ *$py.class
+ *.so
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # Virtual environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # IDEs
+ .vscode/
+ .idea/
+ *.swp
+ *.swo
+ *~
+
+ # OS
+ .DS_Store
+ .DS_Store?
+ ._*
+ .Spotlight-V100
+ .Trashes
+ ehthumbs.db
+ Thumbs.db
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # Data files
+ data/exports/*.png
+ data/exports/*.jpg
+ data/exports/*.jpeg
+ data/exports/*.gif
+ data/exports/*.svg
+ data/exports/*.pdf
+ data/processed/*.csv
+ data/raw/*
+
+ # Logs
+ logs/*.log
+ *.log
+
+ # Model files
+ *.pkl
+ *.pickle
+ *.joblib
+ *.h5
+ *.hdf5
+ *.model
+
+ # Temporary files
+ *.tmp
+ *.temp
+ temp/
+ tmp/
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Edwin Salguero
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
README.md ADDED
@@ -0,0 +1,150 @@
+ # FRED Economic Data Analysis Tool
+
+ A comprehensive Python tool for collecting, analyzing, and visualizing Federal Reserve Economic Data (FRED) using the FRED API.
+
+ ## Features
+
+ - **Data Collection**: Fetch economic indicators from FRED API
+ - **Data Analysis**: Generate summary statistics and insights
+ - **Visualization**: Create time series plots and charts
+ - **Data Export**: Save data to CSV format
+ - **Flexible Configuration**: Easy customization of indicators and date ranges
+
+ ## Setup
+
+ ### 1. Install Dependencies
+
+ ```bash
+ pip install -r requirements.txt
+ ```
+
+ ### 2. API Key Configuration
+
+ Your FRED API key is already configured in `config.py`:
+ - API Key: `acf8bbec7efe3b6dfa6ae083e7152314`
+
+ ### 3. Project Structure
+
+ ```
+ economic_output_datasets/
+ ├── config.py               # Configuration settings
+ ├── fred_data_collector.py  # Main data collection script
+ ├── requirements.txt        # Python dependencies
+ ├── README.md               # This file
+ ├── data/                   # Output directory for CSV files
+ └── plots/                  # Output directory for visualizations
+ ```
+
+ ## Usage
+
+ ### Basic Usage
+
+ Run the main script to collect and analyze economic data:
+
+ ```bash
+ python fred_data_collector.py
+ ```
+
+ This will:
+ - Fetch data for key economic indicators (GDP, Unemployment Rate, CPI, Federal Funds Rate, 10-Year Treasury Rate)
+ - Generate summary statistics
+ - Create visualizations
+ - Save data to CSV files
+
+ ### Custom Analysis
+
+ You can customize the analysis by modifying the script:
+
+ ```python
+ from fred_data_collector import FREDDataCollector
+
+ # Initialize collector
+ collector = FREDDataCollector()
+
+ # Custom series and date range
+ custom_series = ['GDP', 'UNRATE', 'CPIAUCSL']
+ start_date = '2020-01-01'
+ end_date = '2024-01-01'
+
+ # Run analysis
+ df, summary = collector.run_analysis(
+     series_ids=custom_series,
+     start_date=start_date,
+     end_date=end_date
+ )
+ ```
+
+ ## Available Economic Indicators
+
+ The tool includes these common economic indicators:
+
+ | Series ID | Description |
+ |-----------|-------------|
+ | GDP | Gross Domestic Product |
+ | UNRATE | Unemployment Rate |
+ | CPIAUCSL | Consumer Price Index |
+ | FEDFUNDS | Federal Funds Rate |
+ | DGS10 | 10-Year Treasury Rate |
+ | DEXUSEU | US/Euro Exchange Rate |
+ | PAYEMS | Total Nonfarm Payrolls |
+ | INDPRO | Industrial Production |
+ | M2SL | M2 Money Stock |
+ | PCE | Personal Consumption Expenditures |
+
+ ## Output Files
+
+ ### Data Files
+ - CSV files saved in the `data/` directory
+ - Timestamped filenames (e.g., `fred_economic_data_20241201_143022.csv`)
+
+ ### Visualization Files
+ - PNG plots saved in the `plots/` directory
+ - High-resolution charts with economic indicator time series
+
+ ## API Rate Limits
+
+ The FRED API has rate limits:
+ - 120 requests per minute
+ - 1000 requests per day
+
+ The tool includes error handling for rate limit issues.
+
+ ## Configuration
+
+ Edit `config.py` to customize:
+ - API key (if needed)
+ - Default date ranges
+ - Output directories
+ - Default indicators
+
+ ## Dependencies
+
+ - `fredapi`: FRED API client
+ - `pandas`: Data manipulation
+ - `numpy`: Numerical computing
+ - `matplotlib`: Plotting
+ - `seaborn`: Statistical visualization
+ - `jupyter`: Interactive notebooks (optional)
+
+ ## Error Handling
+
+ The tool includes comprehensive error handling for:
+ - API connection issues
+ - Invalid series IDs
+ - Rate limit exceeded
+ - Data format errors
+
+ ## Contributing
+
+ To add new features:
+ 1. Extend the `FREDDataCollector` class
+ 2. Add new methods for specific analysis
+ 3. Update the configuration as needed
+
+ ## License
+
+ This project is for educational and research purposes. Please respect FRED API terms of service.
+
+ ## Support
+
+ For issues with the FRED API, visit: https://fred.stlouisfed.org/docs/api/
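The rate-limit and error-handling notes in the README above are easiest to see concretely. The helper below is a minimal sketch, not part of this commit: it assumes the `fredapi` package pinned in requirements.txt and a key like the one shown in `config/settings.py`; the function name and the retry/backoff behavior are illustrative, and the exception is caught broadly because the exact error raised on a rate-limited request depends on fredapi internals.

```python
import time
from fredapi import Fred

def fetch_series_with_retry(api_key, series_id, start, end, retries=3, wait=60.0):
    """Fetch one FRED series, pausing and retrying if a request fails.

    A ~60 second pause is a conservative way to stay under the
    120-requests-per-minute limit described in the README.
    """
    fred = Fred(api_key=api_key)
    for attempt in range(1, retries + 1):
        try:
            return fred.get_series(series_id,
                                   observation_start=start,
                                   observation_end=end)
        except Exception as exc:  # broad on purpose; see note above
            print(f"Attempt {attempt} for {series_id} failed: {exc}")
            if attempt == retries:
                raise
            time.sleep(wait)

# Example call (key taken from config/settings.py in this commit):
# gdp = fetch_series_with_retry("acf8bbec7efe3b6dfa6ae083e7152314",
#                               "GDP", "2020-01-01", "2024-01-01")
```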
__pycache__/config.cpython-39.pyc ADDED
Binary file (340 Bytes).
 
__pycache__/fred_data_collector_v2.cpython-39.pyc ADDED
Binary file (7.37 kB).
 
config/__pycache__/settings.cpython-39.pyc ADDED
Binary file (377 Bytes).
 
config/pipeline.yaml ADDED
@@ -0,0 +1,16 @@
+ fred:
+   api_key: "acf8bbec7efe3b6dfa6ae083e7152314"
+   series:
+     - GDP
+     - UNRATE
+     - CPIAUCSL
+     - FEDFUNDS
+     - DGS10
+   start_date: "2010-01-01"
+   end_date: "2024-01-01"
+   output_dir: "data/processed"
+   export_dir: "data/exports"
+   schedule: "0 6 * * *"  # Every day at 6am UTC
+ logging:
+   level: INFO
+   file: logs/pipeline.log
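How `FREDPipeline` consumes this file is defined in `src/core/fred_pipeline.py` (not reproduced on this page), but the structure above can be read with PyYAML, which is pinned in requirements.txt. A small sketch; the function name is illustrative:

```python
import yaml

def load_pipeline_config(path="config/pipeline.yaml"):
    """Read the pipeline settings shown above into a plain dict."""
    with open(path) as fh:
        return yaml.safe_load(fh)

cfg = load_pipeline_config()
print(cfg["fred"]["series"])    # ['GDP', 'UNRATE', 'CPIAUCSL', 'FEDFUNDS', 'DGS10']
print(cfg["fred"]["schedule"])  # '0 6 * * *'  (daily at 06:00 UTC)
print(cfg["logging"]["file"])   # 'logs/pipeline.log'
```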
config/settings.py ADDED
@@ -0,0 +1,10 @@
+ # FRED API Configuration
+ FRED_API_KEY = "acf8bbec7efe3b6dfa6ae083e7152314"
+
+ # Data settings
+ DEFAULT_START_DATE = "2010-01-01"
+ DEFAULT_END_DATE = "2024-01-01"
+
+ # Output settings
+ OUTPUT_DIR = "data"
+ PLOTS_DIR = "plots"
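`config/settings.py` hard-codes the API key. Since `python-dotenv` is already pinned in requirements.txt, an environment-based variant would look roughly like the sketch below; the `FRED_API_KEY` environment variable and the `.env` file are assumptions, not part of this commit.

```python
import os
from dotenv import load_dotenv

# Reads key=value pairs from a local .env file (e.g. "FRED_API_KEY=...") into the environment.
load_dotenv()

FRED_API_KEY = os.getenv("FRED_API_KEY", "")

# Data settings (unchanged from the committed file)
DEFAULT_START_DATE = "2010-01-01"
DEFAULT_END_DATE = "2024-01-01"

# Output settings
OUTPUT_DIR = "data"
PLOTS_DIR = "plots"
```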
data/exports/fred_data_20250710_221702.csv ADDED
The diff for this file is too large to render.
 
data/exports/fred_data_20250710_223022.csv ADDED
The diff for this file is too large to render.
 
data/exports/fred_data_20250710_223149.csv ADDED
The diff for this file is too large to render.
 
data/exports/regression_summary.txt ADDED
@@ -0,0 +1,28 @@
+ OLS Regression Results
+ ==============================================================================
+ Dep. Variable:       GDP                R-squared:            0.971
+ Model:               OLS                Adj. R-squared:       0.970
+ Method:              Least Squares      F-statistic:          839.9
+ Date:                Thu, 10 Jul 2025   Prob (F-statistic):   5.55e-76
+ Time:                22:30:42           Log-Likelihood:       -903.30
+ No. Observations:    105                AIC:                  1817.
+ Df Residuals:        100                BIC:                  1830.
+ Df Model:            4
+ Covariance Type:     nonrobust
+ ==============================================================================
+                coef    std err          t      P>|t|      [0.025      0.975]
+ ------------------------------------------------------------------------------
+ const      -328.8855    630.799     -0.521      0.603   -1580.374     922.603
+ UNRATE      -21.7142     79.789     -0.272      0.786    -180.013     136.584
+ CPIAUCSL     85.7935      2.036     42.144      0.000      81.755      89.832
+ FEDFUNDS    492.3433     92.591      5.317      0.000     308.646     676.041
+ DGS10      -883.8622    122.881     -7.193      0.000   -1127.655    -640.070
+ ==============================================================================
+ Omnibus:         12.409   Durbin-Watson:       2.138
+ Prob(Omnibus):    0.002   Jarque-Bera (JB):   13.297
+ Skew:             0.746   Prob(JB):          0.00130
+ Kurtosis:         3.902   Cond. No.             812.
+ ==============================================================================
+
+ Notes:
+ [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
requirements.txt ADDED
@@ -0,0 +1,13 @@
+ fredapi==0.4.2
+ pandas==2.1.4
+ numpy==1.24.3
+ matplotlib==3.7.2
+ seaborn==0.12.2
+ jupyter==1.0.0
+ python-dotenv==1.0.0
+ requests==2.31.0
+ PyYAML==6.0.2
+ APScheduler==3.10.4
+ scikit-learn==1.3.0
+ scipy==1.11.1
+ statsmodels==0.14.0
scripts/run_advanced_analytics.py ADDED
@@ -0,0 +1,56 @@
1
+ #!/usr/bin/env python
2
+ """
3
+ Advanced Analytics Runner for FRED Economic Data
4
+ Runs comprehensive statistical analysis, modeling, and insights extraction.
5
+ """
6
+
7
+ import os
8
+ import sys
9
+ import glob
10
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
11
+
12
+ from analysis.advanced_analytics import AdvancedAnalytics
13
+
14
+ def find_latest_data():
15
+ """Find the most recent FRED data file."""
16
+ data_files = glob.glob('data/processed/fred_data_*.csv')
17
+ if not data_files:
18
+ raise FileNotFoundError("No FRED data files found. Run the pipeline first.")
19
+
20
+ # Get the most recent file
21
+ latest_file = max(data_files, key=os.path.getctime)
22
+ print(f"Using data file: {latest_file}")
23
+ return latest_file
24
+
25
+ def main():
26
+ """Run the complete advanced analytics workflow."""
27
+ print("=" * 80)
28
+ print("FRED ECONOMIC DATA - ADVANCED ANALYTICS")
29
+ print("=" * 80)
30
+
31
+ try:
32
+ # Find the latest data file
33
+ data_file = find_latest_data()
34
+
35
+ # Initialize analytics
36
+ analytics = AdvancedAnalytics(data_path=data_file)
37
+
38
+ # Run complete analysis
39
+ results = analytics.run_complete_analysis()
40
+
41
+ print("\n" + "=" * 80)
42
+ print("ANALYTICS COMPLETE!")
43
+ print("=" * 80)
44
+ print("Generated outputs:")
45
+ print(" 📊 data/exports/insights_report.txt - Comprehensive insights")
46
+ print(" 📈 data/exports/clustering_analysis.png - Clustering results")
47
+ print(" 📉 data/exports/time_series_decomposition.png - Time series decomposition")
48
+ print(" 🔮 data/exports/time_series_forecast.png - Time series forecast")
49
+ print("\nKey findings have been saved to data/exports/insights_report.txt")
50
+
51
+ except Exception as e:
52
+ print(f"Error running analytics: {e}")
53
+ sys.exit(1)
54
+
55
+ if __name__ == "__main__":
56
+ main()
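`find_latest_data()` above picks the newest export by `os.path.getctime`. Because the pipeline already embeds a timestamp in names such as `fred_data_20250710_221702.csv`, sorting on the filename is an alternative that is unaffected by copies or restores (which reset ctime). A sketch only, not part of the commit:

```python
import glob
import os

def find_latest_data(pattern='data/processed/fred_data_*.csv'):
    """Pick the newest export using the YYYYMMDD_HHMMSS stamp in the filename."""
    data_files = glob.glob(pattern)
    if not data_files:
        raise FileNotFoundError("No FRED data files found. Run the pipeline first.")
    # Lexicographic order of the basenames matches chronological order
    # for this fixed-width naming scheme.
    return max(data_files, key=os.path.basename)
```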
scripts/run_eda.py ADDED
@@ -0,0 +1,80 @@
1
+ #!/usr/bin/env python
2
+ """
3
+ Run EDA: Distributions, skewness, kurtosis, correlations, PCA/t-SNE
4
+ """
5
+ import os
6
+ import sys
7
+ import glob
8
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
9
+ import pandas as pd
10
+ import matplotlib.pyplot as plt
11
+ import seaborn as sns
12
+ from sklearn.decomposition import PCA
13
+ from sklearn.preprocessing import StandardScaler
14
+
15
+ # Find latest processed data file
16
+ def find_latest_data():
17
+ data_files = glob.glob('data/processed/fred_data_*.csv')
18
+ if not data_files:
19
+ raise FileNotFoundError("No FRED data files found. Run the pipeline first.")
20
+ return max(data_files, key=os.path.getctime)
21
+
22
+ def main():
23
+ print("="*60)
24
+ print("FRED EDA: Distributions, Skewness, Kurtosis, Correlations, PCA")
25
+ print("="*60)
26
+ data_file = find_latest_data()
27
+ print(f"Using data file: {data_file}")
28
+ df = pd.read_csv(data_file, index_col=0, parse_dates=True)
29
+ df_clean = df.dropna()
30
+ # 1. Distributions, Skewness, Kurtosis
31
+ desc = df.describe()
32
+ skew = df.skew()
33
+ kurt = df.kurtosis()
34
+ print("\nDescriptive Statistics:\n", desc)
35
+ print("\nSkewness:")
36
+ print(skew)
37
+ print("\nKurtosis:")
38
+ print(kurt)
39
+ # Plot distributions
40
+ for col in df.columns:
41
+ plt.figure(figsize=(8,4))
42
+ sns.histplot(df[col].dropna(), kde=True)
43
+ plt.title(f"Distribution of {col}")
44
+ plt.savefig(f"data/exports/distribution_{col}.png", dpi=200, bbox_inches='tight')
45
+ plt.close()
46
+ # 2. Correlation matrices
47
+ pearson_corr = df.corr(method='pearson')
48
+ spearman_corr = df.corr(method='spearman')
49
+ print("\nPearson Correlation Matrix:\n", pearson_corr.round(3))
50
+ print("\nSpearman Correlation Matrix:\n", spearman_corr.round(3))
51
+ plt.figure(figsize=(8,6))
52
+ sns.heatmap(pearson_corr, annot=True, cmap='coolwarm', center=0)
53
+ plt.title('Pearson Correlation Matrix')
54
+ plt.tight_layout()
55
+ plt.savefig('data/exports/pearson_corr_matrix.png', dpi=200)
56
+ plt.close()
57
+ plt.figure(figsize=(8,6))
58
+ sns.heatmap(spearman_corr, annot=True, cmap='coolwarm', center=0)
59
+ plt.title('Spearman Correlation Matrix')
60
+ plt.tight_layout()
61
+ plt.savefig('data/exports/spearman_corr_matrix.png', dpi=200)
62
+ plt.close()
63
+ # 3. PCA for visualization
64
+ scaler = StandardScaler()
65
+ scaled = scaler.fit_transform(df_clean)
66
+ pca = PCA(n_components=2)
67
+ pca_result = pca.fit_transform(scaled)
68
+ pca_df = pd.DataFrame(pca_result, columns=['PC1', 'PC2'], index=df_clean.index)
69
+ plt.figure(figsize=(8,6))
70
+ plt.scatter(pca_df['PC1'], pca_df['PC2'], alpha=0.5)
71
+ plt.xlabel('PC1')
72
+ plt.ylabel('PC2')
73
+ plt.title('PCA Projection (2D)')
74
+ plt.tight_layout()
75
+ plt.savefig('data/exports/pca_2d.png', dpi=200)
76
+ plt.close()
77
+ print("\nEDA complete. Outputs saved to data/exports/.")
78
+
79
+ if __name__ == "__main__":
80
+ main()
scripts/run_fred_pipeline.py ADDED
@@ -0,0 +1,10 @@
+ #!/usr/bin/env python
+ import os
+ import sys
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
+ from core.fred_pipeline import FREDPipeline
+
+ if __name__ == "__main__":
+     config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'pipeline.yaml')
+     pipeline = FREDPipeline(config_path)
+     pipeline.run()
scripts/run_segmentation.py ADDED
@@ -0,0 +1,91 @@
1
+ #!/usr/bin/env python
2
+ """
3
+ Run Segmentation: K-means clustering, elbow method, silhouette score, PCA visualization
4
+ """
5
+ import os
6
+ import sys
7
+ import glob
8
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
9
+ import pandas as pd
10
+ import numpy as np
11
+ import matplotlib.pyplot as plt
12
+ import seaborn as sns
13
+ from sklearn.cluster import KMeans
14
+ from sklearn.metrics import silhouette_score
15
+ from sklearn.decomposition import PCA
16
+ from sklearn.preprocessing import StandardScaler
17
+
18
+ def find_latest_data():
19
+ data_files = glob.glob('data/processed/fred_data_*.csv')
20
+ if not data_files:
21
+ raise FileNotFoundError("No FRED data files found. Run the pipeline first.")
22
+ return max(data_files, key=os.path.getctime)
23
+
24
+ def main():
25
+ print("="*60)
26
+ print("FRED Segmentation: K-means, Elbow, Silhouette, PCA Visualization")
27
+ print("="*60)
28
+ data_file = find_latest_data()
29
+ print(f"Using data file: {data_file}")
30
+ df = pd.read_csv(data_file, index_col=0, parse_dates=True)
31
+ df_clean = df.dropna()
32
+ if df_clean.shape[0] < 10 or df_clean.shape[1] < 2:
33
+ print("Not enough data for clustering (need at least 10 rows and 2 columns after dropna). Skipping.")
34
+ return
35
+ scaler = StandardScaler()
36
+ scaled_data = scaler.fit_transform(df_clean)
37
+ # Elbow and silhouette
38
+ inertias = []
39
+ silhouette_scores = []
40
+ k_range = range(2, min(11, len(df_clean)//10+1))
41
+ for k in k_range:
42
+ kmeans = KMeans(n_clusters=k, random_state=42)
43
+ kmeans.fit(scaled_data)
44
+ inertias.append(kmeans.inertia_)
45
+ silhouette_scores.append(silhouette_score(scaled_data, kmeans.labels_))
46
+ # Plot elbow and silhouette
47
+ plt.figure(figsize=(12,4))
48
+ plt.subplot(1,2,1)
49
+ plt.plot(list(k_range), inertias, 'bo-')
50
+ plt.xlabel('Number of Clusters (k)')
51
+ plt.ylabel('Inertia')
52
+ plt.title('Elbow Method')
53
+ plt.grid(True)
54
+ plt.subplot(1,2,2)
55
+ plt.plot(list(k_range), silhouette_scores, 'ro-')
56
+ plt.xlabel('Number of Clusters (k)')
57
+ plt.ylabel('Silhouette Score')
58
+ plt.title('Silhouette Analysis')
59
+ plt.grid(True)
60
+ plt.tight_layout()
61
+ plt.savefig('data/exports/clustering_analysis.png', dpi=200)
62
+ plt.close()
63
+ # Choose optimal k
64
+ optimal_k = list(k_range)[np.argmax(silhouette_scores)]
65
+ print(f"Optimal number of clusters: {optimal_k}")
66
+ print(f"Best silhouette score: {max(silhouette_scores):.3f}")
67
+ # Final clustering
68
+ kmeans_optimal = KMeans(n_clusters=optimal_k, random_state=42)
69
+ cluster_labels = kmeans_optimal.fit_predict(scaled_data)
70
+ df_clustered = df_clean.copy()
71
+ df_clustered['Cluster'] = cluster_labels
72
+ # Cluster stats
73
+ cluster_stats = df_clustered.groupby('Cluster').agg(['mean', 'std'])
74
+ print("\nCluster Characteristics:")
75
+ print(cluster_stats.round(3))
76
+ # PCA visualization
77
+ pca = PCA(n_components=2)
78
+ pca_result = pca.fit_transform(scaled_data)
79
+ plt.figure(figsize=(8,6))
80
+ scatter = plt.scatter(pca_result[:,0], pca_result[:,1], c=cluster_labels, cmap='tab10', alpha=0.7)
81
+ plt.xlabel('PC1')
82
+ plt.ylabel('PC2')
83
+ plt.title('Clusters Visualized with PCA')
84
+ plt.legend(*scatter.legend_elements(), title="Cluster")
85
+ plt.tight_layout()
86
+ plt.savefig('data/exports/clusters_pca.png', dpi=200)
87
+ plt.close()
88
+ print("\nSegmentation complete. Outputs saved to data/exports/.")
89
+
90
+ if __name__ == "__main__":
91
+ main()
scripts/run_statistical_modeling.py ADDED
@@ -0,0 +1,104 @@
1
+ #!/usr/bin/env python
2
+ """
3
+ Run Statistical Modeling: Linear regression, diagnostics, p-values, confidence intervals, plots
4
+ """
5
+ import os
6
+ import sys
7
+ import glob
8
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
9
+ import pandas as pd
10
+ import numpy as np
11
+ import matplotlib.pyplot as plt
12
+ import seaborn as sns
13
+ from sklearn.linear_model import LinearRegression
14
+ from sklearn.model_selection import train_test_split
15
+ from sklearn.preprocessing import StandardScaler
16
+ import statsmodels.api as sm
17
+ from scipy import stats
18
+ from statsmodels.stats.diagnostic import het_breuschpagan
19
+ from statsmodels.stats.outliers_influence import variance_inflation_factor
20
+
21
+ def find_latest_data():
22
+ data_files = glob.glob('data/processed/fred_data_*.csv')
23
+ if not data_files:
24
+ raise FileNotFoundError("No FRED data files found. Run the pipeline first.")
25
+ return max(data_files, key=os.path.getctime)
26
+
27
+ def main():
28
+ print("="*60)
29
+ print("FRED Statistical Modeling: Linear Regression & Diagnostics")
30
+ print("="*60)
31
+ data_file = find_latest_data()
32
+ print(f"Using data file: {data_file}")
33
+ df = pd.read_csv(data_file, index_col=0, parse_dates=True)
34
+ df_clean = df.dropna()
35
+ target_var = 'GDP'
36
+ if target_var not in df_clean.columns:
37
+ print(f"Target variable '{target_var}' not found in data.")
38
+ return
39
+ feature_cols = [col for col in df_clean.columns if col != target_var]
40
+ X = df_clean[feature_cols]
41
+ y = df_clean[target_var]
42
+ X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
43
+ # Fit linear regression
44
+ model = LinearRegression()
45
+ model.fit(X_train, y_train)
46
+ y_pred_train = model.predict(X_train)
47
+ y_pred_test = model.predict(X_test)
48
+ # Model performance
49
+ r2_train = model.score(X_train, y_train)
50
+ r2_test = model.score(X_test, y_test)
51
+ print(f"R² (Train): {r2_train:.4f} | R² (Test): {r2_test:.4f}")
52
+ # Coefficients
53
+ print("\nCoefficients:")
54
+ for feature, coef in zip(feature_cols, model.coef_):
55
+ print(f" {feature}: {coef:.4f}")
56
+ print(f" Intercept: {model.intercept_:.4f}")
57
+ # Statsmodels for p-values and CIs
58
+ X_with_const = sm.add_constant(X_train)
59
+ model_sm = sm.OLS(y_train, X_with_const).fit()
60
+ print("\nStatistical Significance:")
61
+ print(model_sm.summary().tables[1])
62
+ # Save summary table
63
+ with open('data/exports/regression_summary.txt', 'w') as f:
64
+ f.write(str(model_sm.summary()))
65
+ # Residuals
66
+ residuals = y_train - y_pred_train
67
+ # Normality test
68
+ _, p_value_norm = stats.normaltest(residuals)
69
+ print(f"Normality test (p-value): {p_value_norm:.4f}")
70
+ # VIF
71
+ vif_data = []
72
+ for i in range(X_train.shape[1]):
73
+ try:
74
+ vif = variance_inflation_factor(X_train.values, i)
75
+ vif_data.append(vif)
76
+ except:
77
+ vif_data.append(np.nan)
78
+ print("\nVariance Inflation Factors:")
79
+ for feature, vif in zip(feature_cols, vif_data):
80
+ print(f" {feature}: {vif:.3f}")
81
+ # Homoscedasticity
82
+ try:
83
+         _, p_value_het, _, _ = het_breuschpagan(residuals, X_with_const)
84
+ print(f"Homoscedasticity test (p-value): {p_value_het:.4f}")
85
+ except:
86
+ print("Homoscedasticity test failed")
87
+ # Diagnostic plots
88
+ plt.figure(figsize=(12,4))
89
+ plt.subplot(1,2,1)
90
+ plt.scatter(y_pred_train, residuals, alpha=0.5)
91
+ plt.axhline(0, color='red', linestyle='--')
92
+ plt.xlabel('Fitted Values')
93
+ plt.ylabel('Residuals')
94
+ plt.title('Residuals vs Fitted')
95
+ plt.subplot(1,2,2)
96
+ stats.probplot(residuals, dist="norm", plot=plt)
97
+ plt.title('Normal Q-Q')
98
+ plt.tight_layout()
99
+ plt.savefig('data/exports/regression_diagnostics.png', dpi=200)
100
+ plt.close()
101
+ print("\nStatistical modeling complete. Outputs saved to data/exports/.")
102
+
103
+ if __name__ == "__main__":
104
+ main()
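One detail worth noting from the diagnostics above: `statsmodels.stats.diagnostic.het_breuschpagan` returns four values (LM statistic, LM p-value, F statistic, F p-value), so the p-value is the second element; the unpacking in the script reflects that. A minimal check on toy data, not the project's data:

```python
import numpy as np
import statsmodels.api as sm
from statsmodels.stats.diagnostic import het_breuschpagan

rng = np.random.default_rng(0)
exog = sm.add_constant(rng.normal(size=(100, 2)))  # constant + two regressors
resid = rng.normal(size=100)                       # stand-in residuals

lm_stat, lm_pvalue, f_stat, f_pvalue = het_breuschpagan(resid, exog)
print(f"Breusch-Pagan LM p-value: {lm_pvalue:.4f}")
```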
scripts/run_time_series.py ADDED
@@ -0,0 +1,80 @@
1
+ #!/usr/bin/env python
2
+ """
3
+ Run Time Series Analysis: Decomposition, ARIMA forecasting, plots
4
+ """
5
+ import os
6
+ import sys
7
+ import glob
8
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
9
+ import pandas as pd
10
+ import numpy as np
11
+ import matplotlib.pyplot as plt
12
+ from statsmodels.tsa.seasonal import seasonal_decompose
13
+ from statsmodels.tsa.arima.model import ARIMA
14
+
15
+ def find_latest_data():
16
+ data_files = glob.glob('data/processed/fred_data_*.csv')
17
+ if not data_files:
18
+ raise FileNotFoundError("No FRED data files found. Run the pipeline first.")
19
+ return max(data_files, key=os.path.getctime)
20
+
21
+ def main():
22
+ print("="*60)
23
+ print("FRED Time Series Analysis: Decomposition & ARIMA Forecasting")
24
+ print("="*60)
25
+ data_file = find_latest_data()
26
+ print(f"Using data file: {data_file}")
27
+ df = pd.read_csv(data_file, index_col=0, parse_dates=True)
28
+ target_var = 'GDP'
29
+ if target_var not in df.columns:
30
+ print(f"Target variable '{target_var}' not found in data.")
31
+ return
32
+ ts_data = df[target_var].dropna()
33
+ if len(ts_data) < 50:
34
+ print("Insufficient data for time series analysis (need at least 50 points). Skipping.")
35
+ return
36
+ print(f"Time series length: {len(ts_data)} observations")
37
+ print(f"Date range: {ts_data.index.min()} to {ts_data.index.max()}")
38
+ # Decomposition
39
+ try:
40
+ if ts_data.index.freq is None:
41
+ ts_monthly = ts_data.resample('M').mean()
42
+ else:
43
+ ts_monthly = ts_data
44
+ decomposition = seasonal_decompose(ts_monthly, model='additive', period=12)
45
+ fig, axes = plt.subplots(4, 1, figsize=(12, 10))
46
+ decomposition.observed.plot(ax=axes[0], title='Original Time Series')
47
+ decomposition.trend.plot(ax=axes[1], title='Trend')
48
+ decomposition.seasonal.plot(ax=axes[2], title='Seasonality')
49
+ decomposition.resid.plot(ax=axes[3], title='Residuals')
50
+ plt.tight_layout()
51
+ plt.savefig('data/exports/time_series_decomposition.png', dpi=200, bbox_inches='tight')
52
+ plt.close()
53
+ print("Decomposition plot saved.")
54
+ except Exception as e:
55
+ print(f"Decomposition failed: {e}")
56
+ # ARIMA Forecasting
57
+ try:
58
+ model = ARIMA(ts_monthly, order=(1, 1, 1))
59
+ fitted_model = model.fit()
60
+ print(f"ARIMA Model Summary:\n{fitted_model.summary()}")
61
+ forecast_steps = min(12, len(ts_monthly) // 4)
62
+ forecast = fitted_model.forecast(steps=forecast_steps)
63
+ conf_int = fitted_model.get_forecast(steps=forecast_steps).conf_int()
64
+ plt.figure(figsize=(12, 6))
65
+ ts_monthly.plot(label='Historical Data')
66
+ forecast.plot(label='Forecast', color='red')
67
+ plt.fill_between(forecast.index, conf_int.iloc[:, 0], conf_int.iloc[:, 1], alpha=0.3, color='red', label='Confidence Interval')
68
+ plt.title(f'{target_var} - ARIMA Forecast')
69
+ plt.legend()
70
+ plt.grid(True)
71
+ plt.tight_layout()
72
+ plt.savefig('data/exports/time_series_forecast.png', dpi=200, bbox_inches='tight')
73
+ plt.close()
74
+ print("Forecast plot saved.")
75
+ except Exception as e:
76
+ print(f"ARIMA modeling failed: {e}")
77
+ print("\nTime series analysis complete. Outputs saved to data/exports/.")
78
+
79
+ if __name__ == "__main__":
80
+ main()
src/__init__.py ADDED
@@ -0,0 +1,21 @@
+ """
+ FRED Economic Data Analysis Package
+
+ A comprehensive tool for collecting, analyzing, and visualizing
+ Federal Reserve Economic Data (FRED) using the FRED API.
+
+ Author: Economic Data Team
+ Version: 1.0.0
+ """
+
+ __version__ = "1.0.0"
+ __author__ = "Economic Data Team"
+ __email__ = "[email protected]"
+
+ from .core.fred_client import FREDDataCollectorV2
+ from .analysis.advanced_analytics import AdvancedAnalytics
+
+ __all__ = [
+     'FREDDataCollectorV2',
+     'AdvancedAnalytics',
+ ]
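A minimal usage sketch of `AdvancedAnalytics` as exported above, mirroring how scripts/run_advanced_analytics.py drives it; the CSV path is illustrative (the pipeline writes timestamped files under data/processed/), and the result keys shown in the comment follow the module's own structure.

```python
import sys
sys.path.append('src')  # the runner scripts add src/ to the path the same way

from analysis.advanced_analytics import AdvancedAnalytics

# Path is an example; point this at a real export produced by the pipeline.
analytics = AdvancedAnalytics(data_path='data/processed/fred_data_20250710_221702.csv')
results = analytics.run_complete_analysis()
print(sorted(results.keys()))  # e.g. ['clustering', 'eda', 'pca', 'regression', 'time_series']
```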
src/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (644 Bytes).
 
src/analysis/__init__.py ADDED
@@ -0,0 +1,7 @@
+ """
+ Economic data analysis and visualization tools.
+ """
+
+ from .advanced_analytics import AdvancedAnalytics
+
+ __all__ = ['AdvancedAnalytics']
src/analysis/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (345 Bytes).
 
src/analysis/__pycache__/advanced_analytics.cpython-39.pyc ADDED
Binary file (14.8 kB).
 
src/analysis/__pycache__/economic_analyzer.cpython-39.pyc ADDED
Binary file (6.06 kB).
 
src/analysis/advanced_analytics.py ADDED
@@ -0,0 +1,517 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Advanced Analytics Module for FRED Economic Data
4
+ Performs comprehensive statistical analysis, modeling, and insights extraction.
5
+ """
6
+
7
+ import pandas as pd
8
+ import numpy as np
9
+ import matplotlib.pyplot as plt
10
+ import seaborn as sns
11
+ from scipy import stats
12
+ from sklearn.preprocessing import StandardScaler
13
+ from sklearn.decomposition import PCA
14
+ from sklearn.cluster import KMeans
15
+ from sklearn.metrics import silhouette_score
16
+ from sklearn.linear_model import LinearRegression
17
+ from sklearn.model_selection import train_test_split
18
+ from sklearn.metrics import r2_score, mean_squared_error
19
+ import statsmodels.api as sm
20
+ from statsmodels.tsa.seasonal import seasonal_decompose
21
+ from statsmodels.tsa.arima.model import ARIMA
22
+ from statsmodels.stats.diagnostic import het_breuschpagan
23
+ from statsmodels.stats.outliers_influence import variance_inflation_factor
24
+ import warnings
25
+ warnings.filterwarnings('ignore')
26
+
27
+ class AdvancedAnalytics:
28
+ """
29
+ Comprehensive analytics class for FRED economic data.
30
+ Performs EDA, statistical modeling, segmentation, and time series analysis.
31
+ """
32
+
33
+ def __init__(self, data_path=None, df=None):
34
+ """Initialize with data path or DataFrame."""
35
+ if df is not None:
36
+ self.df = df
37
+ elif data_path:
38
+ self.df = pd.read_csv(data_path, index_col=0, parse_dates=True)
39
+ else:
40
+ raise ValueError("Must provide either data_path or DataFrame")
41
+
42
+ self.scaler = StandardScaler()
43
+ self.results = {}
44
+
45
+ def perform_eda(self):
46
+ """Perform comprehensive Exploratory Data Analysis."""
47
+ print("=" * 60)
48
+ print("EXPLORATORY DATA ANALYSIS")
49
+ print("=" * 60)
50
+
51
+ # Basic info
52
+ print(f"\nDataset Shape: {self.df.shape}")
53
+ print(f"Date Range: {self.df.index.min()} to {self.df.index.max()}")
54
+ print(f"Variables: {list(self.df.columns)}")
55
+
56
+ # Descriptive statistics
57
+ print("\n" + "=" * 40)
58
+ print("DESCRIPTIVE STATISTICS")
59
+ print("=" * 40)
60
+ desc_stats = self.df.describe()
61
+ print(desc_stats)
62
+
63
+ # Skewness and Kurtosis
64
+ print("\n" + "=" * 40)
65
+ print("SKEWNESS AND KURTOSIS")
66
+ print("=" * 40)
67
+ skewness = self.df.skew()
68
+ kurtosis = self.df.kurtosis()
69
+
70
+ for col in self.df.columns:
71
+ print(f"{col}:")
72
+ print(f" Skewness: {skewness[col]:.3f}")
73
+ print(f" Kurtosis: {kurtosis[col]:.3f}")
74
+
75
+ # Correlation Analysis
76
+ print("\n" + "=" * 40)
77
+ print("CORRELATION ANALYSIS")
78
+ print("=" * 40)
79
+
80
+ # Pearson correlation
81
+ pearson_corr = self.df.corr(method='pearson')
82
+ print("\nPearson Correlation Matrix:")
83
+ print(pearson_corr.round(3))
84
+
85
+ # Spearman correlation
86
+ spearman_corr = self.df.corr(method='spearman')
87
+ print("\nSpearman Correlation Matrix:")
88
+ print(spearman_corr.round(3))
89
+
90
+ # Store results
91
+ self.results['eda'] = {
92
+ 'descriptive_stats': desc_stats,
93
+ 'skewness': skewness,
94
+ 'kurtosis': kurtosis,
95
+ 'pearson_corr': pearson_corr,
96
+ 'spearman_corr': spearman_corr
97
+ }
98
+
99
+ return self.results['eda']
100
+
101
+ def perform_dimensionality_reduction(self, method='pca', n_components=2):
102
+ """Perform dimensionality reduction for visualization."""
103
+ print("\n" + "=" * 40)
104
+ print(f"DIMENSIONALITY REDUCTION ({method.upper()})")
105
+ print("=" * 40)
106
+
107
+ # Prepare data (remove NaN values)
108
+ df_clean = self.df.dropna()
109
+
110
+ if method.lower() == 'pca':
111
+ # PCA
112
+ pca = PCA(n_components=n_components)
113
+ scaled_data = self.scaler.fit_transform(df_clean)
114
+ pca_result = pca.fit_transform(scaled_data)
115
+
116
+ print(f"Explained variance ratio: {pca.explained_variance_ratio_}")
117
+ print(f"Total explained variance: {sum(pca.explained_variance_ratio_):.3f}")
118
+
119
+ # Create DataFrame with PCA results
120
+ pca_df = pd.DataFrame(
121
+ pca_result,
122
+ columns=[f'PC{i+1}' for i in range(n_components)],
123
+ index=df_clean.index
124
+ )
125
+
126
+ self.results['pca'] = {
127
+ 'components': pca_df,
128
+ 'explained_variance': pca.explained_variance_ratio_,
129
+ 'feature_importance': pd.DataFrame(
130
+ pca.components_.T,
131
+ columns=[f'PC{i+1}' for i in range(n_components)],
132
+ index=df_clean.columns
133
+ )
134
+ }
135
+
136
+ return self.results['pca']
137
+
138
+ return None
139
+
140
+ def perform_statistical_modeling(self, target_var='GDP', test_size=0.2):
141
+ """Perform linear regression with comprehensive diagnostics."""
142
+ print("\n" + "=" * 40)
143
+ print("STATISTICAL MODELING - LINEAR REGRESSION")
144
+ print("=" * 40)
145
+
146
+ # Prepare data
147
+ df_clean = self.df.dropna()
148
+
149
+ if target_var not in df_clean.columns:
150
+ print(f"Target variable '{target_var}' not found in dataset")
151
+ return None
152
+
153
+ # Prepare features and target
154
+ feature_cols = [col for col in df_clean.columns if col != target_var]
155
+ X = df_clean[feature_cols]
156
+ y = df_clean[target_var]
157
+
158
+ # Split data
159
+ X_train, X_test, y_train, y_test = train_test_split(
160
+ X, y, test_size=test_size, random_state=42
161
+ )
162
+
163
+ # Fit linear regression
164
+ model = LinearRegression()
165
+ model.fit(X_train, y_train)
166
+
167
+ # Predictions
168
+ y_pred_train = model.predict(X_train)
169
+ y_pred_test = model.predict(X_test)
170
+
171
+ # Model performance
172
+ r2_train = r2_score(y_train, y_pred_train)
173
+ r2_test = r2_score(y_test, y_pred_test)
174
+ rmse_train = np.sqrt(mean_squared_error(y_train, y_pred_train))
175
+ rmse_test = np.sqrt(mean_squared_error(y_test, y_pred_test))
176
+
177
+ print(f"\nModel Performance:")
178
+ print(f"R² (Training): {r2_train:.4f}")
179
+ print(f"R² (Test): {r2_test:.4f}")
180
+ print(f"RMSE (Training): {rmse_train:.4f}")
181
+ print(f"RMSE (Test): {rmse_test:.4f}")
182
+
183
+ # Coefficients
184
+ print(f"\nCoefficients:")
185
+ for feature, coef in zip(feature_cols, model.coef_):
186
+ print(f" {feature}: {coef:.4f}")
187
+ print(f" Intercept: {model.intercept_:.4f}")
188
+
189
+ # Statistical significance using statsmodels
190
+ X_with_const = sm.add_constant(X_train)
191
+ model_sm = sm.OLS(y_train, X_with_const).fit()
192
+
193
+ print(f"\nStatistical Significance:")
194
+ print(model_sm.summary().tables[1])
195
+
196
+ # Assumption tests
197
+ print(f"\n" + "=" * 30)
198
+ print("REGRESSION ASSUMPTIONS")
199
+ print("=" * 30)
200
+
201
+ # 1. Normality of residuals
202
+ residuals = y_train - y_pred_train
203
+ _, p_value_norm = stats.normaltest(residuals)
204
+ print(f"Normality test (p-value): {p_value_norm:.4f}")
205
+
206
+ # 2. Multicollinearity (VIF)
207
+ vif_data = []
208
+ for i in range(X_train.shape[1]):
209
+ try:
210
+ vif = variance_inflation_factor(X_train.values, i)
211
+ vif_data.append(vif)
212
+ except:
213
+ vif_data.append(np.nan)
214
+
215
+ print(f"\nVariance Inflation Factors:")
216
+ for feature, vif in zip(feature_cols, vif_data):
217
+ print(f" {feature}: {vif:.3f}")
218
+
219
+ # 3. Homoscedasticity
220
+ try:
221
+ _, p_value_het = het_breuschpagan(residuals, X_with_const)
222
+ print(f"\nHomoscedasticity test (p-value): {p_value_het:.4f}")
223
+ except:
224
+ p_value_het = np.nan
225
+ print(f"\nHomoscedasticity test failed")
226
+
227
+ # Store results
228
+ self.results['regression'] = {
229
+ 'model': model,
230
+ 'model_sm': model_sm,
231
+ 'performance': {
232
+ 'r2_train': r2_train,
233
+ 'r2_test': r2_test,
234
+ 'rmse_train': rmse_train,
235
+ 'rmse_test': rmse_test
236
+ },
237
+ 'coefficients': dict(zip(feature_cols, model.coef_)),
238
+ 'assumptions': {
239
+ 'normality_p': p_value_norm,
240
+ 'homoscedasticity_p': p_value_het,
241
+ 'vif': dict(zip(feature_cols, vif_data))
242
+ }
243
+ }
244
+
245
+ return self.results['regression']
246
+
247
+ def perform_clustering(self, max_k=10):
248
+ """Perform clustering analysis with optimal k selection."""
249
+ print("\n" + "=" * 40)
250
+ print("CLUSTERING ANALYSIS")
251
+ print("=" * 40)
252
+
253
+ # Prepare data
254
+ df_clean = self.df.dropna()
255
+ if df_clean.shape[0] < 10 or df_clean.shape[1] < 2:
256
+ print("Not enough data for clustering (need at least 10 rows and 2 columns after dropna). Skipping.")
257
+ self.results['clustering'] = None
258
+ return None
259
+ try:
260
+ scaled_data = self.scaler.fit_transform(df_clean)
261
+ except Exception as e:
262
+ print(f"Scaling failed: {e}")
263
+ self.results['clustering'] = None
264
+ return None
265
+ # Find optimal k using elbow method and silhouette score
266
+ inertias = []
267
+ silhouette_scores = []
268
+ k_range = range(2, min(max_k + 1, len(df_clean) // 10 + 1))
269
+ if len(k_range) < 2:
270
+ print("Not enough data for multiple clusters. Skipping clustering.")
271
+ self.results['clustering'] = None
272
+ return None
273
+ try:
274
+ for k in k_range:
275
+ kmeans = KMeans(n_clusters=k, random_state=42)
276
+ kmeans.fit(scaled_data)
277
+ inertias.append(kmeans.inertia_)
278
+ silhouette_scores.append(silhouette_score(scaled_data, kmeans.labels_))
279
+ # Plot elbow curve only if there are results
280
+ if inertias and silhouette_scores:
281
+ plt.figure(figsize=(12, 4))
282
+ plt.subplot(1, 2, 1)
283
+ plt.plot(list(k_range), inertias, 'bo-')
284
+ plt.xlabel('Number of Clusters (k)')
285
+ plt.ylabel('Inertia')
286
+ plt.title('Elbow Method')
287
+ plt.grid(True)
288
+ plt.subplot(1, 2, 2)
289
+ plt.plot(list(k_range), silhouette_scores, 'ro-')
290
+ plt.xlabel('Number of Clusters (k)')
291
+ plt.ylabel('Silhouette Score')
292
+ plt.title('Silhouette Analysis')
293
+ plt.grid(True)
294
+ plt.tight_layout()
295
+ plt.savefig('data/exports/clustering_analysis.png', dpi=300, bbox_inches='tight')
296
+ plt.show()
297
+ # Choose optimal k (highest silhouette score)
298
+ optimal_k = list(k_range)[np.argmax(silhouette_scores)]
299
+ print(f"Optimal number of clusters: {optimal_k}")
300
+ print(f"Best silhouette score: {max(silhouette_scores):.3f}")
301
+ # Perform clustering with optimal k
302
+ kmeans_optimal = KMeans(n_clusters=optimal_k, random_state=42)
303
+ cluster_labels = kmeans_optimal.fit_predict(scaled_data)
304
+ # Add cluster labels to data
305
+ df_clustered = df_clean.copy()
306
+ df_clustered['Cluster'] = cluster_labels
307
+ # Cluster characteristics
308
+ print(f"\nCluster Characteristics:")
309
+ cluster_stats = df_clustered.groupby('Cluster').agg(['mean', 'std'])
310
+ print(cluster_stats.round(3))
311
+ # Store results
312
+ self.results['clustering'] = {
313
+ 'optimal_k': optimal_k,
314
+ 'silhouette_score': max(silhouette_scores),
315
+ 'cluster_labels': cluster_labels,
316
+ 'clustered_data': df_clustered,
317
+ 'cluster_stats': cluster_stats,
318
+ 'inertias': inertias,
319
+ 'silhouette_scores': silhouette_scores
320
+ }
321
+ return self.results['clustering']
322
+ except Exception as e:
323
+ print(f"Clustering failed: {e}")
324
+ self.results['clustering'] = None
325
+ return None
326
+
327
+ def perform_time_series_analysis(self, target_var='GDP'):
328
+ """Perform comprehensive time series analysis."""
329
+ print("\n" + "=" * 40)
330
+ print("TIME SERIES ANALYSIS")
331
+ print("=" * 40)
332
+
333
+ if target_var not in self.df.columns:
334
+ print(f"Target variable '{target_var}' not found")
335
+ self.results['time_series'] = None
336
+ return None
337
+ # Prepare time series data
338
+ ts_data = self.df[target_var].dropna()
339
+ if len(ts_data) < 50:
340
+ print("Insufficient data for time series analysis (need at least 50 points). Skipping.")
341
+ self.results['time_series'] = None
342
+ return None
343
+ print(f"Time series length: {len(ts_data)} observations")
344
+ print(f"Date range: {ts_data.index.min()} to {ts_data.index.max()}")
345
+ # 1. Time Series Decomposition
346
+ print(f"\nTime Series Decomposition:")
347
+ try:
348
+ # Resample to monthly data if needed
349
+ if ts_data.index.freq is None:
350
+ ts_monthly = ts_data.resample('M').mean()
351
+ else:
352
+ ts_monthly = ts_data
353
+ decomposition = seasonal_decompose(ts_monthly, model='additive', period=12)
354
+ # Plot decomposition
355
+ fig, axes = plt.subplots(4, 1, figsize=(12, 10))
356
+ decomposition.observed.plot(ax=axes[0], title='Original Time Series')
357
+ decomposition.trend.plot(ax=axes[1], title='Trend')
358
+ decomposition.seasonal.plot(ax=axes[2], title='Seasonality')
359
+ decomposition.resid.plot(ax=axes[3], title='Residuals')
360
+ plt.tight_layout()
361
+ plt.savefig('data/exports/time_series_decomposition.png', dpi=300, bbox_inches='tight')
362
+ plt.show()
363
+ except Exception as e:
364
+ print(f"Decomposition failed: {e}")
365
+ # 2. ARIMA Modeling
366
+ print(f"\nARIMA Modeling:")
367
+ try:
368
+ # Fit ARIMA model
369
+ model = ARIMA(ts_monthly, order=(1, 1, 1))
370
+ fitted_model = model.fit()
371
+ print(f"ARIMA Model Summary:")
372
+ print(fitted_model.summary())
373
+ # Forecast
374
+ forecast_steps = min(12, len(ts_monthly) // 4)
375
+ forecast = fitted_model.forecast(steps=forecast_steps)
376
+ conf_int = fitted_model.get_forecast(steps=forecast_steps).conf_int()
377
+ # Plot forecast
378
+ plt.figure(figsize=(12, 6))
379
+ ts_monthly.plot(label='Historical Data')
380
+ forecast.plot(label='Forecast', color='red')
381
+ plt.fill_between(forecast.index,
382
+ conf_int.iloc[:, 0],
383
+ conf_int.iloc[:, 1],
384
+ alpha=0.3, color='red', label='Confidence Interval')
385
+ plt.title(f'{target_var} - ARIMA Forecast')
386
+ plt.legend()
387
+ plt.grid(True)
388
+ plt.tight_layout()
389
+ plt.savefig('data/exports/time_series_forecast.png', dpi=300, bbox_inches='tight')
390
+ plt.show()
391
+ # Store results
392
+ self.results['time_series'] = {
393
+ 'model': fitted_model,
394
+ 'forecast': forecast,
395
+ 'confidence_intervals': conf_int,
396
+ 'decomposition': decomposition if 'decomposition' in locals() else None
397
+ }
398
+ except Exception as e:
399
+ print(f"ARIMA modeling failed: {e}")
400
+ self.results['time_series'] = None
401
+ return self.results.get('time_series')
402
+
403
+ def generate_insights_report(self):
404
+ """Generate comprehensive insights report in layman's terms."""
405
+ print("\n" + "=" * 60)
406
+ print("COMPREHENSIVE INSIGHTS REPORT")
407
+ print("=" * 60)
408
+
409
+ insights = []
410
+ # EDA Insights
411
+ if 'eda' in self.results and self.results['eda'] is not None:
412
+ insights.append("EXPLORATORY DATA ANALYSIS INSIGHTS:")
413
+ insights.append("-" * 40)
414
+ # Correlation insights
415
+ pearson_corr = self.results['eda']['pearson_corr']
416
+ high_corr_pairs = []
417
+ for i in range(len(pearson_corr.columns)):
418
+ for j in range(i+1, len(pearson_corr.columns)):
419
+ corr_val = pearson_corr.iloc[i, j]
420
+ if abs(corr_val) > 0.7:
421
+ high_corr_pairs.append((pearson_corr.columns[i], pearson_corr.columns[j], corr_val))
422
+ if high_corr_pairs:
423
+ insights.append("Strong correlations found:")
424
+ for var1, var2, corr in high_corr_pairs:
425
+ insights.append(f" • {var1} and {var2}: {corr:.3f}")
426
+ else:
427
+ insights.append("No strong correlations (>0.7) found between variables.")
428
+ else:
429
+ insights.append("EDA could not be performed or returned no results.")
430
+ # Regression Insights
431
+ if 'regression' in self.results and self.results['regression'] is not None:
432
+ insights.append("\nREGRESSION MODEL INSIGHTS:")
433
+ insights.append("-" * 40)
434
+ reg_results = self.results['regression']
435
+ r2_test = reg_results['performance']['r2_test']
436
+ insights.append(f"Model Performance:")
437
+ insights.append(f" • The model explains {r2_test:.1%} of the variation in the target variable")
438
+ if r2_test > 0.7:
439
+ insights.append(" • This is considered a good model fit")
440
+ elif r2_test > 0.5:
441
+ insights.append(" • This is considered a moderate model fit")
442
+ else:
443
+ insights.append(" • This model has limited predictive power")
444
+ # Assumption insights
445
+ assumptions = reg_results['assumptions']
446
+ if assumptions['normality_p'] > 0.05:
447
+ insights.append(" • Residuals are normally distributed (assumption met)")
448
+ else:
449
+ insights.append(" • Residuals are not normally distributed (assumption violated)")
450
+ else:
451
+ insights.append("Regression modeling could not be performed or returned no results.")
452
+ # Clustering Insights
453
+ if 'clustering' in self.results and self.results['clustering'] is not None:
454
+ insights.append("\nCLUSTERING INSIGHTS:")
455
+ insights.append("-" * 40)
456
+ cluster_results = self.results['clustering']
457
+ optimal_k = cluster_results['optimal_k']
458
+ silhouette_score = cluster_results['silhouette_score']
459
+ insights.append(f"Optimal number of clusters: {optimal_k}")
460
+ insights.append(f"Cluster quality score: {silhouette_score:.3f}")
461
+ if silhouette_score > 0.5:
462
+ insights.append(" • Clusters are well-separated and distinct")
463
+ elif silhouette_score > 0.3:
464
+ insights.append(" • Clusters show moderate separation")
465
+ else:
466
+ insights.append(" • Clusters may not be well-defined")
467
+ else:
468
+ insights.append("Clustering could not be performed or returned no results.")
469
+ # Time Series Insights
470
+ if 'time_series' in self.results and self.results['time_series'] is not None:
471
+ insights.append("\nTIME SERIES INSIGHTS:")
472
+ insights.append("-" * 40)
473
+ insights.append(" • Time series decomposition shows trend, seasonality, and random components")
474
+ insights.append(" • ARIMA model provides future forecasts with confidence intervals")
475
+ insights.append(" • Forecasts can be used for planning and decision-making")
476
+ else:
477
+ insights.append("Time series analysis could not be performed or returned no results.")
478
+ # Print insights
479
+ for insight in insights:
480
+ print(insight)
481
+ # Save insights to file
482
+ with open('data/exports/insights_report.txt', 'w') as f:
483
+ f.write('\n'.join(insights))
484
+ return insights
485
+
486
+ def run_complete_analysis(self):
487
+ """Run the complete advanced analytics workflow."""
488
+ print("Starting comprehensive advanced analytics...")
489
+
490
+ # 1. EDA
491
+ self.perform_eda()
492
+
493
+ # 2. Dimensionality reduction
494
+ self.perform_dimensionality_reduction()
495
+
496
+ # 3. Statistical modeling
497
+ self.perform_statistical_modeling()
498
+
499
+ # 4. Clustering
500
+ self.perform_clustering()
501
+
502
+ # 5. Time series analysis
503
+ self.perform_time_series_analysis()
504
+
505
+ # 6. Generate insights
506
+ self.generate_insights_report()
507
+
508
+ print("\n" + "=" * 60)
509
+ print("ANALYSIS COMPLETE!")
510
+ print("=" * 60)
511
+ print("Check the following outputs:")
512
+ print(" • data/exports/insights_report.txt - Comprehensive insights")
513
+ print(" • data/exports/clustering_analysis.png - Clustering results")
514
+ print(" • data/exports/time_series_decomposition.png - Time series decomposition")
515
+ print(" • data/exports/time_series_forecast.png - Time series forecast")
516
+
517
+ return self.results
src/analysis/economic_analyzer.py ADDED
@@ -0,0 +1,201 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Quick Start Guide for FRED Economic Data Analysis
4
+ Demonstrates how to load and analyze the collected data
5
+ """
6
+
7
+ import pandas as pd
8
+ import matplotlib.pyplot as plt
9
+ import seaborn as sns
10
+ import sys
11
+ import os
12
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
13
+
14
+ from core.fred_client import FREDDataCollectorV2
15
+ from datetime import datetime, timedelta
16
+
17
+ def load_latest_data():
18
+ """Load the most recent data file."""
19
+ import os
20
+ import glob
21
+
22
+ # Find the most recent data file
23
+ data_files = glob.glob('data/fred_economic_data_*.csv')
24
+ if not data_files:
25
+ print("No data files found. Run the collector first.")
26
+ return None
27
+
28
+ latest_file = max(data_files, key=os.path.getctime)
29
+ print(f"Loading data from: {latest_file}")
30
+
31
+ df = pd.read_csv(latest_file, index_col=0, parse_dates=True)
32
+ return df
33
+
34
+ def analyze_gdp_trends(df):
35
+ """Analyze GDP trends."""
36
+ print("\n=== GDP Analysis ===")
37
+
38
+ if 'GDP' not in df.columns:
39
+ print("GDP data not available")
40
+ return
41
+
42
+ gdp_data = df['GDP'].dropna()
43
+
44
+ print(f"GDP Data Points: {len(gdp_data)}")
45
+ print(f"Date Range: {gdp_data.index.min()} to {gdp_data.index.max()}")
46
+ print(f"Latest GDP: ${gdp_data.iloc[-1]:,.2f} billion")
47
+ print(f"GDP Growth (last 5 years): {((gdp_data.iloc[-1] / gdp_data.iloc[-20]) - 1) * 100:.2f}%")
48
+
49
+ # Plot GDP trend
50
+ plt.figure(figsize=(12, 6))
51
+ gdp_data.plot(linewidth=2)
52
+ plt.title('US GDP Over Time')
53
+ plt.ylabel('GDP (Billions of Dollars)')
54
+ plt.grid(True, alpha=0.3)
55
+ plt.tight_layout()
56
+ plt.show()
57
+
58
+ def analyze_unemployment(df):
59
+ """Analyze unemployment trends."""
60
+ print("\n=== Unemployment Analysis ===")
61
+
62
+ if 'UNRATE' not in df.columns:
63
+ print("Unemployment data not available")
64
+ return
65
+
66
+ unrate_data = df['UNRATE'].dropna()
67
+
68
+ print(f"Unemployment Data Points: {len(unrate_data)}")
69
+ print(f"Current Unemployment Rate: {unrate_data.iloc[-1]:.1f}%")
70
+ print(f"Average Unemployment Rate: {unrate_data.mean():.1f}%")
71
+ print(f"Lowest Rate: {unrate_data.min():.1f}%")
72
+ print(f"Highest Rate: {unrate_data.max():.1f}%")
73
+
74
+ # Plot unemployment trend
75
+ plt.figure(figsize=(12, 6))
76
+ unrate_data.plot(linewidth=2, color='red')
77
+ plt.title('US Unemployment Rate Over Time')
78
+ plt.ylabel('Unemployment Rate (%)')
79
+ plt.grid(True, alpha=0.3)
80
+ plt.tight_layout()
81
+ plt.show()
82
+
83
+ def analyze_inflation(df):
84
+ """Analyze inflation trends using CPI."""
85
+ print("\n=== Inflation Analysis (CPI) ===")
86
+
87
+ if 'CPIAUCSL' not in df.columns:
88
+ print("CPI data not available")
89
+ return
90
+
91
+ cpi_data = df['CPIAUCSL'].dropna()
92
+
93
+ # Calculate year-over-year inflation
94
+ cpi_yoy = cpi_data.pct_change(periods=12) * 100
95
+
96
+ print(f"CPI Data Points: {len(cpi_data)}")
97
+ print(f"Current CPI: {cpi_data.iloc[-1]:.2f}")
98
+ print(f"Current YoY Inflation: {cpi_yoy.iloc[-1]:.2f}%")
99
+ print(f"Average YoY Inflation: {cpi_yoy.mean():.2f}%")
100
+
101
+ # Plot inflation trend
102
+ fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 10))
103
+
104
+ cpi_data.plot(ax=ax1, linewidth=2, color='green')
105
+ ax1.set_title('Consumer Price Index (CPI)')
106
+ ax1.set_ylabel('CPI')
107
+ ax1.grid(True, alpha=0.3)
108
+
109
+ cpi_yoy.plot(ax=ax2, linewidth=2, color='orange')
110
+ ax2.set_title('Year-over-Year Inflation Rate')
111
+ ax2.set_ylabel('Inflation Rate (%)')
112
+ ax2.grid(True, alpha=0.3)
113
+
114
+ plt.tight_layout()
115
+ plt.show()
116
+
117
+ def analyze_interest_rates(df):
118
+ """Analyze interest rate trends."""
119
+ print("\n=== Interest Rate Analysis ===")
120
+
121
+ rates_data = {}
122
+ if 'FEDFUNDS' in df.columns:
123
+ rates_data['Federal Funds Rate'] = df['FEDFUNDS'].dropna()
124
+ if 'DGS10' in df.columns:
125
+ rates_data['10-Year Treasury'] = df['DGS10'].dropna()
126
+
127
+ if not rates_data:
128
+ print("No interest rate data available")
129
+ return
130
+
131
+ for name, data in rates_data.items():
132
+ print(f"\n{name}:")
133
+ print(f" Current Rate: {data.iloc[-1]:.2f}%")
134
+ print(f" Average Rate: {data.mean():.2f}%")
135
+ print(f" Range: {data.min():.2f}% - {data.max():.2f}%")
136
+
137
+ # Plot interest rates
138
+ plt.figure(figsize=(12, 6))
139
+ for name, data in rates_data.items():
140
+ data.plot(linewidth=2, label=name)
141
+
142
+ plt.title('Interest Rates Over Time')
143
+ plt.ylabel('Interest Rate (%)')
144
+ plt.legend()
145
+ plt.grid(True, alpha=0.3)
146
+ plt.tight_layout()
147
+ plt.show()
148
+
149
+ def correlation_analysis(df):
150
+ """Analyze correlations between economic indicators."""
151
+ print("\n=== Correlation Analysis ===")
152
+
153
+ # Select available indicators
154
+ available_cols = [col for col in ['GDP', 'UNRATE', 'CPIAUCSL', 'FEDFUNDS', 'DGS10']
155
+ if col in df.columns]
156
+
157
+ if len(available_cols) < 2:
158
+ print("Need at least 2 indicators for correlation analysis")
159
+ return
160
+
161
+ # Calculate correlations
162
+ corr_data = df[available_cols].corr()
163
+
164
+ print("Correlation Matrix:")
165
+ print(corr_data.round(3))
166
+
167
+ # Plot correlation heatmap
168
+ plt.figure(figsize=(8, 6))
169
+ sns.heatmap(corr_data, annot=True, cmap='coolwarm', center=0,
170
+ square=True, linewidths=0.5)
171
+ plt.title('Economic Indicators Correlation Matrix')
172
+ plt.tight_layout()
173
+ plt.show()
174
+
175
+ def main():
176
+ """Run the quick start analysis."""
177
+ print("FRED Economic Data - Quick Start Analysis")
178
+ print("=" * 50)
179
+
180
+ # Load data
181
+ df = load_latest_data()
182
+ if df is None:
183
+ return
184
+
185
+ print(f"Data loaded successfully!")
186
+ print(f"Shape: {df.shape}")
187
+ print(f"Columns: {list(df.columns)}")
188
+ print(f"Date range: {df.index.min()} to {df.index.max()}")
189
+
190
+ # Run analyses
191
+ analyze_gdp_trends(df)
192
+ analyze_unemployment(df)
193
+ analyze_inflation(df)
194
+ analyze_interest_rates(df)
195
+ correlation_analysis(df)
196
+
197
+ print("\n=== Analysis Complete ===")
198
+ print("Check the generated plots for visual insights!")
199
+
200
+ if __name__ == "__main__":
201
+ main()
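A note on the GDP growth figure above: `gdp_data.iloc[-20]` assumes quarterly observations (20 quarters ≈ 5 years) and raises an IndexError on shorter samples. A date-based lookup is more robust; a minimal sketch (hypothetical helper, not part of this commit):

import pandas as pd

def trailing_growth(series: pd.Series, years: int = 5) -> float:
    """Percent growth over the last `years` years, using the nearest available earlier observation."""
    series = series.dropna().sort_index()
    end = series.index[-1]
    start_pos = series.index.searchsorted(end - pd.DateOffset(years=years))
    start_pos = min(start_pos, len(series) - 1)
    return (series.iloc[-1] / series.iloc[start_pos] - 1) * 100

# e.g. print(f"GDP Growth (last 5 years): {trailing_growth(gdp_data):.2f}%")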
src/core/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ """
2
+ Core functionality for FRED data collection and processing.
3
+ """
4
+
5
+ from .fred_client import FREDDataCollectorV2
6
+
7
+ __all__ = ['FREDDataCollectorV2']
src/core/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (348 Bytes). View file
 
src/core/__pycache__/base_pipeline.cpython-39.pyc ADDED
Binary file (1.75 kB). View file
 
src/core/__pycache__/fred_client.cpython-39.pyc ADDED
Binary file (7.48 kB). View file
 
src/core/__pycache__/fred_pipeline.cpython-39.pyc ADDED
Binary file (3.37 kB). View file
 
src/core/base_pipeline.py ADDED
@@ -0,0 +1,38 @@
1
+ import abc
2
+ import logging
3
+ import yaml
4
+ import os
5
+
6
+ class BasePipeline(abc.ABC):
7
+ """
8
+ Abstract base class for all data pipelines.
9
+ Handles config loading, logging, and pipeline orchestration.
10
+ """
11
+ def __init__(self, config_path: str):
12
+ self.config = self.load_config(config_path)
13
+ self.logger = self.setup_logger()
14
+
15
+ @staticmethod
16
+ def load_config(config_path: str):
17
+ with open(config_path, 'r') as f:
18
+ return yaml.safe_load(f)
19
+
20
+ def setup_logger(self):
21
+ log_cfg = self.config.get('logging', {})
22
+ log_level = getattr(logging, log_cfg.get('level', 'INFO').upper(), logging.INFO)
23
+ log_file = log_cfg.get('file', 'pipeline.log')
24
+ os.makedirs(os.path.dirname(log_file), exist_ok=True)
25
+ logging.basicConfig(
26
+ level=log_level,
27
+ format='%(asctime)s %(levelname)s %(name)s %(message)s',
28
+ handlers=[
29
+ logging.FileHandler(log_file),
30
+ logging.StreamHandler()
31
+ ]
32
+ )
33
+ return logging.getLogger(self.__class__.__name__)
34
+
35
+ @abc.abstractmethod
36
+ def run(self):
37
+ """Run the pipeline (to be implemented by subclasses)."""
38
+ pass
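For orientation, a minimal sketch of how a concrete pipeline plugs into BasePipeline (illustrative only; the real subclass, FREDPipeline, appears below). It assumes the imports resolve from the repository root and that the YAML config has at most a `logging` section, which is all setup_logger reads:

from src.core.base_pipeline import BasePipeline

class EchoPipeline(BasePipeline):
    """Toy pipeline: loads the config, logs its top-level keys, and exits."""
    def run(self):
        self.logger.info("Loaded config with keys: %s", list(self.config.keys()))

if __name__ == "__main__":
    EchoPipeline("config/pipeline.yaml").run()  # any YAML config file works here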
src/core/fred_client.py ADDED
@@ -0,0 +1,288 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ FRED Data Collector v2
4
+ A tool for collecting and analyzing Federal Reserve Economic Data (FRED)
5
+ using direct API calls instead of the fredapi library
6
+ """
7
+
8
+ import os
9
+ import pandas as pd
10
+ import numpy as np
11
+ import matplotlib.pyplot as plt
12
+ import seaborn as sns
13
+ import requests
14
+ from datetime import datetime, timedelta
15
+ import warnings
16
+ warnings.filterwarnings('ignore')
17
+
18
+ import sys
19
+ import os
20
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
21
+
22
+ from config.settings import FRED_API_KEY, DEFAULT_START_DATE, DEFAULT_END_DATE, OUTPUT_DIR, PLOTS_DIR
23
+
24
+ class FREDDataCollectorV2:
25
+ def __init__(self, api_key=None):
26
+ """Initialize the FRED data collector with API key."""
27
+ self.api_key = api_key or FRED_API_KEY
28
+ self.base_url = "https://api.stlouisfed.org/fred"
29
+
30
+ # Create output directories
31
+ os.makedirs(OUTPUT_DIR, exist_ok=True)
32
+ os.makedirs(PLOTS_DIR, exist_ok=True)
33
+
34
+ # Common economic indicators
35
+ self.indicators = {
36
+ 'GDP': 'GDP', # Gross Domestic Product
37
+ 'UNRATE': 'UNRATE', # Unemployment Rate
38
+ 'CPIAUCSL': 'CPIAUCSL', # Consumer Price Index
39
+ 'FEDFUNDS': 'FEDFUNDS', # Federal Funds Rate
40
+ 'DGS10': 'DGS10', # 10-Year Treasury Rate
41
+ 'DEXUSEU': 'DEXUSEU', # US/Euro Exchange Rate
42
+ 'PAYEMS': 'PAYEMS', # Total Nonfarm Payrolls
43
+ 'INDPRO': 'INDPRO', # Industrial Production
44
+ 'M2SL': 'M2SL', # M2 Money Stock
45
+ 'PCE': 'PCE' # Personal Consumption Expenditures
46
+ }
47
+
48
+ def get_series_info(self, series_id):
49
+ """Get information about a FRED series."""
50
+ try:
51
+ url = f"{self.base_url}/series"
52
+ params = {
53
+ 'series_id': series_id,
54
+ 'api_key': self.api_key,
55
+ 'file_type': 'json'
56
+ }
57
+
58
+ response = requests.get(url, params=params)
59
+
60
+ if response.status_code == 200:
61
+ data = response.json()
62
+ series = data.get('seriess', [])  # note: FRED's JSON key really is 'seriess'
63
+
64
+ if series:
65
+ s = series[0]
66
+ return {
67
+ 'id': s['id'],
68
+ 'title': s['title'],
69
+ 'units': s.get('units', ''),
70
+ 'frequency': s.get('frequency', ''),
71
+ 'last_updated': s.get('last_updated', ''),
72
+ 'notes': s.get('notes', '')
73
+ }
74
+
75
+ return None
76
+
77
+ except Exception as e:
78
+ print(f"Error getting info for {series_id}: {e}")
79
+ return None
80
+
81
+ def get_economic_data(self, series_ids, start_date=None, end_date=None):
82
+ """Fetch economic data for specified series."""
83
+ start_date = start_date or DEFAULT_START_DATE
84
+ end_date = end_date or DEFAULT_END_DATE
85
+
86
+ data = {}
87
+
88
+ for series_id in series_ids:
89
+ try:
90
+ print(f"Fetching data for {series_id}...")
91
+
92
+ url = f"{self.base_url}/series/observations"
93
+ params = {
94
+ 'series_id': series_id,
95
+ 'api_key': self.api_key,
96
+ 'file_type': 'json',
97
+ 'observation_start': start_date,  # FRED's observations endpoint expects observation_start/observation_end
98
+ 'observation_end': end_date
99
+ }
100
+
101
+ response = requests.get(url, params=params)
102
+
103
+ if response.status_code == 200:
104
+ response_data = response.json()
105
+ observations = response_data.get('observations', [])
106
+
107
+ if observations:
108
+ # Convert to pandas Series
109
+ dates = []
110
+ values = []
111
+
112
+ for obs in observations:
113
+ try:
114
+ date = pd.to_datetime(obs['date'])
115
+ value = float(obs['value']) if obs['value'] != '.' else np.nan
116
+ dates.append(date)
117
+ values.append(value)
118
+ except (ValueError, KeyError):
119
+ continue
120
+
121
+ if dates and values:
122
+ series_data = pd.Series(values, index=dates, name=series_id)
123
+ data[series_id] = series_data
124
+ print(f"✓ Retrieved {len(series_data)} observations for {series_id}")
125
+ else:
126
+ print(f"✗ No valid data for {series_id}")
127
+ else:
128
+ print(f"✗ No observations found for {series_id}")
129
+ else:
130
+ print(f"✗ Error fetching {series_id}: HTTP {response.status_code}")
131
+
132
+ except Exception as e:
133
+ print(f"✗ Error fetching {series_id}: {e}")
134
+
135
+ return data
136
+
137
+ def create_dataframe(self, data_dict):
138
+ """Convert dictionary of series data to a pandas DataFrame."""
139
+ if not data_dict:
140
+ return pd.DataFrame()
141
+
142
+ # Find the common date range
143
+ all_dates = set()
144
+ for series in data_dict.values():
145
+ all_dates.update(series.index)
146
+
147
+ # Create a complete date range
148
+ if all_dates:
149
+ date_range = pd.date_range(min(all_dates), max(all_dates), freq='D')
150
+ df = pd.DataFrame(index=date_range)
151
+
152
+ # Add each series
153
+ for series_id, series_data in data_dict.items():
154
+ df[series_id] = series_data
155
+
156
+ df.index.name = 'Date'
157
+ return df
158
+
159
+ return pd.DataFrame()
160
+
161
+ def save_data(self, df, filename):
162
+ """Save data to CSV file."""
163
+ if df.empty:
164
+ print("No data to save")
165
+ return None
166
+
167
+ filepath = os.path.join(OUTPUT_DIR, filename)
168
+ df.to_csv(filepath)
169
+ print(f"Data saved to {filepath}")
170
+ return filepath
171
+
172
+ def plot_economic_indicators(self, df, indicators_to_plot=None):
173
+ """Create plots for economic indicators."""
174
+ if df.empty:
175
+ print("No data to plot")
176
+ return
177
+
178
+ if indicators_to_plot is None:
179
+ indicators_to_plot = list(df.columns)
180
+
181
+ if not indicators_to_plot:
182
+ print("No indicators to plot")
183
+ return
184
+
185
+ # Set up the plotting style
186
+ plt.style.use('default')
187
+ sns.set_palette("husl")
188
+
189
+ # Create subplots
190
+ n_indicators = len(indicators_to_plot)
191
+ fig, axes = plt.subplots(n_indicators, 1, figsize=(15, 4*n_indicators))
192
+
193
+ if n_indicators == 1:
194
+ axes = [axes]
195
+
196
+ for i, indicator in enumerate(indicators_to_plot):
197
+ if indicator in df.columns:
198
+ ax = axes[i]
199
+ df[indicator].dropna().plot(ax=ax, linewidth=2)
200
+
201
+ # Get series info for title
202
+ info = self.get_series_info(indicator)
203
+ title = f'{indicator} - {info["title"]}' if info else indicator
204
+ ax.set_title(title)
205
+ ax.set_ylabel('Value')
206
+ ax.grid(True, alpha=0.3)
207
+
208
+ plt.tight_layout()
209
+ plot_path = os.path.join(PLOTS_DIR, 'economic_indicators.png')
210
+ plt.savefig(plot_path, dpi=300, bbox_inches='tight')
211
+ plt.show()
212
+ print(f"Plot saved to {plot_path}")
213
+
214
+ def generate_summary_statistics(self, df):
215
+ """Generate summary statistics for the economic data."""
216
+ if df.empty:
217
+ return pd.DataFrame()
218
+
219
+ summary = df.describe()
220
+
221
+ # Add additional statistics
222
+ summary.loc['missing_values'] = df.isnull().sum()
223
+ summary.loc['missing_percentage'] = (df.isnull().sum() / len(df)) * 100
224
+
225
+ return summary
226
+
227
+ def run_analysis(self, series_ids=None, start_date=None, end_date=None):
228
+ """Run a complete analysis of economic indicators."""
229
+ if series_ids is None:
230
+ series_ids = list(self.indicators.values())
231
+
232
+ print("=== FRED Economic Data Analysis v2 ===")
233
+ print(f"API Key: {self.api_key[:8]}...")
234
+ print(f"Date Range: {start_date or DEFAULT_START_DATE} to {end_date or DEFAULT_END_DATE}")
235
+ print(f"Series to analyze: {series_ids}")
236
+ print("=" * 50)
237
+
238
+ # Fetch data
239
+ data = self.get_economic_data(series_ids, start_date, end_date)
240
+
241
+ if not data:
242
+ print("No data retrieved. Please check your API key and series IDs.")
243
+ return None, None
244
+
245
+ # Create DataFrame
246
+ df = self.create_dataframe(data)
247
+
248
+ if df.empty:
249
+ print("No data to analyze")
250
+ return None, None
251
+
252
+ # Save data
253
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
254
+ self.save_data(df, f'fred_economic_data_{timestamp}.csv')
255
+
256
+ # Generate summary statistics
257
+ summary = self.generate_summary_statistics(df)
258
+ print("\n=== Summary Statistics ===")
259
+ print(summary)
260
+
261
+ # Create plots
262
+ print("\n=== Creating Visualizations ===")
263
+ self.plot_economic_indicators(df)
264
+
265
+ return df, summary
266
+
267
+ def main():
268
+ """Main function to run the FRED data analysis."""
269
+ collector = FREDDataCollectorV2()
270
+
271
+ # Example: Analyze key economic indicators
272
+ key_indicators = ['GDP', 'UNRATE', 'CPIAUCSL', 'FEDFUNDS', 'DGS10']
273
+
274
+ try:
275
+ df, summary = collector.run_analysis(series_ids=key_indicators)
276
+
277
+ if df is not None:
278
+ print("\n=== Analysis Complete ===")
279
+ print(f"Data shape: {df.shape}")
280
+ print(f"Date range: {df.index.min()} to {df.index.max()}")
281
+ else:
282
+ print("\n=== Analysis Failed ===")
283
+
284
+ except Exception as e:
285
+ print(f"Error during analysis: {e}")
286
+
287
+ if __name__ == "__main__":
288
+ main()
src/core/fred_pipeline.py ADDED
@@ -0,0 +1,88 @@
1
+ from .base_pipeline import BasePipeline
2
+ import requests
3
+ import pandas as pd
4
+ import os
5
+ from datetime import datetime
6
+
7
+ class FREDPipeline(BasePipeline):
8
+ """
9
+ FRED Data Pipeline: Extracts, transforms, and loads FRED data using config.
10
+ """
11
+ def __init__(self, config_path: str):
12
+ super().__init__(config_path)
13
+ self.fred_cfg = self.config['fred']
14
+ self.api_key = self.fred_cfg['api_key']
15
+ self.series = self.fred_cfg['series']
16
+ self.start_date = self.fred_cfg['start_date']
17
+ self.end_date = self.fred_cfg['end_date']
18
+ self.output_dir = self.fred_cfg['output_dir']
19
+ self.export_dir = self.fred_cfg['export_dir']
20
+ os.makedirs(self.output_dir, exist_ok=True)
21
+ os.makedirs(self.export_dir, exist_ok=True)
22
+
23
+ def extract(self):
24
+ """Extract data from FRED API for all configured series."""
25
+ base_url = "https://api.stlouisfed.org/fred/series/observations"
26
+ data = {}
27
+ for series_id in self.series:
28
+ params = {
29
+ 'series_id': series_id,
30
+ 'api_key': self.api_key,
31
+ 'file_type': 'json',
32
+ 'observation_start': self.start_date,  # FRED API parameter names
33
+ 'observation_end': self.end_date
34
+ }
35
+ try:
36
+ resp = requests.get(base_url, params=params)
37
+ resp.raise_for_status()
38
+ obs = resp.json().get('observations', [])
39
+ dates, values = [], []
40
+ for o in obs:
41
+ try:
42
+ dates.append(pd.to_datetime(o['date']))
43
+ values.append(float(o['value']) if o['value'] != '.' else None)
44
+ except Exception:
45
+ continue
46
+ data[series_id] = pd.Series(values, index=dates, name=series_id)
47
+ self.logger.info(f"Extracted {len(values)} records for {series_id}")
48
+ except Exception as e:
49
+ self.logger.error(f"Failed to extract {series_id}: {e}")
50
+ return data
51
+
52
+ def transform(self, data):
53
+ """Transform raw data into a DataFrame, align dates, handle missing."""
54
+ if not data:
55
+ self.logger.warning("No data to transform.")
56
+ return pd.DataFrame()
57
+ all_dates = set()
58
+ for s in data.values():
59
+ all_dates.update(s.index)
60
+ if not all_dates:
61
+ return pd.DataFrame()
62
+ date_range = pd.date_range(min(all_dates), max(all_dates), freq='D')
63
+ df = pd.DataFrame(index=date_range)
64
+ for k, v in data.items():
65
+ df[k] = v
66
+ df.index.name = 'Date'
67
+ self.logger.info(f"Transformed data to DataFrame with shape {df.shape}")
68
+ return df
69
+
70
+ def load(self, df):
71
+ """Save DataFrame to CSV in output_dir and export_dir."""
72
+ if df.empty:
73
+ self.logger.warning("No data to load.")
74
+ return None
75
+ ts = datetime.now().strftime("%Y%m%d_%H%M%S")
76
+ out_path = os.path.join(self.output_dir, f'fred_data_{ts}.csv')
77
+ exp_path = os.path.join(self.export_dir, f'fred_data_{ts}.csv')
78
+ df.to_csv(out_path)
79
+ df.to_csv(exp_path)
80
+ self.logger.info(f"Saved data to {out_path} and {exp_path}")
81
+ return out_path, exp_path
82
+
83
+ def run(self):
84
+ self.logger.info("Starting FRED data pipeline run...")
85
+ data = self.extract()
86
+ df = self.transform(data)
87
+ self.load(df)
88
+ self.logger.info("FRED data pipeline run complete.")
src/utils/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ """
2
+ Utility functions and helper modules.
3
+ """
4
+
5
+ from .examples import *
6
+
7
+ __all__ = ['examples']
src/utils/examples.py ADDED
@@ -0,0 +1,139 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Example usage of the FRED Data Collector
4
+ Demonstrates various ways to use the tool for economic data analysis
5
+ """
6
+
7
+ import sys
8
+ import os
9
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
10
+
11
+ from core.fred_client import FREDDataCollectorV2
12
+ import pandas as pd
13
+ from datetime import datetime, timedelta
14
+
15
+ def example_basic_usage():
16
+ """Basic usage example."""
17
+ print("=== Basic Usage Example ===")
18
+
19
+ collector = FREDDataCollectorV2()
20
+
21
+ # Get data for a single indicator
22
+ gdp_data = collector.get_economic_data(['GDP'], '2020-01-01', '2024-01-01')
23
+ df = collector.create_dataframe(gdp_data)
24
+
25
+ print(f"GDP data shape: {df.shape}")
26
+ print(f"Date range: {df.index.min()} to {df.index.max()}")
27
+ print(f"Latest GDP value: ${df['GDP'].iloc[-1]:,.2f} billion")
28
+
29
+ return df
30
+
31
+ def example_multiple_indicators():
32
+ """Example with multiple economic indicators."""
33
+ print("\n=== Multiple Indicators Example ===")
34
+
35
+ collector = FREDDataCollectorV2()
36
+
37
+ # Define indicators of interest
38
+ indicators = ['UNRATE', 'CPIAUCSL', 'FEDFUNDS']
39
+
40
+ # Get data for the last 5 years
41
+ end_date = datetime.now().strftime('%Y-%m-%d')
42
+ start_date = (datetime.now() - timedelta(days=5*365)).strftime('%Y-%m-%d')
43
+
44
+ data = collector.get_economic_data(indicators, start_date, end_date)
45
+ df = collector.create_dataframe(data)
46
+
47
+ # Generate summary statistics
48
+ summary = collector.generate_summary_statistics(df)
49
+ print("\nSummary Statistics:")
50
+ print(summary)
51
+
52
+ # Save data
53
+ collector.save_data(df, 'example_multiple_indicators.csv')
54
+
55
+ return df
56
+
57
+ def example_custom_analysis():
58
+ """Example of custom analysis."""
59
+ print("\n=== Custom Analysis Example ===")
60
+
61
+ collector = FREDDataCollectorV2()
62
+
63
+ # Focus on monetary policy indicators
64
+ monetary_indicators = ['FEDFUNDS', 'DGS10', 'M2SL']
65
+
66
+ # Get data for the last 10 years
67
+ end_date = datetime.now().strftime('%Y-%m-%d')
68
+ start_date = (datetime.now() - timedelta(days=10*365)).strftime('%Y-%m-%d')
69
+
70
+ data = collector.get_economic_data(monetary_indicators, start_date, end_date)
71
+ df = collector.create_dataframe(data)
72
+
73
+ # Calculate some custom metrics
74
+ if 'FEDFUNDS' in df.columns and 'DGS10' in df.columns:
75
+ # Calculate yield curve spread (10Y - Fed Funds)
76
+ df['YIELD_SPREAD'] = df['DGS10'] - df['FEDFUNDS']
77
+
78
+ print(f"\nYield Curve Analysis:")
79
+ print(f"Current Fed Funds Rate: {df['FEDFUNDS'].iloc[-1]:.2f}%")
80
+ print(f"Current 10Y Treasury Rate: {df['DGS10'].iloc[-1]:.2f}%")
81
+ print(f"Current Yield Spread: {df['YIELD_SPREAD'].iloc[-1]:.2f}%")
82
+
83
+ # Check for inverted yield curve (negative spread)
84
+ inverted_periods = df[df['YIELD_SPREAD'] < 0]
85
+ if not inverted_periods.empty:
86
+ print(f"Yield curve inverted for {len(inverted_periods)} periods")
87
+
88
+ return df
89
+
90
+ def example_series_info():
91
+ """Example of getting series information."""
92
+ print("\n=== Series Information Example ===")
93
+
94
+ collector = FREDDataCollectorV2()
95
+
96
+ # Get information about different series
97
+ series_to_check = ['GDP', 'UNRATE', 'CPIAUCSL']
98
+
99
+ for series_id in series_to_check:
100
+ info = collector.get_series_info(series_id)
101
+ if info:
102
+ print(f"\n{series_id}:")
103
+ print(f" Title: {info['title']}")
104
+ print(f" Units: {info['units']}")
105
+ print(f" Frequency: {info['frequency']}")
106
+ print(f" Last Updated: {info['last_updated']}")
107
+
108
+ def example_error_handling():
109
+ """Example showing error handling."""
110
+ print("\n=== Error Handling Example ===")
111
+
112
+ collector = FREDDataCollectorV2()
113
+
114
+ # Try to get data for an invalid series ID
115
+ invalid_series = ['INVALID_SERIES_ID']
116
+
117
+ data = collector.get_economic_data(invalid_series)
118
+ print("Attempted to fetch invalid series - handled gracefully")
119
+
120
+ def main():
121
+ """Run all examples."""
122
+ print("FRED Data Collector - Example Usage")
123
+ print("=" * 50)
124
+
125
+ try:
126
+ # Run examples
127
+ example_basic_usage()
128
+ example_multiple_indicators()
129
+ example_custom_analysis()
130
+ example_series_info()
131
+ example_error_handling()
132
+
133
+ print("\n=== All Examples Completed Successfully ===")
134
+
135
+ except Exception as e:
136
+ print(f"Error running examples: {e}")
137
+
138
+ if __name__ == "__main__":
139
+ main()
src/visualization/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ """
2
+ Data visualization and plotting utilities.
3
+ """
4
+
5
+ __all__ = []
tests/__pycache__/test_fred_api.cpython-39-pytest-7.4.0.pyc ADDED
Binary file (3.01 kB). View file
 
tests/__pycache__/test_fredapi_library.cpython-39-pytest-7.4.0.pyc ADDED
Binary file (2.56 kB). View file
 
tests/test_fred_api.py ADDED
@@ -0,0 +1,115 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Simple FRED API test
4
+ """
5
+
6
+ import requests
7
+ import sys
8
+ import os
9
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
10
+
11
+ from config.settings import FRED_API_KEY
12
+
13
+ def test_fred_api_direct():
14
+ """Test FRED API directly using requests."""
15
+ print("Testing FRED API directly...")
16
+
17
+ # Test URL for GDP series
18
+ url = f"https://api.stlouisfed.org/fred/series/observations"
19
+ params = {
20
+ 'series_id': 'GDP',
21
+ 'api_key': FRED_API_KEY,
22
+ 'file_type': 'json',
23
+ 'start_date': '2023-01-01',
24
+ 'end_date': '2023-12-31'
25
+ }
26
+
27
+ try:
28
+ response = requests.get(url, params=params)
29
+
30
+ if response.status_code == 200:
31
+ data = response.json()
32
+ observations = data.get('observations', [])
33
+
34
+ if observations:
35
+ print("✓ API connection successful!")
36
+ print(f"✓ Retrieved {len(observations)} GDP observations")
37
+
38
+ # Get the latest observation
39
+ latest = observations[-1]
40
+ print(f"✓ Latest GDP value: ${float(latest['value']):,.2f} billion")
41
+ print(f"✓ Date: {latest['date']}")
42
+ return True
43
+ else:
44
+ print("✗ No observations found")
45
+ return False
46
+ else:
47
+ print(f"✗ API request failed with status code: {response.status_code}")
48
+ print(f"Response: {response.text}")
49
+ return False
50
+
51
+ except Exception as e:
52
+ print(f"✗ API connection failed: {e}")
53
+ return False
54
+
55
+ def test_series_search():
56
+ """Test searching for series."""
57
+ print("\nTesting series search...")
58
+
59
+ url = "https://api.stlouisfed.org/fred/series/search"
60
+ params = {
61
+ 'search_text': 'GDP',
62
+ 'api_key': FRED_API_KEY,
63
+ 'file_type': 'json'
64
+ }
65
+
66
+ try:
67
+ response = requests.get(url, params=params)
68
+
69
+ if response.status_code == 200:
70
+ data = response.json()
71
+ series = data.get('seriess', [])
72
+
73
+ if series:
74
+ print("✓ Series search successful!")
75
+ print(f"✓ Found {len(series)} series matching 'GDP'")
76
+
77
+ # Show first few results
78
+ for i, s in enumerate(series[:3]):
79
+ print(f" {i+1}. {s['id']}: {s['title']}")
80
+ return True
81
+ else:
82
+ print("✗ No series found")
83
+ return False
84
+ else:
85
+ print(f"✗ Search request failed: {response.status_code}")
86
+ return False
87
+
88
+ except Exception as e:
89
+ print(f"✗ Search failed: {e}")
90
+ return False
91
+
92
+ def main():
93
+ """Run simple API tests."""
94
+ print("Simple FRED API Test")
95
+ print("=" * 30)
96
+ print(f"API Key: {FRED_API_KEY[:8]}...")
97
+ print()
98
+
99
+ # Test direct API access
100
+ api_ok = test_fred_api_direct()
101
+
102
+ # Test series search
103
+ search_ok = test_series_search()
104
+
105
+ print("\n" + "=" * 30)
106
+ if api_ok and search_ok:
107
+ print("✓ All tests passed! Your API key is working correctly.")
108
+ print("The issue is with the fredapi library, not your API key.")
109
+ else:
110
+ print("✗ Some tests failed. Please check your API key.")
111
+
112
+ return api_ok and search_ok
113
+
114
+ if __name__ == "__main__":
115
+ main()
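These helpers print and return booleans, so under pytest a failed API call still counts as a passing test. If stricter behaviour is wanted, thin assert-based wrappers would be enough (a sketch, not part of the commit):

def test_observations_endpoint():
    assert test_fred_api_direct(), "FRED observations request failed or returned no data"

def test_series_search_endpoint():
    assert test_series_search(), "FRED series search returned no results"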
tests/test_fredapi_library.py ADDED
@@ -0,0 +1,84 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Test script to verify FRED API key functionality
4
+ """
5
+
6
+ from fredapi import Fred
7
+ import sys
8
+ import os
9
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
10
+
11
+ from config.settings import FRED_API_KEY
12
+
13
+ def test_api_connection():
14
+ """Test the FRED API connection with the provided key."""
15
+ print("Testing FRED API connection...")
16
+
17
+ try:
18
+ # Initialize FRED client
19
+ fred = Fred(api_key=FRED_API_KEY)
20
+
21
+ # Test with a simple series (GDP)
22
+ print("Fetching GDP data as a test...")
23
+ gdp_data = fred.get_series('GDP', observation_start='2023-01-01', observation_end='2023-12-31')  # fredapi parameter names
24
+
25
+ if not gdp_data.empty:
26
+ print("✓ API connection successful!")
27
+ print(f"✓ Retrieved {len(gdp_data)} GDP observations")
28
+ print(f"✓ Latest GDP value: ${gdp_data.iloc[-1]:,.2f} billion")
29
+ print(f"✓ Date range: {gdp_data.index.min()} to {gdp_data.index.max()}")
30
+ return True
31
+ else:
32
+ print("✗ No data retrieved")
33
+ return False
34
+
35
+ except Exception as e:
36
+ print(f"✗ API connection failed: {e}")
37
+ return False
38
+
39
+ def test_series_info():
40
+ """Test getting series information."""
41
+ print("\nTesting series information retrieval...")
42
+
43
+ try:
44
+ fred = Fred(api_key=FRED_API_KEY)
45
+
46
+ # Test getting info for GDP
47
+ series_info = fred.get_series_info('GDP')
48
+
49
+ print("✓ Series information retrieved successfully!")
50
+ print(f" Title: {series_info.title}")
51
+ print(f" Units: {series_info.units}")
52
+ print(f" Frequency: {series_info.frequency}")
53
+ print(f" Last Updated: {series_info.last_updated}")
54
+
55
+ return True
56
+
57
+ except Exception as e:
58
+ print(f"✗ Failed to get series info: {e}")
59
+ return False
60
+
61
+ def main():
62
+ """Run API tests."""
63
+ print("FRED API Key Test")
64
+ print("=" * 30)
65
+ print(f"API Key: {FRED_API_KEY[:8]}...")
66
+ print()
67
+
68
+ # Test connection
69
+ connection_ok = test_api_connection()
70
+
71
+ # Test series info
72
+ info_ok = test_series_info()
73
+
74
+ print("\n" + "=" * 30)
75
+ if connection_ok and info_ok:
76
+ print("✓ All tests passed! Your API key is working correctly.")
77
+ print("You can now use the FRED data collector tool.")
78
+ else:
79
+ print("✗ Some tests failed. Please check your API key.")
80
+
81
+ return connection_ok and info_ok
82
+
83
+ if __name__ == "__main__":
84
+ main()