aditya-me13 committed
Commit 4f0125c · 1 Parent(s): 610152e

Integration of Aurora
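
For orientation, the new entry point this commit adds in aurora_pipeline.py (run_aurora_prediction_pipeline) is what the reworked /aurora_predict route calls. A minimal sketch of a direct invocation is shown below; it is not part of the commit, and the aurora import path, the AuroraPipeline constructor defaults, and the example date are assumptions:

import json
from aurora import AuroraAirPollution, Batch, Metadata, rollout  # assumed import path for the Aurora package
from aurora_pipeline import AuroraPipeline

# Assumes the default extracted_dir/static_path/checkpoint arguments defined in aurora_pipeline.py.
pipeline = AuroraPipeline()
run_metadata = pipeline.run_aurora_prediction_pipeline(
    date_str="2024-01-01",          # hypothetical date with CAMS data already extracted
    Batch=Batch,
    Metadata=Metadata,
    AuroraAirPollution=AuroraAirPollution,
    rollout=rollout,
    steps=2,                        # each step covers 12 hours
)
print(json.dumps(run_metadata, indent=2))  # contains "run_directory", used by the aurora_variables route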

app.py CHANGED
@@ -12,7 +12,7 @@ from werkzeug.utils import secure_filename
12
  from flask import Flask, render_template, request, redirect, url_for, flash, jsonify, send_file
13
 
14
  # Import our custom modules
15
- from data_processor import NetCDFProcessor, analyze_netcdf_file
16
  from plot_generator import IndiaMapPlotter
17
  from interactive_plot_generator import InteractiveIndiaMapPlotter
18
  from cams_downloader import CAMSDownloader
@@ -830,39 +830,47 @@ def aurora_status():
830
  # Aurora ML Prediction Routes
831
  @app.route('/aurora_predict', methods=['GET', 'POST'])
832
  def aurora_predict():
833
- """Aurora prediction form and handler"""
834
  if not AURORA_AVAILABLE:
835
  flash('Aurora model is not available. Please install required dependencies.', 'error')
836
  return redirect(url_for('index'))
837
 
838
  if request.method == 'GET':
839
  current_date = datetime.now().strftime('%Y-%m-%d')
840
- return render_template('aurora_predict.html', current_date=current_date)
841
 
842
  # POST: Run the pipeline
843
  date_str = request.form.get('date')
844
- steps = int(request.form.get('steps', 2)) # Default to 2 steps for CPU-friendly execution
845
 
846
  # Limit steps for local/CPU execution
847
  if hasattr(aurora_pipeline, 'cpu_only') and aurora_pipeline.cpu_only:
848
- steps = min(steps, 2) # Max 2 steps for CPU
849
- if steps != int(request.form.get('steps', 2)):
 
850
  flash(f'Steps reduced to {steps} for CPU mode optimization', 'info')
851
 
852
  if not date_str:
853
  flash('Please select a valid date.', 'error')
854
  return redirect(url_for('aurora_predict'))
855
-
856
- cams_date = date_str
857
- cams_time = "12:00" # Always use 12:00 UTC for Aurora
858
 
859
  try:
860
- print(f"🚀 Starting Aurora prediction pipeline for {cams_date}")
 
861
 
862
- # 1. Download CAMS data for the selected date
863
- print("📥 Step 1/5: Downloading CAMS atmospheric data...")
864
  try:
865
- zip_path = downloader.download_cams_data(cams_date)
866
  except Exception as e:
867
  error_msg = f"Failed to download CAMS data: {str(e)}"
868
  if "error response" in str(e).lower():
@@ -886,23 +894,28 @@ def aurora_predict():
886
 print(f"❌ Extraction error: {traceback.format_exc()}")
887
  return redirect(url_for('aurora_predict'))
888
 
889
- # 2. Run Aurora pipeline (batch creation, model, prediction, save NetCDF)
890
- print("🔮 Step 2/5: Initializing Aurora ML pipeline...")
891
- output_nc = f"predictions_{cams_date}_{cams_time.replace(':','')}.nc"
892
-
893
- print(f"🧠 Step 3/5: Loading Aurora model (this may take a few minutes)...")
894
- print(f"⚡ Step 4/5: Running {steps} prediction steps...")
895
 
896
  try:
897
- predictions = aurora_pipeline.run_pipeline(
898
- date_str=cams_date,
 
899
  Batch=Batch,
900
  Metadata=Metadata,
901
  AuroraAirPollution=AuroraAirPollution,
902
  rollout=rollout,
903
- steps=steps,
904
- output_path=Path('predictions') / output_nc
905
  )
906
  except Exception as e:
907
  error_msg = f"Aurora model execution failed: {str(e)}"
908
  if "map_location" in str(e):
@@ -914,12 +927,13 @@ def aurora_predict():
914
  flash(error_msg, 'error')
915
 print(f"❌ Aurora model error: {traceback.format_exc()}")
916
  return redirect(url_for('aurora_predict'))
917
-
918
- print("💾 Step 5/5: Saving results and preparing visualization...")
919
- print(f"✅ Aurora predictions completed for {cams_date} 12:00 UTC")
920
-
921
- flash(f'🔮 Aurora predictions generated successfully for {cams_date} ({steps} steps)', 'success')
922
- return redirect(url_for('visualize_prediction', filename=output_nc))
 
923
 
924
  except Exception as e:
925
  # Catch-all for any other unexpected errors
@@ -929,97 +943,319 @@ def aurora_predict():
929
  return redirect(url_for('aurora_predict'))
930
 
931
 
932
- @app.route('/visualize_prediction/<filename>', methods=['GET', 'POST'])
933
  def visualize_prediction(filename):
934
- """Visualize or download Aurora prediction output with variable and step selection"""
935
- file_path = Path('predictions') / filename
936
  if not file_path.exists():
937
  flash('Prediction file not found', 'error')
938
  return redirect(url_for('index'))
939
 
940
- ds = xr.open_dataset(file_path)
941
- variables = list(ds.data_vars.keys())
942
- steps = ds['step'].values if 'step' in ds else np.arange(ds[variables[0]].shape[0])
943
 
944
- # Handle form submission
945
- if request.method == 'POST':
946
- var_name = request.form.get('variable')
947
  step = int(request.form.get('step', 0))
948
  color_theme = request.form.get('color_theme', 'viridis')
949
- else:
950
- var_name = variables[0]
951
- step = 0
952
- color_theme = 'viridis'
953
-
954
- # Prepare data for plotting
955
- data = ds[var_name].values
956
- if data.ndim == 3:
957
- data_to_plot = data[step]
958
- elif data.ndim == 2:
959
- data_to_plot = data
960
- else:
961
- flash('Prediction data shape not supported for plotting', 'error')
962
  return redirect(url_for('index'))
963
 
964
- # Extract lat/lon
965
- lats = ds['lat'].values if 'lat' in ds else ds['latitude'].values
966
- lons = ds['lon'].values if 'lon' in ds else ds['longitude'].values
967
-
968
- # Prepare metadata for plot
969
- from constants import NETCDF_VARIABLES
970
- var_info = NETCDF_VARIABLES.get(var_name, {})
971
- display_name = var_info.get('name', var_name)
972
- units = ds[var_name].attrs.get('units', var_info.get('units', ''))
973
- # Use user-selected color theme, fallback to variable default, then viridis
974
- if 'color_theme' not in locals():
975
- color_theme = var_info.get('cmap', 'viridis')
976
-
977
- metadata = {
978
- 'variable_name': var_name,
979
- 'display_name': display_name,
980
- 'units': units,
981
- 'lats': lats,
982
- 'lons': lons,
983
- 'pressure_level': None,
984
- 'timestamp_str': str(steps[step]) if len(steps) > step else '',
985
- }
986
 
987
- # Generate plot
988
- plot_path = plotter.create_india_map(
989
- data_to_plot,
990
- metadata,
991
- color_theme=color_theme,
992
- save_plot=True,
993
- custom_title=f"Aurora Prediction: {display_name} (step {step})"
994
- )
995
 
996
- if plot_path:
997
- plot_filename = Path(plot_path).name
998
-
999
- # Provide download link for NetCDF
1000
- download_url = url_for('download_prediction_netcdf', filename=filename)
1001
 
1002
- return render_template(
1003
- 'aurora_prediction_plot.html',
1004
- plot_filename=plot_filename,
1005
- var_name=var_name,
1006
- step=step,
1007
- variables=variables,
1008
- steps=range(len(steps)),
1009
- filename=filename,
1010
- download_url=download_url,
1011
- color_themes=COLOR_THEMES,
1012
- current_color_theme=color_theme
1013
- )
1014
- else:
1015
- flash('Error generating prediction plot', 'error')
1016
  return redirect(url_for('index'))
1017
 
1018
-
1019
- @app.route('/download_prediction_netcdf/<filename>')
1020
  def download_prediction_netcdf(filename):
1021
  """Download the Aurora prediction NetCDF file"""
1022
- file_path = Path('predictions') / filename
1023
  if not file_path.exists():
1024
  flash('Prediction file not found', 'error')
1025
  return redirect(url_for('index'))
@@ -1033,6 +1269,141 @@ def too_large(e):
1033
  return redirect(url_for('index'))
1034
 
1035
 
1036
  @app.errorhandler(404)
1037
  def not_found(e):
1038
  """Handle 404 errors"""
@@ -1059,4 +1430,4 @@ if __name__ == '__main__':
1059
 print("🔧 CDS API Ready:", downloader.is_client_ready())
1060
 
1061
  # Run the Flask app
1062
- app.run(debug=True, host='0.0.0.0', port=port)
 
12
  from flask import Flask, render_template, request, redirect, url_for, flash, jsonify, send_file
13
 
14
  # Import our custom modules
15
+ from data_processor import NetCDFProcessor, AuroraPredictionProcessor, analyze_netcdf_file
16
  from plot_generator import IndiaMapPlotter
17
  from interactive_plot_generator import InteractiveIndiaMapPlotter
18
  from cams_downloader import CAMSDownloader
 
830
  # Aurora ML Prediction Routes
831
  @app.route('/aurora_predict', methods=['GET', 'POST'])
832
  def aurora_predict():
833
+ """Aurora prediction form and handler with enhanced step selection"""
834
  if not AURORA_AVAILABLE:
835
  flash('Aurora model is not available. Please install required dependencies.', 'error')
836
  return redirect(url_for('index'))
837
 
838
  if request.method == 'GET':
839
  current_date = datetime.now().strftime('%Y-%m-%d')
840
+ # Get list of existing prediction runs
841
+ existing_runs = AuroraPipeline.list_prediction_runs() if hasattr(AuroraPipeline, 'list_prediction_runs') else []
842
+ return render_template('aurora_predict.html',
843
+ current_date=current_date,
844
+ existing_runs=existing_runs)
845
 
846
  # POST: Run the pipeline
847
  date_str = request.form.get('date')
848
+ steps = int(request.form.get('steps', 2)) # Default to 2 steps
849
+
850
+ # Validate steps (1-4 allowed, each representing 12 hours)
851
+ if steps < 1 or steps > 4:
852
+ flash('Number of steps must be between 1 and 4 (each step = 12 hours)', 'error')
853
+ return redirect(url_for('aurora_predict'))
854
 
855
  # Limit steps for local/CPU execution
856
  if hasattr(aurora_pipeline, 'cpu_only') and aurora_pipeline.cpu_only:
857
+ max_cpu_steps = 2
858
+ if steps > max_cpu_steps:
859
+ steps = max_cpu_steps
860
  flash(f'Steps reduced to {steps} for CPU mode optimization', 'info')
861
 
862
  if not date_str:
863
  flash('Please select a valid date.', 'error')
864
  return redirect(url_for('aurora_predict'))
865
 
866
  try:
867
+ print(f"🚀 Starting Aurora prediction pipeline for {date_str}")
868
+ print(f"📊 Requested {steps} forward steps ({steps * 12} hours coverage)")
869
 
870
+ # 1. Download CAMS data for the selected date (if not already available)
871
+ print("📥 Step 1/5: Checking/downloading CAMS atmospheric data...")
872
  try:
873
+ zip_path = downloader.download_cams_data(date_str)
874
  except Exception as e:
875
  error_msg = f"Failed to download CAMS data: {str(e)}"
876
  if "error response" in str(e).lower():
 
894
 print(f"❌ Extraction error: {traceback.format_exc()}")
895
  return redirect(url_for('aurora_predict'))
896
 
897
+ # 2. Run enhanced Aurora pipeline
898
+ print("🔮 Step 2/5: Running enhanced Aurora ML pipeline...")
899
 
900
  try:
901
+ # Use the enhanced pipeline method
902
+ run_metadata = aurora_pipeline.run_aurora_prediction_pipeline(
903
+ date_str=date_str,
904
  Batch=Batch,
905
  Metadata=Metadata,
906
  AuroraAirPollution=AuroraAirPollution,
907
  rollout=rollout,
908
+ steps=steps
 
909
  )
910
+
911
+ print("✅ Aurora predictions completed successfully")
912
+
913
+ # Redirect to aurora variables page
914
+ run_dir_name = run_metadata['run_directory'].split('/')[-1]
915
+
916
+ flash(f'🔮 Aurora predictions generated successfully for {date_str} ({steps} steps, {steps * 12}h coverage)', 'success')
917
+ return redirect(url_for('aurora_variables', run_dir=run_dir_name))
918
+
919
  except Exception as e:
920
  error_msg = f"Aurora model execution failed: {str(e)}"
921
  if "map_location" in str(e):
 
927
  flash(error_msg, 'error')
928
 print(f"❌ Aurora model error: {traceback.format_exc()}")
929
  return redirect(url_for('aurora_predict'))
930
+
931
+ except Exception as e:
932
+ # Catch-all for any other unexpected errors
933
+ error_msg = f'Unexpected error in Aurora pipeline: {str(e)}'
934
+ flash(error_msg, 'error')
935
+ print(f"❌ Unexpected Aurora pipeline error: {traceback.format_exc()}")
936
+ return redirect(url_for('aurora_predict'))
937
 
938
  except Exception as e:
939
  # Catch-all for any other unexpected errors
 
943
  return redirect(url_for('aurora_predict'))
944
 
945
 
946
+ @app.route('/visualize_prediction/<path:filename>', methods=['GET', 'POST'])
947
  def visualize_prediction(filename):
948
+ """Aurora prediction visualization with step, variable, and pressure level selection"""
949
+ # Handle both old and new filename formats
950
+ if filename.endswith('.nc'):
951
+ file_path = Path('predictions') / filename
952
+ else:
953
+ # Try to find the prediction file in the run directory
954
+ run_dir = Path('predictions') / filename
955
+ if run_dir.is_dir():
956
+ # Look for the .nc file in the directory
957
+ nc_files = list(run_dir.glob("*.nc"))
958
+ if nc_files:
959
+ file_path = nc_files[0]
960
+ else:
961
+ flash('No prediction file found in run directory', 'error')
962
+ return redirect(url_for('index'))
963
+ else:
964
+ file_path = Path('predictions') / filename
965
+
966
  if not file_path.exists():
967
  flash('Prediction file not found', 'error')
968
  return redirect(url_for('index'))
969
 
970
+ try:
971
+ ds = xr.open_dataset(file_path)
972
+
973
+ # Get all variables and separate surface from atmospheric
974
+ all_variables = list(ds.data_vars.keys())
975
+ surface_vars = []
976
+ atmospheric_vars = []
977
+
978
+ for var in all_variables:
979
+ if 'pressure_level' in ds[var].dims:
980
+ atmospheric_vars.append(var)
981
+ else:
982
+ surface_vars.append(var)
983
+
984
+ # Get steps and pressure levels
985
+ steps = list(range(len(ds['step']))) if 'step' in ds else [0]
986
+ pressure_levels = list(ds['pressure_level'].values) if 'pressure_level' in ds else []
987
+
988
+ # Handle form submission
989
+ if request.method == 'POST':
990
+ selected_step = int(request.form.get('step', 0))
991
+ var_name = request.form.get('variable')
992
+ pressure_level = request.form.get('pressure_level')
993
+ color_theme = request.form.get('color_theme', 'viridis')
994
+ plot_type = request.form.get('plot_type', 'static')
995
+ else:
996
+ selected_step = 0
997
+ var_name = surface_vars[0] if surface_vars else all_variables[0] if all_variables else None
998
+ pressure_level = None
999
+ color_theme = 'viridis'
1000
+ plot_type = 'static'
1001
+
1002
+ if not var_name or var_name not in all_variables:
1003
+ flash('Invalid variable selected', 'error')
1004
+ return redirect(url_for('index'))
1005
 
1006
+ # Validate step
1007
+ if selected_step < 0 or selected_step >= len(steps):
1008
+ selected_step = 0
1009
+
1010
+ return render_template(
1011
+ 'aurora_variables.html',
1012
+ filename=filename,
1013
+ file_path=str(file_path),
1014
+ surface_vars=surface_vars,
1015
+ atmospheric_vars=atmospheric_vars,
1016
+ steps=steps,
1017
+ pressure_levels=pressure_levels,
1018
+ selected_step=selected_step,
1019
+ selected_variable=var_name,
1020
+ selected_pressure_level=pressure_level,
1021
+ color_theme=color_theme,
1022
+ plot_type=plot_type,
1023
+ color_themes=COLOR_THEMES
1024
+ )
1025
+
1026
+ except Exception as e:
1027
+ flash(f'Error processing prediction file: {str(e)}', 'error')
1028
+ print(f"❌ Prediction visualization error: {traceback.format_exc()}")
1029
+ return redirect(url_for('index'))
1030
+
1031
+
1032
+ @app.route('/generate_aurora_plot', methods=['POST'])
1033
+ def generate_aurora_plot():
1034
+ """Generate plot from Aurora prediction data"""
1035
+ try:
1036
+ file_path = request.form.get('file_path')
1037
  step = int(request.form.get('step', 0))
1038
+ var_name = request.form.get('variable')
1039
+ pressure_level = request.form.get('pressure_level')
1040
  color_theme = request.form.get('color_theme', 'viridis')
1041
+ plot_type = request.form.get('plot_type', 'static')
1042
+
1043
+ if not file_path or not var_name:
1044
+ flash('Missing required parameters', 'error')
1045
+ return redirect(url_for('index'))
1046
+
1047
+ # Open dataset
1048
+ ds = xr.open_dataset(file_path)
1049
+
1050
+ # Get data for the selected variable and step
1051
+ data = ds[var_name]
1052
+
1053
+ # Handle different dimensions
1054
+ if 'step' in data.dims:
1055
+ data = data.isel(step=step)
1056
+
1057
+ if pressure_level and 'pressure_level' in data.dims:
1058
+ pressure_level = float(pressure_level)
1059
+ data = data.sel(pressure_level=pressure_level, method='nearest')
1060
+
1061
+ # Convert to numpy
1062
+ data_to_plot = data.values
1063
+
1064
+ # Get coordinates
1065
+ lats = ds['lat'].values if 'lat' in ds else ds['latitude'].values
1066
+ lons = ds['lon'].values if 'lon' in ds else ds['longitude'].values
1067
+
1068
+ # Prepare metadata
1069
+ from constants import NETCDF_VARIABLES
1070
+ var_info = NETCDF_VARIABLES.get(var_name, {})
1071
+ display_name = var_info.get('name', var_name)
1072
+ units = ds[var_name].attrs.get('units', var_info.get('units', ''))
1073
+
1074
+ hours_from_start = (step + 1) * 12
1075
+ step_time_str = f"T+{hours_from_start}h (Step {step + 1})"
1076
+
1077
+ metadata = {
1078
+ 'variable_name': var_name,
1079
+ 'display_name': display_name,
1080
+ 'units': units,
1081
+ 'lats': lats,
1082
+ 'lons': lons,
1083
+ 'pressure_level': pressure_level if pressure_level else None,
1084
+ 'timestamp_str': step_time_str,
1085
+ }
1086
+
1087
+ # Generate plot based on type
1088
+ if plot_type == 'interactive':
1089
+ # Generate interactive plot
1090
+ plot_result = interactive_plotter.create_india_map(
1091
+ data_to_plot,
1092
+ metadata,
1093
+ color_theme=color_theme,
1094
+ save_plot=True,
1095
+ custom_title=f"Aurora Prediction: {display_name} ({step_time_str})"
1096
+ )
1097
+
1098
+ if plot_result and plot_result.get('html_path'):
1099
+ plot_filename = Path(plot_result['html_path']).name
1100
+ return render_template(
1101
+ 'interactive_plot.html',
1102
+ plot_filename=plot_filename,
1103
+ var_name=var_name,
1104
+ pressure_level=pressure_level,
1105
+ metadata=metadata
1106
+ )
1107
+ else:
1108
+ # Generate static plot
1109
+ plot_path = plotter.create_india_map(
1110
+ data_to_plot,
1111
+ metadata,
1112
+ color_theme=color_theme,
1113
+ save_plot=True,
1114
+ custom_title=f"Aurora Prediction: {display_name} ({step_time_str})"
1115
+ )
1116
+
1117
+ if plot_path:
1118
+ plot_filename = Path(plot_path).name
1119
+ return render_template(
1120
+ 'plot.html',
1121
+ plot_filename=plot_filename,
1122
+ var_name=var_name,
1123
+ pressure_level=pressure_level,
1124
+ metadata=metadata
1125
+ )
1126
+
1127
+ flash('Error generating plot', 'error')
1128
+ return redirect(url_for('index'))
1129
+
1130
+ except Exception as e:
1131
+ flash(f'Error generating plot: {str(e)}', 'error')
1132
+ print(f"❌ Plot generation error: {traceback.format_exc()}")
1133
  return redirect(url_for('index'))
1134
 
1135
 
1136
+ @app.route('/aurora_plot', methods=['POST'])
1137
+ def aurora_plot():
1138
+ """Generate plot from Aurora prediction variables"""
1139
+ if not AURORA_AVAILABLE:
1140
+ flash('Aurora model is not available.', 'error')
1141
+ return redirect(url_for('index'))
1142
 
1143
+ try:
1144
+ run_dir = request.form.get('run_dir')
1145
+ step = request.form.get('step')
1146
+ variable = request.form.get('variable')
1147
+ pressure_level = request.form.get('pressure_level')
1148
+ color_theme = request.form.get('color_theme', 'viridis')
1149
+ plot_type = request.form.get('plot_type', 'static')
1150
+
1151
+ if not all([run_dir, step, variable]):
1152
+ flash('Missing required parameters', 'error')
1153
+ return redirect(url_for('aurora_variables', run_dir=run_dir))
1154
+
1155
+ # Find the filename for this step
1156
+ run_path = Path('predictions') / run_dir
1157
+ step_files = list(run_path.glob(f'*_step{int(step):02d}_*.nc'))
1158
+
1159
+ if not step_files:
1160
+ flash(f'No file found for step {step}', 'error')
1161
+ return redirect(url_for('aurora_variables', run_dir=run_dir))
1162
+
1163
+ file_path = step_files[0] # Take the first match
1164
+ filename = file_path.name
1165
+
1166
+ if not file_path.exists():
1167
+ flash('Prediction file not found', 'error')
1168
+ return redirect(url_for('aurora_variables', run_dir=run_dir))
1169
+
1170
+ # Use Aurora prediction processor
1171
+ processor = AuroraPredictionProcessor(str(file_path))
1172
+
1173
+ try:
1174
+ file_info = analyze_netcdf_file(str(file_path))
1175
+
1176
+ var_info = file_info['detected_variables'].get(variable)
1177
+ if not var_info:
1178
+ flash('Variable not found in file', 'error')
1179
+ return redirect(url_for('aurora_variables', run_dir=run_dir))
1180
+
1181
+ # Extract data using Aurora processor with step=0 (single timestep files)
1182
+ if var_info.get('type') == 'atmospheric' and pressure_level:
1183
+ pressure_level = float(pressure_level)
1184
+ data, metadata = processor.extract_variable_data(variable, pressure_level=pressure_level, step=0)
1185
+ else:
1186
+ data, metadata = processor.extract_variable_data(variable, step=0)
1187
+
1188
 
1189
+
1190
+ # Prepare plot_info for templates
1191
+ plot_info = {
1192
+ 'variable': metadata.get('display_name', 'Unknown Variable'),
1193
+ 'units': metadata.get('units', ''),
1194
+ 'shape': str(data.shape),
1195
+ 'pressure_level': metadata.get('pressure_level'),
1196
+ 'color_theme': COLOR_THEMES.get(color_theme, color_theme),
1197
+ 'generated_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
1198
+ 'data_range': {
1199
+ 'min': float(f"{data.min():.3f}") if hasattr(data, 'min') and data.min() is not None else 0,
1200
+ 'max': float(f"{data.max():.3f}") if hasattr(data, 'max') and data.max() is not None else 0,
1201
+ 'mean': float(f"{data.mean():.3f}") if hasattr(data, 'mean') and data.mean() is not None else 0
1202
+ },
1203
+ 'timestamp': metadata.get('timestamp_str', 'Unknown Time'),
1204
+ 'source': metadata.get('source', 'Aurora Prediction')
1205
+ }
1206
+
1207
+ if plot_type == 'interactive':
1208
+ plot_result = interactive_plotter.create_india_map(
1209
+ data, metadata, color_theme=color_theme, save_plot=True
1210
+ )
1211
+ if plot_result and plot_result.get('html_path'):
1212
+ plot_filename = Path(plot_result['html_path']).name
1213
+ return render_template('view_interactive.html',
1214
+ plot_filename=plot_filename,
1215
+ metadata=metadata,
1216
+ plot_info=plot_info)
1217
+ else:
1218
+ plot_path = plotter.create_india_map(
1219
+ data, metadata, color_theme=color_theme, save_plot=True
1220
+ )
1221
+ if plot_path:
1222
+ plot_filename = Path(plot_path).name
1223
+ return render_template('plot.html',
1224
+ plot_filename=plot_filename,
1225
+ metadata=metadata,
1226
+ plot_info=plot_info,
1227
+ filename=filename)
1228
+
1229
+ flash('Error generating plot', 'error')
1230
+ return redirect(url_for('aurora_variables', run_dir=run_dir))
1231
+
1232
+ finally:
1233
+ processor.close()
1234
+
1235
+ except Exception as e:
1236
+ flash(f'Error generating Aurora plot: {str(e)}', 'error')
1237
  return redirect(url_for('index'))
1238
 
1239
+ @app.route('/download_prediction_netcdf/<path:filename>')
 
1240
  def download_prediction_netcdf(filename):
1241
  """Download the Aurora prediction NetCDF file"""
1242
+ # Handle both old and new filename formats
1243
+ if filename.endswith('.nc'):
1244
+ file_path = Path('predictions') / filename
1245
+ else:
1246
+ # Try to find the prediction file in the run directory
1247
+ run_dir = Path('predictions') / filename
1248
+ if run_dir.is_dir():
1249
+ nc_files = list(run_dir.glob("*.nc"))
1250
+ if nc_files:
1251
+ file_path = nc_files[0]
1252
+ filename = file_path.name
1253
+ else:
1254
+ flash('Prediction file not found', 'error')
1255
+ return redirect(url_for('index'))
1256
+ else:
1257
+ file_path = Path('predictions') / filename
1258
+
1259
  if not file_path.exists():
1260
  flash('Prediction file not found', 'error')
1261
  return redirect(url_for('index'))
 
1269
  return redirect(url_for('index'))
1270
 
1271
 
1272
+ @app.route('/api/aurora_step_variables/<run_dir>/<int:step>')
1273
+ def get_aurora_step_variables(run_dir, step):
1274
+ """Get variables and pressure levels for a specific Aurora prediction step"""
1275
+ if not AURORA_AVAILABLE:
1276
+ return jsonify({'error': 'Aurora model not available'}), 400
1277
+
1278
+ try:
1279
+ # Find the file for this step
1280
+ run_path = Path('predictions') / run_dir
1281
+ step_files = list(run_path.glob(f'*_step{step:02d}_*.nc'))
1282
+
1283
+ if not step_files:
1284
+ return jsonify({'error': f'No file found for step {step}'}), 404
1285
+
1286
+ file_path = step_files[0]
1287
+
1288
+ # Load and analyze the file using the same method as regular CAMS files
1289
+ file_info = analyze_netcdf_file(str(file_path))
1290
+
1291
+ if not file_info['success']:
1292
+ return jsonify({'error': f'Failed to analyze file: {file_info.get("error", "Unknown error")}'}), 500
1293
+
1294
+ surface_vars = []
1295
+ atmos_vars = []
1296
+ pressure_levels = []
1297
+
1298
+ # Extract variables from detected_variables
1299
+ for var_name, var_info in file_info['detected_variables'].items():
1300
+ if var_info['type'] == 'surface':
1301
+ surface_vars.append(var_name)
1302
+ elif var_info['type'] == 'atmospheric':
1303
+ atmos_vars.append(var_name)
1304
+
1305
+ # Get pressure levels from the first atmospheric variable
1306
+ ds = xr.open_dataset(file_path)
1307
+ if 'pressure_level' in ds.coords:
1308
+ pressure_levels = ds.pressure_level.values.tolist()
1309
+ ds.close()
1310
+
1311
+ return jsonify({
1312
+ 'surface_vars': surface_vars,
1313
+ 'atmos_vars': atmos_vars,
1314
+ 'pressure_levels': pressure_levels,
1315
+ 'filename': file_path.name
1316
+ })
1317
+
1318
+ except Exception as e:
1319
+ return jsonify({'error': str(e)}), 500
1320
+
1321
+ @app.route('/aurora_variables/<run_dir>')
1322
+ def aurora_variables(run_dir):
1323
+ """Show Aurora prediction variables selection page similar to variables.html"""
1324
+ if not AURORA_AVAILABLE:
1325
+ flash('Aurora model is not available.', 'error')
1326
+ return redirect(url_for('index'))
1327
+
1328
+ try:
1329
+ # Get prediction files from run directory
1330
+ run_path = Path('predictions') / run_dir
1331
+ if not run_path.exists():
1332
+ flash(f'Prediction run not found: {run_path}', 'error')
1333
+ return redirect(url_for('index'))
1334
+
1335
+ # Find all prediction files in the directory
1336
+ pred_files = sorted(run_path.glob('*.nc'))
1337
+ if not pred_files:
1338
+ flash('No prediction files found in run', 'error')
1339
+ return redirect(url_for('index'))
1340
+
1341
+ # Get step numbers and filenames
1342
+ steps_data = []
1343
+ for file_path in pred_files:
1344
+ filename = file_path.name
1345
+ # Extract step number from filename
1346
+ if 'step' in filename:
1347
+ try:
1348
+ step_part = filename.split('step')[1].split('_')[0]
1349
+ step_num = int(step_part)
1350
+ steps_data.append({
1351
+ 'step': step_num,
1352
+ 'filename': filename,
1353
+ 'forecast_hours': step_num * 12
1354
+ })
1355
+ except:
1356
+ pass
1357
+
1358
+ steps_data.sort(key=lambda x: x['step'])
1359
+
1360
+ # Get variables from the first file
1361
+ first_file = pred_files[0]
1362
+ ds = xr.open_dataset(first_file)
1363
+
1364
+ # Separate surface and atmospheric variables
1365
+ surface_vars = []
1366
+ atmos_vars = []
1367
+ pressure_levels = []
1368
+
1369
+ for var_name in ds.data_vars:
1370
+ if len(ds[var_name].dims) == 2: # lat, lon
1371
+ surface_vars.append(var_name)
1372
+ elif len(ds[var_name].dims) == 3: # pressure_level, lat, lon
1373
+ atmos_vars.append(var_name)
1374
+
1375
+ if 'pressure_level' in ds.coords:
1376
+ pressure_levels = ds.pressure_level.values.tolist()
1377
+
1378
+ ds.close()
1379
+
1380
+ return render_template('aurora_variables.html',
1381
+ run_dir=run_dir,
1382
+ steps_data=steps_data,
1383
+ surface_vars=surface_vars,
1384
+ atmos_vars=atmos_vars,
1385
+ pressure_levels=pressure_levels,
1386
+ color_themes=COLOR_THEMES)
1387
+
1388
+ except Exception as e:
1389
+ flash(f'Error loading Aurora variables: {str(e)}', 'error')
1390
+ return redirect(url_for('index'))
1391
+
1392
+ @app.route('/prediction_runs')
1393
+ def prediction_runs():
1394
+ """Browse available Aurora prediction runs"""
1395
+ if not AURORA_AVAILABLE:
1396
+ flash('Aurora model is not available.', 'error')
1397
+ return redirect(url_for('index'))
1398
+
1399
+ try:
1400
+ runs = AuroraPipeline.list_prediction_runs()
1401
+ return render_template('prediction_runs.html', runs=runs)
1402
+ except Exception as e:
1403
+ flash(f'Error listing prediction runs: {str(e)}', 'error')
1404
+ return redirect(url_for('index'))
1405
+
1406
+
1407
  @app.errorhandler(404)
1408
  def not_found(e):
1409
  """Handle 404 errors"""
 
1430
 print("🔧 CDS API Ready:", downloader.is_client_ready())
1431
 
1432
  # Run the Flask app
1433
+ app.run(debug=False, host='0.0.0.0', port=port)
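
For reference, the new /api/aurora_step_variables endpoint added above returns the surface variables, atmospheric variables, and pressure levels found in one saved prediction step. A sketch of exercising it is shown below; the host, port, and run directory name are assumptions, not part of the commit:

import requests  # assumes the requests package is available

run_dir = "2024-01-01_run_20240102_120000"   # hypothetical <date>_run_<timestamp> directory under predictions/
resp = requests.get(f"http://localhost:7860/api/aurora_step_variables/{run_dir}/1")
info = resp.json()
print(info["surface_vars"])      # variables without a pressure_level dimension
print(info["atmos_vars"])        # variables with a pressure_level dimension
print(info["pressure_levels"])   # levels read from the step file
print(info["filename"])          # e.g. <date>_step01_<generation_date>.nc
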
aurora_pipeline.py CHANGED
@@ -4,33 +4,20 @@ import subprocess
4
  import os
5
 
6
  def get_freest_cuda_device_id():
7
- """Get the freest CUDA device ID if available, otherwise return None for CPU mode"""
8
  try:
9
- # Check if nvidia-smi exists first
10
  result = subprocess.run(
11
  ['nvidia-smi', '--query-gpu=memory.free', '--format=csv,nounits,noheader'],
12
- stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8'
13
  )
14
- if result.returncode == 0:
15
- memory_free = [int(x) for x in result.stdout.strip().split('\n')]
16
- device_id = memory_free.index(max(memory_free))
17
- print(f"🎮 GPU available - using device {device_id}")
18
- return str(device_id)
19
- else:
20
- print("⚠️ nvidia-smi returned error, using CPU mode")
21
- return None
22
- except (FileNotFoundError, subprocess.SubprocessError, Exception) as e:
23
- print(f"💻 No GPU detected, using CPU mode: {e}")
24
- return None
25
-
26
- # Set CUDA_VISIBLE_DEVICES only if GPU is available
27
- gpu_device = get_freest_cuda_device_id()
28
- if gpu_device is not None:
29
- os.environ["CUDA_VISIBLE_DEVICES"] = gpu_device
30
- else:
31
- # Force CPU mode
32
- os.environ["CUDA_VISIBLE_DEVICES"] = ""
33
- print("🔧 Configured for CPU-only execution")
34
 
35
 
36
  import torch
@@ -57,35 +44,17 @@ class AuroraPipeline:
57
  device=None,
58
  cpu_only=False):
59
 
60
- # Device selection with CPU fallback
61
- if cpu_only or device == "cpu":
62
- self.device = "cpu"
63
- print("💻 Aurora configured for CPU-only execution")
64
- elif device is None:
65
- # Auto-detect: prefer CPU for local testing, GPU for production
66
- if torch.cuda.is_available() and not cpu_only:
67
- self.device = "cuda:0"
68
- print(f"🎮 Aurora using GPU: {self.device}")
69
- else:
70
- self.device = "cpu"
71
- print("💻 Aurora using CPU (GPU not available or CPU forced)")
72
- else:
73
- self.device = device
74
- print(f"🔧 Aurora using specified device: {self.device}")
75
 
76
  self.extracted_dir = Path(extracted_dir)
77
  self.static_path = Path(static_path)
78
  self.model_ckpt = model_ckpt
79
  self.model_repo = model_repo
80
- self.cpu_only = cpu_only or (self.device == "cpu")
81
-
82
- # Load static variables with error handling
83
- try:
84
- self.static_vars = self._load_static_vars()
85
- except Exception as e:
86
- print(f"⚠️ Warning: Could not load static variables: {e}")
87
- self.static_vars = {}
88
-
89
  self.model = None
90
 
91
  def _load_static_vars(self):
@@ -100,8 +69,15 @@ class AuroraPipeline:
100
  static_vars = pickle.load(f)
101
  return static_vars
102
 
103
- def create_batch(self, date_str, Batch, Metadata):
104
- """Create a batch for Aurora model from CAMS data"""
105
  surface_path = self.extracted_dir / f"{date_str}-cams-surface.nc"
106
  atmos_path = self.extracted_dir / f"{date_str}-cams-atmospheric.nc"
107
  if not surface_path.exists() or not atmos_path.exists():
@@ -110,9 +86,15 @@ class AuroraPipeline:
110
  surf_vars_ds = xr.open_dataset(surface_path, engine="netcdf4", decode_timedelta=True)
111
  atmos_vars_ds = xr.open_dataset(atmos_path, engine="netcdf4", decode_timedelta=True)
112
 
113
- # Select zero-hour forecast
114
  surf_vars_ds = surf_vars_ds.isel(forecast_period=0)
115
  atmos_vars_ds = atmos_vars_ds.isel(forecast_period=0)
116
 
117
  batch = Batch(
118
  surf_vars={
@@ -145,19 +127,17 @@ class AuroraPipeline:
145
  metadata=Metadata(
146
  lat=torch.from_numpy(atmos_vars_ds.latitude.values),
147
  lon=torch.from_numpy(atmos_vars_ds.longitude.values),
148
- time=(atmos_vars_ds.valid_time.values.astype("datetime64[s]").tolist()[-1],),
149
  atmos_levels=tuple(int(level) for level in atmos_vars_ds.pressure_level.values),
150
  ),
151
  )
152
  return batch
153
  def load_model(self, AuroraAirPollution):
154
- """Load Aurora model with CPU/GPU optimization"""
155
  import gc
156
 
157
- print(f"🔄 Loading Aurora model on {self.device}")
158
-
159
- # Memory check for GPU
160
- if self.device != "cpu" and torch.cuda.is_available():
161
 print(f"📊 GPU Memory BEFORE loading model:")
162
  print(f" Allocated: {torch.cuda.memory_allocated(0) / 1024**3:.2f} GB")
163
  print(f" Reserved: {torch.cuda.memory_reserved(0) / 1024**3:.2f} GB")
@@ -166,115 +146,209 @@ class AuroraPipeline:
166
  # Clear cache
167
  if torch.cuda.is_available():
168
  torch.cuda.empty_cache()
169
- gc.collect()
170
 
171
- # Initialize model with CPU-friendly settings
172
- if self.cpu_only:
173
- print("💻 Initializing model for CPU execution...")
174
- # Set CPU-friendly torch settings
175
- torch.set_num_threads(2) # Limit CPU threads for local testing
176
- model = AuroraAirPollution()
177
- else:
178
- model = AuroraAirPollution()
179
 
180
- # Load checkpoint with device mapping
181
- try:
182
- if self.cpu_only:
183
- print("๐Ÿ“ Loading checkpoint for CPU execution...")
184
- # For CPU mode, we may need to handle device mapping differently
185
- model.load_checkpoint(self.model_repo, self.model_ckpt)
186
- else:
187
- print("๐Ÿ“ Loading checkpoint for GPU execution...")
188
- model.load_checkpoint(self.model_repo, self.model_ckpt)
189
- except Exception as e:
190
- print(f"⚠️ Checkpoint loading failed: {e}")
191
- print("🔄 Trying alternative loading method...")
192
- try:
193
- # Alternative: try loading without any special parameters
194
- model.load_checkpoint(self.model_repo, self.model_ckpt)
195
- print("✅ Checkpoint loaded successfully with fallback method")
196
- except Exception as e2:
197
- print(f"❌ All loading methods failed: {e2}")
198
- # Set device to CPU as last resort
199
- self.device = "cpu"
200
- self.cpu_only = True
201
- raise RuntimeError(f"Failed to load Aurora model: {e2}")
202
 
203
- model.eval()
204
 
205
- # Move to device
206
  model = model.to(self.device)
207
 
208
- # Memory check after loading
209
- if self.device != "cpu" and torch.cuda.is_available():
210
- print(f"📊 GPU Memory AFTER model load:")
211
  print(f" Allocated: {torch.cuda.memory_allocated(0) / 1024**3:.2f} GB")
212
  print(f" Reserved: {torch.cuda.memory_reserved(0) / 1024**3:.2f} GB")
213
 
214
  self.model = model
215
- print(f"✅ Aurora model loaded on {self.device}")
216
-
217
- if self.cpu_only:
218
- print("⚠️ WARNING: CPU mode will be slower than GPU. Consider using fewer steps for faster inference.")
219
-
220
  return model
221
 
222
  def predict(self, batch, rollout, steps=4):
223
- """Run model prediction with CPU/GPU optimization"""
224
  if self.model is None:
225
  raise RuntimeError("Model not loaded. Call load_model() first.")
226
 
227
- # Limit steps for CPU to avoid memory issues
228
- if self.cpu_only and steps > 2:
229
- print(f"⚠️ CPU mode: reducing steps from {steps} to 2 for memory efficiency")
230
- steps = 2
231
-
232
- print(f"🔄 Running {steps} prediction steps on {self.device}...")
233
-
234
  # Move batch to device
235
  batch = batch.to(self.device)
236
 
237
- # CPU-friendly inference settings
238
- if self.cpu_only:
239
- torch.set_grad_enabled(False) # Disable gradients for inference
240
-
241
  with torch.inference_mode():
242
- predictions = []
243
- for step in range(steps):
244
- print(f" Step {step + 1}/{steps}...")
245
- if step == 0:
246
- # First prediction from initial batch
247
- pred_generator = rollout(self.model, batch, steps=1)
248
- pred = next(pred_generator)
249
- else:
250
- # Subsequent predictions from previous output
251
- pred_generator = rollout(self.model, pred, steps=1)
252
- pred = next(pred_generator)
253
-
254
- # Move to CPU immediately to save memory
255
- predictions.append(pred.to("cpu"))
256
-
257
- # Clear GPU cache after each step if using GPU
258
- if not self.cpu_only and torch.cuda.is_available():
259
- torch.cuda.empty_cache()
260
 
261
- print(f"✅ Completed {len(predictions)} prediction steps")
262
  return predictions
263
 
264
- def save_predictions_to_netcdf(self, predictions, output_path):
265
- """Save all prediction steps to a single NetCDF file compatible with visualization pipeline"""
266
- output_path = Path(output_path)
267
- output_path.parent.mkdir(parents=True, exist_ok=True)
268
-
269
- print(f"💾 Saving {len(predictions)} prediction steps to {output_path}")
270
 
271
- try:
272
- # Try the new single-file method
273
- return self._save_predictions_single_file(predictions, output_path)
274
- except Exception as e:
275
- print(f"⚠️ Single file method failed: {e}")
276
- print(f"🔄 Falling back to original method...")
277
- return self._save_predictions_original_method(predictions, output_path)
278
 
279
  def _save_predictions_single_file(self, predictions, output_path):
280
  """Save all prediction steps to a single NetCDF file (new method)"""
@@ -357,7 +431,7 @@ class AuroraPipeline:
357
 
358
  # Stack along step dimension: (steps, levels, lat, lon)
359
  arr = np.stack(var_data_list, axis=0)
360
- data_vars[f"{var}_atmos"] = (['step', 'pressure_level', 'lat', 'lon'], arr)
361
 
362
  # Create dataset
363
  ds = xr.Dataset(data_vars, coords=coords)
@@ -466,6 +540,91 @@ class AuroraPipeline:
466
  self.save_predictions_to_netcdf(predictions, output_path)
467
  return predictions
468

469
  # Example usage (not run on import)
470
  if __name__ == "__main__":
471
  pass
 
4
  import os
5
 
6
  def get_freest_cuda_device_id():
 
7
  try:
 
8
  result = subprocess.run(
9
  ['nvidia-smi', '--query-gpu=memory.free', '--format=csv,nounits,noheader'],
10
+ stdout=subprocess.PIPE, encoding='utf-8'
11
  )
12
+ memory_free = [int(x) for x in result.stdout.strip().split('\n')]
13
+ device_id = memory_free.index(max(memory_free))
14
+ return str(device_id)
15
+ except Exception as e:
16
+ print(f"Could not query nvidia-smi, defaulting to 0. Error: {e}")
17
+ return "0"
18
+
19
+ # Set CUDA_VISIBLE_DEVICES before importing torch
20
+ os.environ["CUDA_VISIBLE_DEVICES"] = get_freest_cuda_device_id()
21
 
22
 
23
  import torch
 
44
  device=None,
45
  cpu_only=False):
46
 
47
+ if device is None or device == "cuda":
48
+ # CUDA_VISIBLE_DEVICES is set, so use 'cuda:0'
49
+ device = "cuda:0" if torch.cuda.is_available() and not cpu_only else "cpu"
50
 
51
  self.extracted_dir = Path(extracted_dir)
52
  self.static_path = Path(static_path)
53
  self.model_ckpt = model_ckpt
54
  self.model_repo = model_repo
55
+ self.device = device
56
+ self.cpu_only = cpu_only or (device == "cpu")
57
+ self.static_vars = self._load_static_vars()
58
  self.model = None
59
 
60
  def _load_static_vars(self):
 
69
  static_vars = pickle.load(f)
70
  return static_vars
71
 
72
+ def create_batch(self, date_str, Batch, Metadata, time_index=1):
73
+ """Create a batch for Aurora model from CAMS data
74
+
75
+ Args:
76
+ date_str: Date string (YYYY-MM-DD)
77
+ Batch: Aurora Batch class
78
+ Metadata: Aurora Metadata class
79
+ time_index: 0 for T-1 (first time), 1 for T (second time)
80
+ """
81
  surface_path = self.extracted_dir / f"{date_str}-cams-surface.nc"
82
  atmos_path = self.extracted_dir / f"{date_str}-cams-atmospheric.nc"
83
  if not surface_path.exists() or not atmos_path.exists():
 
86
  surf_vars_ds = xr.open_dataset(surface_path, engine="netcdf4", decode_timedelta=True)
87
  atmos_vars_ds = xr.open_dataset(atmos_path, engine="netcdf4", decode_timedelta=True)
88
 
89
+ # Select zero-hour forecast but keep both time steps
90
  surf_vars_ds = surf_vars_ds.isel(forecast_period=0)
91
  atmos_vars_ds = atmos_vars_ds.isel(forecast_period=0)
92
+
93
+ # Don't select time index - Aurora needs both T-1 and T as input
94
+ print(f"🕐 Using both time steps (T-1 and T) as input for Aurora")
95
+
96
+ # Get the time for metadata (use the specified time_index for metadata only)
97
+ selected_time = surf_vars_ds.forecast_reference_time.values[time_index].astype("datetime64[s]").tolist()
98
 
99
  batch = Batch(
100
  surf_vars={
 
127
  metadata=Metadata(
128
  lat=torch.from_numpy(atmos_vars_ds.latitude.values),
129
  lon=torch.from_numpy(atmos_vars_ds.longitude.values),
130
+ time=(selected_time,),
131
  atmos_levels=tuple(int(level) for level in atmos_vars_ds.pressure_level.values),
132
  ),
133
  )
134
  return batch
135
  def load_model(self, AuroraAirPollution):
136
+ """Load Aurora model and move to device"""
137
  import gc
138
 
139
+ # Check memory BEFORE loading
140
+ if torch.cuda.is_available():
 
  print(f"๐Ÿ“Š GPU Memory BEFORE loading model:")
142
  print(f" Allocated: {torch.cuda.memory_allocated(0) / 1024**3:.2f} GB")
143
  print(f" Reserved: {torch.cuda.memory_reserved(0) / 1024**3:.2f} GB")
 
146
  # Clear cache
147
  if torch.cuda.is_available():
148
  torch.cuda.empty_cache()
149
+ gc.collect()
150
 
151
+ model = AuroraAirPollution()
152
 
153
+ # Check AFTER initialization but BEFORE loading checkpoint
154
+ if torch.cuda.is_available():
155
+ print(f"📊 GPU Memory AFTER model init:")
156
+ print(f" Allocated: {torch.cuda.memory_allocated(0) / 1024**3:.2f} GB")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
157
 
158
+ model.load_checkpoint(self.model_repo, self.model_ckpt)
159
 
160
+ # Check AFTER loading checkpoint
161
+ if torch.cuda.is_available():
162
+ print(f"📊 GPU Memory AFTER checkpoint load:")
163
+ print(f" Allocated: {torch.cuda.memory_allocated(0) / 1024**3:.2f} GB")
164
+
165
+ model.eval()
166
  model = model.to(self.device)
167
 
168
+ # Check AFTER moving to device
169
+ if torch.cuda.is_available():
170
+ print(f"📊 GPU Memory AFTER moving to device:")
171
  print(f" Allocated: {torch.cuda.memory_allocated(0) / 1024**3:.2f} GB")
172
  print(f" Reserved: {torch.cuda.memory_reserved(0) / 1024**3:.2f} GB")
173
 
174
  self.model = model
175
+ print(f"✅ Model loaded on {self.device}")
176
  return model
177
 
178
  def predict(self, batch, rollout, steps=4):
 
179
  if self.model is None:
180
  raise RuntimeError("Model not loaded. Call load_model() first.")
181

182
  # Move batch to device
183
  batch = batch.to(self.device)
184

185
  with torch.inference_mode():
186
+ predictions = [pred.to("cpu") for pred in rollout(self.model, batch, steps=steps)]
187
 
 
188
  return predictions
189
 
190
+ def save_predictions_to_netcdf(self, predictions, output_dir, date_str):
191
+ """Save each prediction step as separate NetCDF files in CAMS format"""
192
+ output_dir = Path(output_dir)
193
+ output_dir.mkdir(parents=True, exist_ok=True)
194
 
195
+ print(f"💾 Saving {len(predictions)} prediction steps as separate files")
196
+
197
+ generation_date = datetime.now().strftime("%Y%m%d")
198
+ saved_files = []
199
+
200
+ for step_idx, pred in enumerate(predictions):
201
+ step_num = step_idx + 1
202
+
203
+ # Create filename: predictiondate_step_generationdate.nc
204
+ filename = f"{date_str}_step{step_num:02d}_{generation_date}.nc"
205
+ file_path = output_dir / filename
206
+
207
+ # Extract coordinates from first prediction
208
+ metadata = pred.metadata
209
+ lats = metadata.lat.cpu().numpy() if hasattr(metadata.lat, 'cpu') else metadata.lat.numpy()
210
+ lons = metadata.lon.cpu().numpy() if hasattr(metadata.lon, 'cpu') else metadata.lon.numpy()
211
+
212
+ # Create CAMS-compatible coordinates and dimensions
213
+ # CAMS format uses: forecast_period, forecast_reference_time, latitude, longitude
214
+ coords = {
215
+ 'forecast_period': ('forecast_period', [0]), # Single forecast period
216
+ 'forecast_reference_time': ('forecast_reference_time', [0, 1]), # Two reference times (T-1, T)
217
+ 'latitude': ('latitude', lats),
218
+ 'longitude': ('longitude', lons)
219
+ }
220
+
221
+ # Add valid_time variable (CAMS format)
222
+ data_vars = {
223
+ 'valid_time': (['forecast_reference_time', 'forecast_period'],
224
+ np.array([[step_num * 12], [step_num * 12]])) # Same forecast hours for both ref times
225
+ }
226
+
227
+ # Add surface variables in CAMS format: (forecast_period, forecast_reference_time, latitude, longitude)
228
+ # Map Aurora variable names to CAMS variable names
229
+ aurora_to_cams_surface = {
230
+ '2t': 't2m', # 2 metre temperature
231
+ '10u': 'u10', # 10 metre U wind component
232
+ '10v': 'v10', # 10 metre V wind component
233
+ 'msl': 'msl', # Mean sea level pressure (same)
234
+ 'pm1': 'pm1', # PM1 (same)
235
+ 'pm2p5': 'pm2p5', # PM2.5 (same)
236
+ 'pm10': 'pm10', # PM10 (same)
237
+ 'tcco': 'tcco', # Total column CO (same)
238
+ 'tc_no': 'tc_no', # Total column NO (same)
239
+ 'tcno2': 'tcno2', # Total column NO2 (same)
240
+ 'gtco3': 'gtco3', # Total column O3 (same)
241
+ 'tcso2': 'tcso2' # Total column SO2 (same)
242
+ }
243
+
244
+ for aurora_var, var_tensor in pred.surf_vars.items():
245
+ cams_var = aurora_to_cams_surface.get(aurora_var, aurora_var) # Use CAMS name or fallback to Aurora name
246
+
247
+ var_data = var_tensor.cpu().numpy() if hasattr(var_tensor, 'cpu') else var_tensor.numpy()
248
+ var_data = np.squeeze(var_data)
249
+
250
+ # Ensure 2D for surface variables
251
+ if var_data.ndim > 2:
252
+ while var_data.ndim > 2:
253
+ var_data = var_data[0]
254
+ elif var_data.ndim < 2:
255
+ raise ValueError(f"Surface variable {aurora_var} has insufficient dimensions: {var_data.shape}")
256
+
257
+ # Expand to CAMS format: (1, 2, lat, lon) - same data for both forecast reference times
258
+ cams_data = np.broadcast_to(var_data[np.newaxis, np.newaxis, :, :], (1, 2, var_data.shape[0], var_data.shape[1]))
259
+ data_vars[cams_var] = (['forecast_period', 'forecast_reference_time', 'latitude', 'longitude'], cams_data)
260
+
261
+ # Add atmospheric variables if present
262
+ # CAMS format: (forecast_period, forecast_reference_time, pressure_level, latitude, longitude)
263
+ # Map Aurora atmospheric variable names to CAMS names
264
+ aurora_to_cams_atmos = {
265
+ 't': 't', # Temperature (same)
266
+ 'u': 'u', # U wind component (same)
267
+ 'v': 'v', # V wind component (same)
268
+ 'q': 'q', # Specific humidity (same)
269
+ 'z': 'z', # Geopotential (same)
270
+ 'co': 'co', # Carbon monoxide (same)
271
+ 'no': 'no', # Nitrogen monoxide (same)
272
+ 'no2': 'no2', # Nitrogen dioxide (same)
273
+ 'go3': 'go3', # Ozone (same)
274
+ 'so2': 'so2' # Sulphur dioxide (same)
275
+ }
276
+ if hasattr(pred, 'atmos_vars') and pred.atmos_vars:
277
+ atmos_levels = list(metadata.atmos_levels) if hasattr(metadata, 'atmos_levels') else None
278
+ if atmos_levels:
279
+ coords['pressure_level'] = ('pressure_level', atmos_levels)
280
+
281
+ for aurora_var, var_tensor in pred.atmos_vars.items():
282
+ cams_var = aurora_to_cams_atmos.get(aurora_var, aurora_var) # Use CAMS name or fallback
283
+
284
+ var_data = var_tensor.cpu().numpy() if hasattr(var_tensor, 'cpu') else var_tensor.numpy()
285
+ var_data = np.squeeze(var_data)
286
+
287
+ # Ensure 3D for atmospheric variables (pressure, lat, lon)
288
+ if var_data.ndim > 3:
289
+ while var_data.ndim > 3:
290
+ var_data = var_data[0]
291
+ elif var_data.ndim < 3:
292
+ raise ValueError(f"Atmospheric variable {aurora_var} has insufficient dimensions: {var_data.shape}")
293
+
294
+ # Expand to CAMS format: (1, 2, pressure, lat, lon) - same data for both forecast reference times
295
+ cams_data = np.broadcast_to(var_data[np.newaxis, np.newaxis, :, :, :],
296
+ (1, 2, var_data.shape[0], var_data.shape[1], var_data.shape[2]))
297
+ data_vars[cams_var] = (['forecast_period', 'forecast_reference_time', 'pressure_level', 'latitude', 'longitude'], cams_data)
298
+
299
+ # Create dataset for this step
300
+ ds = xr.Dataset(data_vars, coords=coords)
301
+
302
+ # Add attributes
303
+ ds.attrs.update({
304
+ 'title': f'Aurora Air Pollution Prediction - Step {step_num}',
305
+ 'source': 'Aurora model by Microsoft Research',
306
+ 'prediction_date': date_str,
307
+ 'step': step_num,
308
+ 'forecast_hours': step_num * 12,
309
+ 'generation_date': generation_date,
310
+ 'creation_time': datetime.now().isoformat(),
311
+ 'spatial_resolution': f"{abs(lons[1] - lons[0]):.3f} degrees"
312
+ })
313
+
314
+ # Add variable attributes (using CAMS variable names)
315
+ var_attrs = {
316
+ 't2m': {'long_name': '2 metre temperature', 'units': 'K'},
317
+ 'u10': {'long_name': '10 metre U wind component', 'units': 'm s-1'},
318
+ 'v10': {'long_name': '10 metre V wind component', 'units': 'm s-1'},
319
+ 'msl': {'long_name': 'Mean sea level pressure', 'units': 'Pa'},
320
+ 'pm1': {'long_name': 'Particulate matter d < 1 um', 'units': 'kg m-3'},
321
+ 'pm2p5': {'long_name': 'Particulate matter d < 2.5 um', 'units': 'kg m-3'},
322
+ 'pm10': {'long_name': 'Particulate matter d < 10 um', 'units': 'kg m-3'},
323
+ 'tcco': {'long_name': 'Total column carbon monoxide', 'units': 'kg m-2'},
324
+ 'tc_no': {'long_name': 'Total column nitrogen monoxide', 'units': 'kg m-2'},
325
+ 'tcno2': {'long_name': 'Total column nitrogen dioxide', 'units': 'kg m-2'},
326
+ 'gtco3': {'long_name': 'Total column ozone', 'units': 'kg m-2'},
327
+ 'tcso2': {'long_name': 'Total column sulphur dioxide', 'units': 'kg m-2'},
328
+ # Atmospheric variables
329
+ 't': {'long_name': 'Temperature', 'units': 'K'},
330
+ 'u': {'long_name': 'U component of wind', 'units': 'm s-1'},
331
+ 'v': {'long_name': 'V component of wind', 'units': 'm s-1'},
332
+ 'q': {'long_name': 'Specific humidity', 'units': 'kg kg-1'},
333
+ 'z': {'long_name': 'Geopotential', 'units': 'm2 s-2'},
334
+ 'co': {'long_name': 'Carbon monoxide', 'units': 'kg kg-1'},
335
+ 'no': {'long_name': 'Nitrogen monoxide', 'units': 'kg kg-1'},
336
+ 'no2': {'long_name': 'Nitrogen dioxide', 'units': 'kg kg-1'},
337
+ 'go3': {'long_name': 'Ozone', 'units': 'kg kg-1'},
338
+ 'so2': {'long_name': 'Sulphur dioxide', 'units': 'kg kg-1'}
339
+ }
340
+
341
+ for var_name, attrs in var_attrs.items():
342
+ if var_name in ds.data_vars:
343
+ ds[var_name].attrs.update(attrs)
344
+
345
+ # Save to NetCDF
346
+ ds.to_netcdf(file_path, format='NETCDF4')
347
+ saved_files.append(str(file_path))
348
+ print(f" ✅ Step {step_num}: {filename}")
349
+
350
+ print(f"✅ Saved {len(saved_files)} prediction files")
351
+ return saved_files
352
 
353
  def _save_predictions_single_file(self, predictions, output_path):
354
  """Save all prediction steps to a single NetCDF file (new method)"""
 
431
 
432
  # Stack along step dimension: (steps, levels, lat, lon)
433
  arr = np.stack(var_data_list, axis=0)
434
+ data_vars[var] = (['step', 'pressure_level', 'lat', 'lon'], arr)
435
 
436
  # Create dataset
437
  ds = xr.Dataset(data_vars, coords=coords)
 
540
  self.save_predictions_to_netcdf(predictions, output_path)
541
  return predictions
542
 
543
+ def run_aurora_prediction_pipeline(self, date_str, Batch, Metadata, AuroraAirPollution, rollout, steps=4, base_predictions_dir="predictions"):
544
+ """Enhanced Aurora prediction pipeline with organized storage"""
545
+ print(f"🚀 Starting Aurora prediction pipeline for {date_str}")
546
+ print(f"📊 Forward prediction steps: {steps} (covering {steps * 12} hours)")
547
+
548
+ # Create organized directory structure
549
+ run_timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
550
+ run_dir = Path(base_predictions_dir) / f"{date_str}_run_{run_timestamp}"
551
+ run_dir.mkdir(parents=True, exist_ok=True)
552
+
553
+ # Load model once
554
+ print("🧠 Loading Aurora model...")
555
+ self.load_model(AuroraAirPollution)
556
+
557
+ # Use the latest timestamp (index 1) for prediction
558
+ print("📥 Creating input batch for T (second time)...")
559
+ batch = self.create_batch(date_str, Batch, Metadata, time_index=1)
560
+
561
+ # Run predictions
562
+ print(f"⚡ Running {steps} prediction steps...")
563
+ predictions = self.predict(batch, rollout, steps=steps)
564
+
565
+ # Save predictions as separate files
566
+ saved_files = self.save_predictions_to_netcdf(predictions, run_dir, date_str)
567
+
568
+ # Save metadata about the run
569
+ run_metadata = {
570
+ "date": date_str,
571
+ "run_timestamp": run_timestamp,
572
+ "steps": steps,
573
+ "time_coverage_hours": steps * 12,
574
+ "input_times": ["T-1", "T"],
575
+ "prediction_files": saved_files,
576
+ "run_directory": str(run_dir)
577
+ }
578
+
579
+ metadata_file = run_dir / "run_metadata.json"
580
+ with open(metadata_file, 'w') as f:
581
+ import json
582
+ json.dump(run_metadata, f, indent=2)
583
+
584
+ print(f"✅ Aurora prediction pipeline completed")
585
+ print(f"📁 Results saved to: {run_dir}")
586
+ print(f"📊 Coverage: {steps * 12} hours forward from {date_str}")
587
+
588
+ return run_metadata
589
+
590
+ @staticmethod
591
+ def list_prediction_runs(base_predictions_dir="predictions"):
592
+ """List all available prediction runs with metadata"""
593
+ runs = []
594
+ predictions_path = Path(base_predictions_dir)
595
+
596
+ if not predictions_path.exists():
597
+ return runs
598
+
599
+ for run_dir in predictions_path.iterdir():
600
+ if run_dir.is_dir() and "_run_" in run_dir.name:
601
+ metadata_file = run_dir / "run_metadata.json"
602
+
603
+ if metadata_file.exists():
604
+ try:
605
+ import json
606
+ with open(metadata_file, 'r') as f:
607
+ metadata = json.load(f)
608
+
609
+ # Check if any prediction files exist (new format with separate step files)
610
+ nc_files = list(run_dir.glob("*.nc"))
611
+ has_predictions = len(nc_files) > 0
612
+
613
+ # Add additional info
614
+ metadata['available'] = has_predictions
615
+ metadata['run_dir'] = str(run_dir)
616
+ metadata['relative_path'] = run_dir.name
617
+ metadata['prediction_files'] = [f.name for f in nc_files]
618
+ metadata['num_files'] = len(nc_files)
619
+
620
+ runs.append(metadata)
621
+ except Exception as e:
622
+ print(f"⚠️ Could not read metadata for {run_dir}: {e}")
623
+
624
+ # Sort by run timestamp (newest first)
625
+ runs.sort(key=lambda x: x.get('run_timestamp', ''), reverse=True)
626
+ return runs
627
+
628
  # Example usage (not run on import)
629
  if __name__ == "__main__":
630
  pass
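
As a quick check of the per-step output format written by save_predictions_to_netcdf above, the sketch below opens one saved step file; the run directory name is hypothetical and it assumes at least one completed run exists under predictions/:

from pathlib import Path
import xarray as xr

run_dir = Path("predictions") / "2024-01-01_run_20240102_120000"  # hypothetical run directory
step_file = sorted(run_dir.glob("*_step01_*.nc"))[0]              # first forecast step (T+12h)

ds = xr.open_dataset(step_file)
# Surface fields use the CAMS-style layout written above:
# (forecast_period, forecast_reference_time, latitude, longitude)
print(ds["pm2p5"].dims)
print(ds.attrs["forecast_hours"])  # step number * 12
ds.close()
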
data_processor.py CHANGED
@@ -12,6 +12,14 @@ import xarray as xr
12
  from pathlib import Path
13
  from datetime import datetime
14

15
  # Imports from our Modules
16
  from constants import NETCDF_VARIABLES, AIR_POLLUTION_VARIABLES, PRESSURE_LEVELS
17
  warnings.filterwarnings('ignore')
@@ -124,6 +132,12 @@ class NetCDFProcessor:
124
 
125
  for var_name in dataset.data_vars:
126
 var_name_lower = var_name.lower()
127
 
128
  # Check exact matches first in NETCDF_VARIABLES
129
  if var_name in NETCDF_VARIABLES:
@@ -131,14 +145,18 @@ class NetCDFProcessor:
131
  detected[var_name]['original_name'] = var_name
132
  detected[var_name]['dataset_type'] = dataset_type
133
  detected[var_name]['shape'] = dataset[var_name].shape
134
- detected[var_name]['dims'] = list(dataset[var_name].dims)
135
 
136
  elif var_name_lower in NETCDF_VARIABLES:
137
  detected[var_name] = NETCDF_VARIABLES[var_name_lower].copy()
138
  detected[var_name]['original_name'] = var_name
139
  detected[var_name]['dataset_type'] = dataset_type
140
  detected[var_name]['shape'] = dataset[var_name].shape
141
- detected[var_name]['dims'] = list(dataset[var_name].dims)
142
 
143
  else:
144
  # Auto-detect unknown variables by examining their attributes
@@ -159,7 +177,9 @@ class NetCDFProcessor:
159
  detected[var_name]['original_name'] = var_name
160
  detected[var_name]['dataset_type'] = dataset_type
161
  detected[var_name]['shape'] = dataset[var_name].shape
162
- detected[var_name]['dims'] = list(dataset[var_name].dims)
 
 
163
  if units != 'unknown':
164
  detected[var_name]['units'] = units # Use actual units from file
165
  matched = True
@@ -167,16 +187,13 @@ class NetCDFProcessor:
167
 
168
  # If still no match, create a generic entry for any 2D+ variable
169
  if not matched and len(dataset[var_name].dims) >= 2:
170
- # Check if it has lat/lon dimensions
171
- dims = list(dataset[var_name].dims)
172
  has_spatial = any(dim in ['lat', 'lon', 'latitude', 'longitude', 'x', 'y']
173
- for dim in [d.lower() for d in dims])
174
 
175
  if has_spatial:
176
- # Determine variable type based on dimensions
177
- var_type = 'surface'
178
- if any(dim in ['level', 'plev', 'pressure', 'height'] for dim in [d.lower() for d in dims]):
179
- var_type = 'atmospheric'
180
 
181
  # Auto-determine color scheme based on variable name or units
182
  cmap = 'viridis' # default
@@ -200,7 +217,7 @@ class NetCDFProcessor:
200
  'original_name': var_name,
201
  'dataset_type': dataset_type,
202
  'shape': dataset[var_name].shape,
203
- 'dims': dims,
204
  'auto_detected': True # Flag to indicate this was auto-detected
205
  }
206
 
@@ -405,9 +422,7 @@ class NetCDFProcessor:
405
  Returns:
406
  tuple: (cropped_data, cropped_lats, cropped_lons)
407
  """
408
- from constants import INDIA_BOUNDS
409
-
410
- # Define extended India bounds with some buffer
411
  lat_min = INDIA_BOUNDS['lat_min'] - 2 # 6-2 = 4°N
412
  lat_max = INDIA_BOUNDS['lat_max'] + 2 # 38+2 = 40°N
413
  lon_min = INDIA_BOUNDS['lon_min'] - 2 # 68-2 = 66°E
@@ -529,6 +544,218 @@ class NetCDFProcessor:
529
  pass # Dataset already closed or invalid
530
 
531

532
  def analyze_netcdf_file(file_path):
533
  """
534
  Analyze NetCDF file structure and return summary
 
12
  from pathlib import Path
13
  from datetime import datetime
14
 
15
+ # India geographical bounds for coordinate trimming
16
+ INDIA_BOUNDS = {
17
+ 'lat_min': 6.0, # Southern tip (including southern islands)
18
+ 'lat_max': 38.0, # Northern border (including Kashmir)
19
+ 'lon_min': 68.0, # Western border
20
+ 'lon_max': 98.0 # Eastern border (including Andaman & Nicobar)
21
+ }
22
+
23
  # Imports from our Modules
24
  from constants import NETCDF_VARIABLES, AIR_POLLUTION_VARIABLES, PRESSURE_LEVELS
25
  warnings.filterwarnings('ignore')
 
132
 
133
  for var_name in dataset.data_vars:
134
  var_name_lower = var_name.lower()
135
+ var_dims = list(dataset[var_name].dims)
136
+
137
+ # Determine actual variable type based on dimensions (not just dictionary)
138
+ actual_var_type = 'surface'
139
+ if any(dim in ['level', 'plev', 'pressure_level', 'height'] for dim in [d.lower() for d in var_dims]):
140
+ actual_var_type = 'atmospheric'
141
 
142
  # Check exact matches first in NETCDF_VARIABLES
143
  if var_name in NETCDF_VARIABLES:
 
145
  detected[var_name]['original_name'] = var_name
146
  detected[var_name]['dataset_type'] = dataset_type
147
  detected[var_name]['shape'] = dataset[var_name].shape
148
+ detected[var_name]['dims'] = var_dims
149
+ # Override type based on actual dimensions
150
+ detected[var_name]['type'] = actual_var_type
151
 
152
  elif var_name_lower in NETCDF_VARIABLES:
153
  detected[var_name] = NETCDF_VARIABLES[var_name_lower].copy()
154
  detected[var_name]['original_name'] = var_name
155
  detected[var_name]['dataset_type'] = dataset_type
156
  detected[var_name]['shape'] = dataset[var_name].shape
157
+ detected[var_name]['dims'] = var_dims
158
+ # Override type based on actual dimensions
159
+ detected[var_name]['type'] = actual_var_type
160
 
161
  else:
162
  # Auto-detect unknown variables by examining their attributes
 
177
  detected[var_name]['original_name'] = var_name
178
  detected[var_name]['dataset_type'] = dataset_type
179
  detected[var_name]['shape'] = dataset[var_name].shape
180
+ detected[var_name]['dims'] = var_dims
181
+ # Override type based on actual dimensions
182
+ detected[var_name]['type'] = actual_var_type
183
  if units != 'unknown':
184
  detected[var_name]['units'] = units # Use actual units from file
185
  matched = True
 
187
 
188
  # If still no match, create a generic entry for any 2D+ variable
189
  if not matched and len(dataset[var_name].dims) >= 2:
190
+ # Check if it has lat/lon dimensions
 
191
  has_spatial = any(dim in ['lat', 'lon', 'latitude', 'longitude', 'x', 'y']
192
+ for dim in [d.lower() for d in var_dims])
193
 
194
  if has_spatial:
195
+ # Use the already determined variable type
196
+ var_type = actual_var_type
 
 
197
 
198
  # Auto-determine color scheme based on variable name or units
199
  cmap = 'viridis' # default
 
217
  'original_name': var_name,
218
  'dataset_type': dataset_type,
219
  'shape': dataset[var_name].shape,
220
+ 'dims': var_dims,
221
  'auto_detected': True # Flag to indicate this was auto-detected
222
  }
223
 
 
422
  Returns:
423
  tuple: (cropped_data, cropped_lats, cropped_lons)
424
  """
425
+ # Use same India bounds as Aurora processor for consistency
 
 
426
  lat_min = INDIA_BOUNDS['lat_min'] - 2 # 6-2 = 4°N
427
  lat_max = INDIA_BOUNDS['lat_max'] + 2 # 38+2 = 40°N
428
  lon_min = INDIA_BOUNDS['lon_min'] - 2 # 68-2 = 66°E
 
544
  pass # Dataset already closed or invalid
545
 
546
 
547
+ class AuroraPredictionProcessor:
548
+ def __init__(self, file_path):
549
+ """
550
+ Initialize Aurora prediction processor for single NetCDF files with timestep data
551
+
552
+ Parameters:
553
+ file_path (str): Path to Aurora prediction NetCDF file
554
+ """
555
+ self.file_path = Path(file_path)
556
+ self.dataset = None
557
+ self.detected_variables = {}
558
+
559
+ def _trim_to_india_bounds(self, var, lats, lons):
560
+ """
561
+ Trim data and coordinates to India geographical bounds to reduce computation
562
+
563
+ Parameters:
564
+ var (xarray.DataArray): Variable data
565
+ lats (numpy.ndarray): Latitude coordinates
566
+ lons (numpy.ndarray): Longitude coordinates
567
+
568
+ Returns:
569
+ tuple: (trimmed_var, trimmed_lats, trimmed_lons)
570
+ """
571
+ # Find indices within India bounds
572
+ lat_mask = (lats >= INDIA_BOUNDS['lat_min']) & (lats <= INDIA_BOUNDS['lat_max'])
573
+ lon_mask = (lons >= INDIA_BOUNDS['lon_min']) & (lons <= INDIA_BOUNDS['lon_max'])
574
+
575
+ lat_indices = np.where(lat_mask)[0]
576
+ lon_indices = np.where(lon_mask)[0]
577
+
578
+ if len(lat_indices) == 0 or len(lon_indices) == 0:
579
+ # If no points in India bounds, return original data
580
+ return var, lats, lons
581
+
582
+ # Get min/max indices for slicing
583
+ lat_start, lat_end = lat_indices[0], lat_indices[-1] + 1
584
+ lon_start, lon_end = lon_indices[0], lon_indices[-1] + 1
585
+
586
+ # Trim coordinates
587
+ trimmed_lats = lats[lat_start:lat_end]
588
+ trimmed_lons = lons[lon_start:lon_end]
589
+
590
+ # Trim data - handle different dimension orders
591
+ if var.ndim == 2: # (lat, lon)
592
+ trimmed_var = var[lat_start:lat_end, lon_start:lon_end]
593
+ elif var.ndim == 3 and 'latitude' in var.dims and 'longitude' in var.dims:
594
+ # Find latitude and longitude dimension positions
595
+ lat_dim_pos = var.dims.index('latitude') if 'latitude' in var.dims else var.dims.index('lat')
596
+ lon_dim_pos = var.dims.index('longitude') if 'longitude' in var.dims else var.dims.index('lon')
597
+
598
+ if lat_dim_pos == 1 and lon_dim_pos == 2: # (time/level, lat, lon)
599
+ trimmed_var = var[:, lat_start:lat_end, lon_start:lon_end]
600
+ elif lat_dim_pos == 0 and lon_dim_pos == 1: # (lat, lon, time/level)
601
+ trimmed_var = var[lat_start:lat_end, lon_start:lon_end, :]
602
+ else:
603
+ # Fall back to original if dimension order is unexpected
604
+ return var, lats, lons
605
+ else:
606
+ # For other dimensions or if trimming fails, return original
607
+ return var, lats, lons
608
+
609
+ return trimmed_var, trimmed_lats, trimmed_lons
610
+
611
+ def load_dataset(self):
612
+ """Load Aurora prediction NetCDF dataset"""
613
+ try:
614
+ self.dataset = xr.open_dataset(self.file_path)
615
+ return True
616
+ except Exception as e:
617
+ raise Exception(f"Error loading Aurora prediction dataset: {str(e)}")
618
+
619
+ def extract_variable_data(self, variable_name, pressure_level=None, step=0):
620
+ """
621
+ Extract variable data from Aurora prediction file
622
+
623
+ Parameters:
624
+ variable_name (str): Name of the variable to extract
625
+ pressure_level (float, optional): Pressure level for atmospheric variables
626
+ step (int): Time step index (default: 0)
627
+
628
+ Returns:
629
+ tuple: (data_array, metadata_dict)
630
+ """
631
+ if self.dataset is None:
632
+ self.load_dataset()
633
+
634
+ if variable_name not in self.dataset.data_vars:
635
+ raise ValueError(f"Variable '{variable_name}' not found in dataset")
636
+
637
+ var = self.dataset[variable_name]
638
+
639
+ # Handle Aurora-specific dimensions
640
+ # Aurora files have: (forecast_period, forecast_reference_time, [pressure_level], latitude, longitude)
641
+
642
+ # First, squeeze singleton forecast_period dimension
643
+ if 'forecast_period' in var.dims and var.sizes['forecast_period'] == 1:
644
+ var = var.squeeze('forecast_period')
645
+
646
+ # Handle forecast_reference_time - take the first one (index 0)
647
+ if 'forecast_reference_time' in var.dims:
648
+ var = var.isel(forecast_reference_time=0)
649
+
650
+ # Handle step dimension if present (for backward compatibility)
651
+ if 'step' in var.dims:
652
+ if step >= var.sizes['step']:
653
+ raise ValueError(f"Step {step} not available. Dataset has {var.sizes['step']} steps.")
654
+ var = var.isel(step=step)
655
+
656
+ # Handle pressure level dimension if present
657
+ if pressure_level is not None and 'pressure_level' in var.dims:
658
+ pressure_level = float(pressure_level)
659
+ # Find closest pressure level
660
+ available_levels = var.pressure_level.values
661
+ closest_idx = np.argmin(np.abs(available_levels - pressure_level))
662
+ actual_level = available_levels[closest_idx]
663
+ var = var.isel(pressure_level=closest_idx)
664
+ pressure_info = f"at {actual_level:.0f} hPa"
665
+ else:
666
+ pressure_info = None
667
+
668
+ # Handle different coordinate naming conventions
669
+ if 'latitude' in self.dataset.coords:
670
+ lats = self.dataset['latitude'].values
671
+ lons = self.dataset['longitude'].values
672
+ else:
673
+ lats = self.dataset['lat'].values if 'lat' in self.dataset else self.dataset['latitude'].values
674
+ lons = self.dataset['lon'].values if 'lon' in self.dataset else self.dataset['longitude'].values
675
+
676
+ # Trim data and coordinates to India bounds to reduce computation
677
+ var, lats, lons = self._trim_to_india_bounds(var, lats, lons)
678
+
679
+ # Extract trimmed data
680
+ data_values = var.values
681
+
682
+ # Get variable information
683
+ from constants import NETCDF_VARIABLES
684
+ var_info = NETCDF_VARIABLES.get(variable_name, {})
685
+ display_name = var_info.get('name', variable_name.replace('_', ' ').title())
686
+ units = var.attrs.get('units', var_info.get('units', ''))
687
+
688
+ # Prepare metadata
689
+ metadata = {
690
+ 'variable_name': variable_name,
691
+ 'display_name': display_name,
692
+ 'units': units,
693
+ 'lats': lats,
694
+ 'lons': lons,
695
+ 'pressure_level': pressure_level if pressure_level else None,
696
+ 'pressure_info': pressure_info,
697
+ 'step': step,
698
+ 'data_shape': data_values.shape,
699
+ 'source': 'Aurora Prediction',
700
+ 'file_path': str(self.file_path),
701
+ }
702
+
703
+ # Add timestamp information
704
+ # For Aurora predictions, step represents the forecast step (12-hour intervals)
705
+ hours_from_start = (step + 1) * 12 # Assuming 12-hour intervals
706
+ metadata['timestamp_str'] = f"T+{hours_from_start}h (Step {step + 1})"
707
+
708
+ return data_values, metadata
709
+
710
+ def get_available_variables(self):
711
+ """Get list of available variables categorized by type"""
712
+ if self.dataset is None:
713
+ self.load_dataset()
714
+
715
+ surface_vars = []
716
+ atmospheric_vars = []
717
+
718
+ for var_name in self.dataset.data_vars:
719
+ var = self.dataset[var_name]
720
+ # Check if variable has pressure level dimension
721
+ if 'pressure_level' in var.dims:
722
+ atmospheric_vars.append(var_name)
723
+ else:
724
+ surface_vars.append(var_name)
725
+
726
+ return {
727
+ 'surface_vars': surface_vars,
728
+ 'atmospheric_vars': atmospheric_vars
729
+ }
730
+
731
+ def get_available_pressure_levels(self):
732
+ """Get available pressure levels"""
733
+ if self.dataset is None:
734
+ self.load_dataset()
735
+
736
+ if 'pressure_level' in self.dataset.coords:
737
+ return self.dataset.pressure_level.values.tolist()
738
+ return []
739
+
740
+ def get_available_steps(self):
741
+ """Get available time steps"""
742
+ if self.dataset is None:
743
+ self.load_dataset()
744
+
745
+ if 'step' in self.dataset.dims:
746
+ return list(range(self.dataset.sizes['step']))
747
+ return [0]
748
+
749
+ def close(self):
750
+ """Close the dataset safely"""
751
+ try:
752
+ if self.dataset is not None:
753
+ self.dataset.close()
754
+ self.dataset = None
755
+ except (RuntimeError, OSError):
756
+ pass # Dataset already closed or invalid
757
+
758
+
759
  def analyze_netcdf_file(file_path):
760
  """
761
  Analyze NetCDF file structure and return summary
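Example (illustrative only): driving the new `AuroraPredictionProcessor` on a single step file. The file path below is a placeholder, and 500 hPa is an arbitrary level (the processor selects the closest available one).

from data_processor import AuroraPredictionProcessor

# Hypothetical step file produced by an Aurora run; replace with a real path
proc = AuroraPredictionProcessor("predictions/example_run/step_01.nc")
try:
    variables = proc.get_available_variables()
    print("surface:", variables["surface_vars"])
    print("atmospheric:", variables["atmospheric_vars"])

    # Surface variable: no pressure level required
    if variables["surface_vars"]:
        data, meta = proc.extract_variable_data(variables["surface_vars"][0])
        print(meta["display_name"], meta["units"], meta["timestamp_str"], data.shape)

    # Atmospheric variable near 500 hPa (closest level is chosen internally)
    if variables["atmospheric_vars"]:
        data, meta = proc.extract_variable_data(variables["atmospheric_vars"][0], pressure_level=500)
        print(meta["pressure_info"], meta["data_shape"])
finally:
    proc.close()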
interactive_plot_generator.py CHANGED
@@ -378,6 +378,10 @@ class InteractiveIndiaMapPlotter:
378
 
379
  def _save_html_plot(self, fig, var_name, display_name, pressure_level, color_theme, time_stamp, config):
380
  """Save the interactive plot as HTML"""
 
 
 
 
381
  safe_display_name = display_name.replace('/', '_').replace(' ', '_').replace('₂', '2').replace('₃', '3').replace('.', '_')
382
  safe_time_stamp = time_stamp.replace('-', '').replace(':', '').replace(' ', '_')
383
 
 
378
 
379
  def _save_html_plot(self, fig, var_name, display_name, pressure_level, color_theme, time_stamp, config):
380
  """Save the interactive plot as HTML"""
381
+ # Handle None values with fallbacks
382
+ display_name = display_name or var_name or 'Unknown'
383
+ time_stamp = time_stamp or 'Unknown_Time'
384
+
385
  safe_display_name = display_name.replace('/', '_').replace(' ', '_').replace('₂', '2').replace('₃', '3').replace('.', '_')
386
  safe_time_stamp = time_stamp.replace('-', '').replace(':', '').replace(' ', '_')
387
 
plot_generator.py CHANGED
@@ -179,6 +179,10 @@ class IndiaMapPlotter:
179
  return "\n".join(stats_lines)
180
 
181
  def _save_plot(self, fig, var_name, display_name, pressure_level, color_theme, time_stamp):
182
  safe_display_name = display_name.replace('/', '_').replace(' ', '_').replace('₂', '2').replace('₃', '3').replace('.', '_')
183
  safe_time_stamp = time_stamp.replace('-', '').replace(':', '').replace(' ', '_')
184
  filename_parts = [f"{safe_display_name}_India"]
 
179
  return "\n".join(stats_lines)
180
 
181
  def _save_plot(self, fig, var_name, display_name, pressure_level, color_theme, time_stamp):
182
+ # Handle None values with fallbacks
183
+ display_name = display_name or var_name or 'Unknown'
184
+ time_stamp = time_stamp or 'Unknown_Time'
185
+
186
  safe_display_name = display_name.replace('/', '_').replace(' ', '_').replace('₂', '2').replace('₃', '3').replace('.', '_')
187
  safe_time_stamp = time_stamp.replace('-', '').replace(':', '').replace(' ', '_')
188
  filename_parts = [f"{safe_display_name}_India"]
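For reference, a self-contained sketch of the fallback-and-sanitise behaviour both plot generators now share (illustrative; not copied verbatim from either module).

def safe_plot_filename(var_name, display_name, time_stamp):
    # Fall back to the variable name, then a constant, when labels are missing
    display_name = display_name or var_name or 'Unknown'
    time_stamp = time_stamp or 'Unknown_Time'
    safe_name = (display_name.replace('/', '_').replace(' ', '_')
                 .replace('₂', '2').replace('₃', '3').replace('.', '_'))
    safe_time = time_stamp.replace('-', '').replace(':', '').replace(' ', '_')
    return f"{safe_name}_India_{safe_time}"

print(safe_plot_filename('pm2p5', None, None))                      # -> pm2p5_India_Unknown_Time
print(safe_plot_filename('o3', 'O₃ Surface', '2024-01-15 12:00'))   # -> O3_Surface_India_20240115_1200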
templates/aurora_predict.html CHANGED
@@ -292,12 +292,13 @@
292
 
293
  <div class="form-container">
294
  <div class="info-box">
295
- <h3>๐Ÿš€ About Aurora Predictions</h3>
296
  <ul>
297
- <li><strong>AI-Powered Forecasting:</strong> Uses Microsoft's Aurora foundation model for atmospheric predictions</li>
298
- <li><strong>Multi-Step Forecasting:</strong> Generate predictions for up to 10 time steps ahead</li>
299
- <li><strong>Real CAMS Data:</strong> Downloads actual atmospheric data for the selected date</li>
300
- <li><strong>Multiple Variables:</strong> Predicts PM2.5, PM10, O₃, NO₂, CO, SO₂ and meteorological variables</li>
 
301
  </ul>
302
  </div>
303
 
@@ -307,6 +308,7 @@
307
  <p><strong>GPU Mode:</strong> If CUDA GPU is available, Aurora will use it for faster predictions.</p>
308
  <p><strong>Processing Time:</strong> CPU: 5-15 minutes per step | GPU: 1-3 minutes total</p>
309
  <p><strong>Memory:</strong> CPU mode automatically limits to 2 steps to prevent memory issues.</p>
 
310
  </div>
311
 
312
  <form method="POST">
@@ -325,17 +327,16 @@
325
  </div>
326
 
327
  <div class="form-group">
328
- <label for="steps">๐Ÿ”ข Number of Forecast Steps:</label>
329
  <select id="steps" name="steps" required>
330
- <option value="1">1 step (6 hours) - Fastest</option>
331
- <option value="2" selected>2 steps (12 hours) - CPU Friendly</option>
332
- <option value="4">4 steps (24 hours) - Standard</option>
333
- <option value="6">6 steps (36 hours) - GPU Recommended</option>
334
- <option value="8">8 steps (48 hours) - GPU Required</option>
335
- <option value="10">10 steps (60 hours) - GPU Required</option>
336
  </select>
337
  <small style="color: #666; font-size: 14px;">
338
- Each step represents 6 hours. CPU mode automatically limits to 2 steps maximum.
 
339
  </small>
340
  </div>
341
 
 
292
 
293
  <div class="form-container">
294
  <div class="info-box">
295
+ <h3>โœจ Enhanced Aurora Features</h3>
296
  <ul>
297
+ <li><strong>Dual Time Input:</strong> Uses both T-1 (00:00) and T (12:00) timestamps for better accuracy</li>
298
+ <li><strong>Forward Predictions:</strong> Generate 1-4 steps forward, each covering 12 hours</li>
299
+ <li><strong>Organized Storage:</strong> Results saved in dated folders for easy management</li>
300
+ <li><strong>Multiple Variables:</strong> Predicts PM1, PM2.5, PM10, O₃, NO₂, CO, SO₂ and meteorological variables</li>
301
+ <li><strong>Enhanced Visualization:</strong> Step-by-step analysis with time progression</li>
302
  </ul>
303
  </div>
304
 
 
308
  <p><strong>GPU Mode:</strong> If CUDA GPU is available, Aurora will use it for faster predictions.</p>
309
  <p><strong>Processing Time:</strong> CPU: 5-15 minutes per step | GPU: 1-3 minutes total</p>
310
  <p><strong>Memory:</strong> CPU mode automatically limits to 2 steps to prevent memory issues.</p>
311
+ <p><strong>Coverage:</strong> Each step predicts 12 hours forward (max 48 hours with 4 steps).</p>
312
  </div>
313
 
314
  <form method="POST">
 
327
  </div>
328
 
329
  <div class="form-group">
330
+ <label for="steps">๐Ÿ”ข Number of Forward Prediction Steps:</label>
331
  <select id="steps" name="steps" required>
332
+ <option value="1">1 step (12 hours forward) - Fastest</option>
333
+ <option value="2" selected>2 steps (24 hours forward) - CPU Friendly</option>
334
+ <option value="3">3 steps (36 hours forward) - Recommended</option>
335
+ <option value="4">4 steps (48 hours forward) - Maximum</option>
 
 
336
  </select>
337
  <small style="color: #666; font-size: 14px;">
338
+ Each step represents 12 hours forward from the initial conditions.
339
+ Aurora uses T-1 (00:00) and T (12:00) as input, then predicts forward.
340
  </small>
341
  </div>
342
 
templates/aurora_prediction_plot.html CHANGED
@@ -243,8 +243,8 @@
243
  </div>
244
 
245
  <div class="step-indicator">
246
- <h3>๐Ÿ“Š Current View: Step {{ step }} of {{ steps|length - 1 }}</h3>
247
- <p>Forecast time: {{ (step * 6) }} hours ahead</p>
248
  </div>
249
 
250
  <form method="POST" id="predictionForm">
@@ -266,7 +266,7 @@
266
  <select id="step" name="step">
267
  {% for s in steps %}
268
  <option value="{{ s }}" {% if s == step %}selected{% endif %}>
269
- Step {{ s }} ({{ s * 6 }}h ahead)
270
  </option>
271
  {% endfor %}
272
  </select>
 
243
  </div>
244
 
245
  <div class="step-indicator">
246
+ <h3>๐Ÿ“Š Current View: Step {{ step + 1 }} of {{ max_steps }}</h3>
247
+ <p>Forecast time: T+{{ (step + 1) * 12 }}h ahead from initial conditions</p>
248
  </div>
249
 
250
  <form method="POST" id="predictionForm">
 
266
  <select id="step" name="step">
267
  {% for s in steps %}
268
  <option value="{{ s }}" {% if s == step %}selected{% endif %}>
269
+ Step {{ s + 1 }} (T+{{ (s + 1) * 12 }}h)
270
  </option>
271
  {% endfor %}
272
  </select>
templates/aurora_variables.html ADDED
@@ -0,0 +1,393 @@
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Aurora Prediction Variables - CAMS Air Pollution</title>
7
+ <style>
8
+ body {
9
+ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
10
+ max-width: 1200px;
11
+ margin: 0 auto;
12
+ padding: 20px;
13
+ background: #f5f5f5;
14
+ }
15
+ .container {
16
+ background: white;
17
+ padding: 30px;
18
+ border-radius: 10px;
19
+ box-shadow: 0 2px 10px rgba(0,0,0,0.1);
20
+ margin-bottom: 20px;
21
+ }
22
+ h1 { color: #2c3e50; text-align: center; margin-bottom: 30px; }
23
+ h2 { color: #34495e; border-bottom: 2px solid #9b59b6; padding-bottom: 10px; }
24
+ .method-section, .form-section {
25
+ background: #f8f9fa;
26
+ padding: 20px;
27
+ border-radius: 8px;
28
+ margin-bottom: 20px;
29
+ border-left: 4px solid #9b59b6;
30
+ }
31
+ .form-group {
32
+ margin-bottom: 15px;
33
+ }
34
+ label {
35
+ display: block;
36
+ margin-bottom: 5px;
37
+ font-weight: 600;
38
+ color: #2c3e50;
39
+ }
40
+ select {
41
+ width: 100%;
42
+ padding: 10px;
43
+ border: 2px solid #ddd;
44
+ border-radius: 5px;
45
+ font-size: 14px;
46
+ }
47
+ select:focus {
48
+ border-color: #9b59b6;
49
+ outline: none;
50
+ }
51
+ .btn {
52
+ background: #9b59b6;
53
+ color: white;
54
+ padding: 12px 24px;
55
+ border: none;
56
+ border-radius: 5px;
57
+ cursor: pointer;
58
+ font-size: 16px;
59
+ font-weight: 600;
60
+ transition: all 0.3s;
61
+ text-decoration: none;
62
+ display: inline-block;
63
+ margin-right: 10px;
64
+ margin-bottom: 10px;
65
+ }
66
+ .btn:hover { background: #8e44ad; }
67
+ .btn:disabled {
68
+ background: #ccc;
69
+ cursor: not-allowed;
70
+ }
71
+ .btn-secondary {
72
+ background: #6c757d;
73
+ }
74
+ .btn-secondary:hover {
75
+ background: #5a6268;
76
+ }
77
+ .btn-success {
78
+ background: #28a745;
79
+ }
80
+ .btn-success:hover {
81
+ background: #218838;
82
+ }
83
+ .step-selector {
84
+ display: flex;
85
+ flex-wrap: wrap;
86
+ gap: 10px;
87
+ margin-bottom: 20px;
88
+ }
89
+ .step-btn {
90
+ padding: 8px 16px;
91
+ border: 2px solid #ddd;
92
+ background: white;
93
+ border-radius: 5px;
94
+ cursor: pointer;
95
+ transition: all 0.3s;
96
+ }
97
+ .step-btn:hover {
98
+ border-color: #9b59b6;
99
+ }
100
+ .step-btn.active {
101
+ background: #9b59b6;
102
+ color: white;
103
+ border-color: #9b59b6;
104
+ }
105
+ .info-box {
106
+ background: #e8f4f8;
107
+ border: 1px solid #bee5eb;
108
+ border-radius: 5px;
109
+ padding: 15px;
110
+ margin-bottom: 20px;
111
+ }
112
+ .hidden-section {
113
+ display: none;
114
+ }
115
+ .loading {
116
+ text-align: center;
117
+ color: #666;
118
+ font-style: italic;
119
+ }
120
+ .back-link {
121
+ color: #9b59b6;
122
+ text-decoration: none;
123
+ font-weight: 600;
124
+ margin-bottom: 20px;
125
+ display: inline-block;
126
+ }
127
+ .back-link:hover {
128
+ text-decoration: underline;
129
+ }
130
+ .color-preview-section {
131
+ margin-top: 15px;
132
+ }
133
+ .color-gradient {
134
+ width: 100%;
135
+ height: 20px;
136
+ border-radius: 5px;
137
+ border: 1px solid #ddd;
138
+ }
139
+ </style>
140
+ </head>
141
+ <body>
142
+ <div class="container">
143
+ <a href="{{ url_for('prediction_runs') }}" class="back-link">โ† Back to Prediction Runs</a>
144
+
145
+ <h1>๐Ÿ”ฎ Aurora Prediction Variables</h1>
146
+
147
+ <div class="info-box">
148
+ <strong>๐Ÿ“ Run Directory:</strong> {{ run_dir }}<br>
149
+ <strong>๐Ÿ“Š Steps Available:</strong> {{ steps_data|length }} ({{ (steps_data|length * 12) }}h coverage)
150
+ </div>
151
+
152
+ <!-- Step 1: Step Selection -->
153
+ <div class="method-section">
154
+ <h2>โฐ Step 1: Select Prediction Step</h2>
155
+ <p>Choose which prediction time step to analyze:</p>
156
+ <div class="step-selector">
157
+ {% for step_data in steps_data %}
158
+ <div class="step-btn" onclick="selectStep({{ step_data.step }}, '{{ step_data.filename }}')">
159
+ Step {{ step_data.step }}<br>
160
+ <small>T+{{ step_data.forecast_hours }}h</small>
161
+ </div>
162
+ {% endfor %}
163
+ </div>
164
+ </div>
165
+
166
+ <!-- Step 2: Variable Selection (hidden until step selected) -->
167
+ <div id="variableSection" class="hidden-section">
168
+ <div class="form-section">
169
+ <h2>๐Ÿงช Step 2: Select Variable</h2>
170
+ <div id="variableLoading" class="loading">Loading variables...</div>
171
+ <div id="variableContent" class="hidden-section">
172
+ <form method="POST" action="{{ url_for('aurora_plot') }}" id="plotForm">
173
+ <input type="hidden" name="run_dir" value="{{ run_dir }}">
174
+ <input type="hidden" name="step" id="selected_step" value="">
175
+
176
+ <div class="form-group">
177
+ <label for="variable">Choose Variable:</label>
178
+ <select name="variable" id="variable" required onchange="handleVariableChange()">
179
+ <option value="">-- Select a variable --</option>
180
+ </select>
181
+ </div>
182
+
183
+ <!-- Step 3: Pressure Level (shown for atmospheric variables) -->
184
+ <div id="pressureSection" class="hidden-section">
185
+ <h2>๐Ÿ“Š Step 3: Select Pressure Level</h2>
186
+ <div class="form-group">
187
+ <label for="pressure_level">Pressure Level (hPa):</label>
188
+ <select name="pressure_level" id="pressure_level">
189
+ </select>
190
+ </div>
191
+ </div>
192
+
193
+ <!-- Step 4: Color Theme Selection -->
194
+ <div id="plotOptionsSection" class="hidden-section">
195
+ <h2>๐ŸŽจ Step 4: Select Color Theme</h2>
196
+ <div class="form-group">
197
+ <label for="color_theme">Select Color Scheme:</label>
198
+ <select name="color_theme" id="color_theme" onchange="updateColorPreview()">
199
+ {% for theme_id, theme_name in color_themes.items() %}
200
+ <option value="{{ theme_id }}"
201
+ {% if theme_id == 'viridis' %}selected{% endif %}>
202
+ {{ theme_name }}
203
+ </option>
204
+ {% endfor %}
205
+ </select>
206
+ </div>
207
+
208
+ <div class="color-preview-section">
209
+ <p><strong>Preview:</strong> <span id="colorPreviewText">Viridis</span></p>
210
+ <div class="color-gradient" id="colorPreview"></div>
211
+ </div>
212
+ </div>
213
+
214
+ <!-- Step 5: Generate Plot -->
215
+ <h2>๐Ÿ“ˆ Step 5: Generate Plot</h2>
216
+ <button type="submit" name="plot_type" value="static" class="btn" id="staticPlotBtn" disabled>
217
+ ๐Ÿ“Š Generate Static Plot
218
+ </button>
219
+ <button type="submit" name="plot_type" value="interactive" class="btn btn-success" id="interactivePlotBtn" disabled>
220
+ ๐ŸŒ Generate Interactive Plot
221
+ </button>
222
+ </div>
223
+ </form>
224
+ </div>
225
+ </div>
226
+ </div>
227
+
228
+ <!-- Download Section -->
229
+ <div class="method-section">
230
+ <h2>๐Ÿ’พ Download Data</h2>
231
+ <a href="{{ url_for('download_prediction_netcdf', filename=run_dir) }}" class="btn btn-secondary">
232
+ ๐Ÿ“ฅ Download All Files
233
+ </a>
234
+ </div>
235
+ </div>
236
+
237
+ <script>
238
+ let currentStep = null;
239
+ let currentVariables = null;
240
+
241
+ function selectStep(step, filename) {
242
+ // Update UI
243
+ document.querySelectorAll('.step-btn').forEach(btn => btn.classList.remove('active'));
244
+ event.target.closest('.step-btn').classList.add('active');
245
+
246
+ currentStep = step;
247
+ document.getElementById('selected_step').value = step;
248
+
249
+ // Show variable section and loading
250
+ document.getElementById('variableSection').classList.remove('hidden-section');
251
+ document.getElementById('variableLoading').style.display = 'block';
252
+ document.getElementById('variableContent').classList.add('hidden-section');
253
+
254
+ // Load variables for this step
255
+ fetch(`/api/aurora_step_variables/{{ run_dir }}/${step}`)
256
+ .then(response => response.json())
257
+ .then(data => {
258
+ if (data.error) {
259
+ alert('Error loading variables: ' + data.error);
260
+ return;
261
+ }
262
+
263
+ currentVariables = data;
264
+ populateVariables(data);
265
+
266
+ // Hide loading, show content
267
+ document.getElementById('variableLoading').style.display = 'none';
268
+ document.getElementById('variableContent').classList.remove('hidden-section');
269
+ })
270
+ .catch(error => {
271
+ console.error('Error:', error);
272
+ alert('Error loading variables');
273
+ document.getElementById('variableLoading').innerHTML = 'Error loading variables';
274
+ });
275
+ }
276
+
277
+ function populateVariables(data) {
278
+ const select = document.getElementById('variable');
279
+ select.innerHTML = '<option value="">-- Select a variable --</option>';
280
+
281
+ if (data.surface_vars && data.surface_vars.length > 0) {
282
+ const surfaceGroup = document.createElement('optgroup');
283
+ surfaceGroup.label = 'Surface Variables';
284
+ data.surface_vars.forEach(varName => {
285
+ const option = document.createElement('option');
286
+ option.value = varName;
287
+ option.textContent = `${varName} (Surface)`;
288
+ option.dataset.type = 'surface';
289
+ surfaceGroup.appendChild(option);
290
+ });
291
+ select.appendChild(surfaceGroup);
292
+ }
293
+
294
+ if (data.atmos_vars && data.atmos_vars.length > 0) {
295
+ const atmosGroup = document.createElement('optgroup');
296
+ atmosGroup.label = 'Atmospheric Variables';
297
+ data.atmos_vars.forEach(varName => {
298
+ const option = document.createElement('option');
299
+ option.value = varName;
300
+ option.textContent = `${varName} (Atmospheric)`;
301
+ option.dataset.type = 'atmospheric';
302
+ atmosGroup.appendChild(option);
303
+ });
304
+ select.appendChild(atmosGroup);
305
+ }
306
+
307
+ // Populate pressure levels
308
+ const pressureSelect = document.getElementById('pressure_level');
309
+ pressureSelect.innerHTML = '';
310
+ if (data.pressure_levels && data.pressure_levels.length > 0) {
311
+ data.pressure_levels.forEach(level => {
312
+ const option = document.createElement('option');
313
+ option.value = level;
314
+ option.textContent = `${level} hPa`;
315
+ pressureSelect.appendChild(option);
316
+ });
317
+ }
318
+ }
319
+
320
+ function handleVariableChange() {
321
+ const select = document.getElementById('variable');
322
+ const selectedOption = select.options[select.selectedIndex];
323
+
324
+ if (selectedOption.value) {
325
+ const isAtmospheric = selectedOption.dataset.type === 'atmospheric';
326
+
327
+ // Show/hide pressure section
328
+ const pressureSection = document.getElementById('pressureSection');
329
+ if (isAtmospheric) {
330
+ pressureSection.classList.remove('hidden-section');
331
+ } else {
332
+ pressureSection.classList.add('hidden-section');
333
+ }
334
+
335
+ // Show plot options
336
+ document.getElementById('plotOptionsSection').classList.remove('hidden-section');
337
+
338
+ // Initialize color preview when plot options are shown
339
+ updateColorPreview();
340
+
341
+ // Enable plot buttons
342
+ document.getElementById('staticPlotBtn').disabled = false;
343
+ document.getElementById('interactivePlotBtn').disabled = false;
344
+ } else {
345
+ // Hide sections if no variable selected
346
+ document.getElementById('pressureSection').classList.add('hidden-section');
347
+ document.getElementById('plotOptionsSection').classList.add('hidden-section');
348
+
349
+ // Disable plot buttons
350
+ document.getElementById('staticPlotBtn').disabled = true;
351
+ document.getElementById('interactivePlotBtn').disabled = true;
352
+ }
353
+ }
354
+
355
+ // Color theme preview
356
+ const colorMaps = {
357
+ 'viridis': 'linear-gradient(to right, #440154, #414487, #2a788e, #22a884, #7ad151, #fde725)',
358
+ 'plasma': 'linear-gradient(to right, #0d0887, #6a00a8, #b12a90, #e16462, #fca636, #f0f921)',
359
+ 'YlOrRd': 'linear-gradient(to right, #ffffcc, #ffeda0, #fed976, #feb24c, #fd8d3c, #e31a1c)',
360
+ 'Blues': 'linear-gradient(to right, #f7fbff, #deebf7, #c6dbef, #9ecae1, #6baed6, #2171b5)',
361
+ 'Reds': 'linear-gradient(to right, #fff5f0, #fee0d2, #fcbba1, #fc9272, #fb6a4a, #de2d26)',
362
+ 'Greens': 'linear-gradient(to right, #f7fcf5, #e5f5e0, #c7e9c0, #a1d99b, #74c476, #238b45)',
363
+ 'Oranges': 'linear-gradient(to right, #fff5eb, #fee6ce, #fdd0a2, #fdae6b, #fd8d3c, #d94701)',
364
+ 'Purples': 'linear-gradient(to right, #fcfbfd, #efedf5, #dadaeb, #bcbddc, #9e9ac8, #756bb1)',
365
+ 'inferno': 'linear-gradient(to right, #000004, #420a68, #932667, #dd513a, #fca50a, #fcffa4)',
366
+ 'magma': 'linear-gradient(to right, #000004, #3b0f70, #8c2981, #de4968, #fe9f6d, #fcfdbf)',
367
+ 'cividis': 'linear-gradient(to right, #00224e, #123570, #3b496c, #575d6d, #707173, #8a8678)',
368
+ 'coolwarm': 'linear-gradient(to right, #3b4cc0, #688aef, #b7d4f1, #f7f7f7, #f4b2a6, #dc7176, #a50026)',
369
+ 'RdYlBu': 'linear-gradient(to right, #a50026, #d73027, #f46d43, #fdae61, #fee090, #e0f3f8, #abd9e9, #74add1, #4575b4, #313695)',
370
+ 'Spectral': 'linear-gradient(to right, #9e0142, #d53e4f, #f46d43, #fdae61, #fee08b, #e6f598, #abdda4, #66c2a5, #3288bd, #5e4fa2)'
371
+ };
372
+
373
+ function updateColorPreview() {
374
+ const theme = document.getElementById('color_theme').value;
375
+ const preview = document.getElementById('colorPreview');
376
+ const previewText = document.getElementById('colorPreviewText');
377
+
378
+ previewText.textContent = document.getElementById('color_theme').selectedOptions[0].text;
379
+
380
+ if (colorMaps[theme]) {
381
+ preview.style.background = colorMaps[theme];
382
+ } else {
383
+ preview.style.background = colorMaps['viridis'];
384
+ }
385
+ }
386
+
387
+ // Initialize color preview when page loads
388
+ document.addEventListener('DOMContentLoaded', function() {
389
+ updateColorPreview();
390
+ });
391
+ </script>
392
+ </body>
393
+ </html>
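The script above fetches `/api/aurora_step_variables/<run_dir>/<step>`; that route is not part of this hunk, so the following is only a hedged sketch of what such an endpoint could look like, reusing `AuroraPredictionProcessor` from `data_processor.py`. The glob pattern, 1-based step numbering, and error handling are assumptions.

# Hypothetical sketch -- the route name mirrors the fetch() call in the template,
# but the real handler lives elsewhere in the Flask application.
from pathlib import Path
from flask import Flask, jsonify

from data_processor import AuroraPredictionProcessor

app = Flask(__name__)  # stand-in; in the project this would be the existing app object

@app.route('/api/aurora_step_variables/<run_dir>/<int:step>')
def aurora_step_variables(run_dir, step):
    step_files = sorted(Path('predictions', run_dir).glob('*.nc'))  # assumed layout
    if not (1 <= step <= len(step_files)):
        return jsonify({'error': f'Step {step} not found'}), 404
    proc = AuroraPredictionProcessor(step_files[step - 1])
    try:
        variables = proc.get_available_variables()
        return jsonify({
            'surface_vars': variables['surface_vars'],
            'atmos_vars': variables['atmospheric_vars'],
            'pressure_levels': proc.get_available_pressure_levels(),
        })
    finally:
        proc.close()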
templates/index.html CHANGED
@@ -263,19 +263,25 @@
263
  <p>Generate AI-powered air pollution forecasts using Microsoft's Aurora foundation model</p>
264
 
265
  <div style="background: #f8f9ff; padding: 15px; border-radius: 8px; margin: 15px 0; border: 2px solid #e3e7ff;">
266
- <p style="margin-bottom: 10px;"><strong>๐Ÿš€ What is Aurora?</strong></p>
267
  <ul style="margin-left: 20px; color: #666;">
268
- <li>Microsoft's state-of-the-art atmospheric foundation model</li>
269
- <li>Trained on massive amounts of global weather and atmospheric data</li>
270
- <li>Generates multi-step forecasts for air pollution and meteorology</li>
271
- <li>Provides predictions up to 60 hours ahead with 6-hour intervals</li>
 
272
  </ul>
273
  </div>
274
 
275
  {% if aurora_available is defined and aurora_available %}
276
- <a href="{{ url_for('aurora_predict') }}" class="btn" style="background: linear-gradient(135deg, #9b59b6 0%, #8e44ad 100%);">
277
- ๐Ÿ”ฎ Generate Aurora Predictions
278
- </a>
279
  {% else %}
280
  <button class="btn" disabled style="background: #bdc3c7; cursor: not-allowed;">
281
  ๐Ÿ”ฎ Aurora Model Not Available
 
263
  <p>Generate AI-powered air pollution forecasts using Microsoft's Aurora foundation model</p>
264
 
265
  <div style="background: #f8f9ff; padding: 15px; border-radius: 8px; margin: 15px 0; border: 2px solid #e3e7ff;">
266
+ <p style="margin-bottom: 10px;"><strong>๐Ÿš€ Enhanced Aurora Features:</strong></p>
267
  <ul style="margin-left: 20px; color: #666;">
268
+ <li>Uses dual timestamps (T-1 and T) for improved prediction accuracy</li>
269
+ <li>Forward predictions from 1-4 steps (12-48 hours coverage)</li>
270
+ <li>Organized storage with run metadata and easy browsing</li>
271
+ <li>Step-by-step visualization showing temporal evolution</li>
272
+ <li>Full pollution suite: PM1, PM2.5, PM10, O₃, NO₂, CO, SO₂</li>
273
  </ul>
274
  </div>
275
 
276
  {% if aurora_available is defined and aurora_available %}
277
+ <div style="display: flex; gap: 15px; flex-wrap: wrap;">
278
+ <a href="{{ url_for('aurora_predict') }}" class="btn" style="background: linear-gradient(135deg, #9b59b6 0%, #8e44ad 100%);">
279
+ ๐Ÿ”ฎ Generate Aurora Predictions
280
+ </a>
281
+ <a href="{{ url_for('prediction_runs') }}" class="btn" style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);">
282
+ ๐Ÿ“Š Browse Existing Predictions
283
+ </a>
284
+ </div>
285
  {% else %}
286
  <button class="btn" disabled style="background: #bdc3c7; cursor: not-allowed;">
287
  ๐Ÿ”ฎ Aurora Model Not Available
templates/prediction_runs.html ADDED
@@ -0,0 +1,338 @@
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Aurora Prediction Runs - CAMS Pollution Dashboard</title>
7
+ <style>
8
+ * {
9
+ margin: 0;
10
+ padding: 0;
11
+ box-sizing: border-box;
12
+ }
13
+
14
+ body {
15
+ font-family: 'Arial', sans-serif;
16
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
17
+ min-height: 100vh;
18
+ color: #333;
19
+ }
20
+
21
+ .container {
22
+ max-width: 1200px;
23
+ margin: 0 auto;
24
+ padding: 20px;
25
+ }
26
+
27
+ .header {
28
+ text-align: center;
29
+ margin-bottom: 40px;
30
+ color: white;
31
+ }
32
+
33
+ .header h1 {
34
+ font-size: 2.5em;
35
+ margin-bottom: 10px;
36
+ text-shadow: 2px 2px 4px rgba(0,0,0,0.3);
37
+ }
38
+
39
+ .header p {
40
+ font-size: 1.2em;
41
+ opacity: 0.9;
42
+ }
43
+
44
+ .back-link {
45
+ display: inline-block;
46
+ color: white;
47
+ text-decoration: none;
48
+ padding: 10px 20px;
49
+ background: rgba(255, 255, 255, 0.2);
50
+ border-radius: 25px;
51
+ margin-bottom: 30px;
52
+ transition: background 0.3s ease;
53
+ }
54
+
55
+ .back-link:hover {
56
+ background: rgba(255, 255, 255, 0.3);
57
+ }
58
+
59
+ .runs-container {
60
+ background: rgba(255, 255, 255, 0.95);
61
+ border-radius: 15px;
62
+ padding: 30px;
63
+ box-shadow: 0 15px 35px rgba(0, 0, 0, 0.1);
64
+ backdrop-filter: blur(10px);
65
+ }
66
+
67
+ .no-runs {
68
+ text-align: center;
69
+ padding: 60px 20px;
70
+ color: #666;
71
+ }
72
+
73
+ .no-runs h3 {
74
+ font-size: 1.5em;
75
+ margin-bottom: 15px;
76
+ color: #999;
77
+ }
78
+
79
+ .run-card {
80
+ background: #f8f9fa;
81
+ border-radius: 10px;
82
+ padding: 20px;
83
+ margin-bottom: 20px;
84
+ border-left: 5px solid #667eea;
85
+ transition: transform 0.2s ease, box-shadow 0.2s ease;
86
+ }
87
+
88
+ .run-card:hover {
89
+ transform: translateY(-2px);
90
+ box-shadow: 0 8px 25px rgba(0, 0, 0, 0.1);
91
+ }
92
+
93
+ .run-header {
94
+ display: flex;
95
+ justify-content: space-between;
96
+ align-items: center;
97
+ margin-bottom: 15px;
98
+ }
99
+
100
+ .run-title {
101
+ font-size: 1.3em;
102
+ font-weight: bold;
103
+ color: #333;
104
+ margin-right: auto;
105
+ }
106
+
107
+ .run-status {
108
+ padding: 5px 15px;
109
+ border-radius: 20px;
110
+ font-size: 0.9em;
111
+ font-weight: bold;
112
+ }
113
+
114
+ .status-available {
115
+ background: #d4edda;
116
+ color: #155724;
117
+ }
118
+
119
+ .status-unavailable {
120
+ background: #f8d7da;
121
+ color: #721c24;
122
+ }
123
+
124
+ .run-details {
125
+ display: grid;
126
+ grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
127
+ gap: 15px;
128
+ margin-bottom: 20px;
129
+ }
130
+
131
+ .detail-item {
132
+ display: flex;
133
+ align-items: center;
134
+ font-size: 0.95em;
135
+ }
136
+
137
+ .detail-icon {
138
+ margin-right: 8px;
139
+ font-size: 1.1em;
140
+ }
141
+
142
+ .detail-label {
143
+ font-weight: bold;
144
+ margin-right: 8px;
145
+ color: #555;
146
+ }
147
+
148
+ .detail-value {
149
+ color: #333;
150
+ }
151
+
152
+ .run-actions {
153
+ display: flex;
154
+ gap: 10px;
155
+ flex-wrap: wrap;
156
+ }
157
+
158
+ .btn {
159
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
160
+ color: white;
161
+ padding: 10px 20px;
162
+ border: none;
163
+ border-radius: 6px;
164
+ font-size: 14px;
165
+ cursor: pointer;
166
+ text-decoration: none;
167
+ display: inline-block;
168
+ transition: transform 0.2s ease, box-shadow 0.2s ease;
169
+ }
170
+
171
+ .btn:hover {
172
+ transform: translateY(-1px);
173
+ box-shadow: 0 4px 15px rgba(102, 126, 234, 0.3);
174
+ }
175
+
176
+ .btn-secondary {
177
+ background: linear-gradient(135deg, #6c757d 0%, #495057 100%);
178
+ }
179
+
180
+ .btn:disabled {
181
+ background: #bdc3c7;
182
+ cursor: not-allowed;
183
+ transform: none;
184
+ }
185
+
186
+ .summary-stats {
187
+ display: grid;
188
+ grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
189
+ gap: 20px;
190
+ margin-bottom: 30px;
191
+ }
192
+
193
+ .stat-card {
194
+ background: white;
195
+ padding: 20px;
196
+ border-radius: 10px;
197
+ text-align: center;
198
+ box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);
199
+ }
200
+
201
+ .stat-number {
202
+ font-size: 2em;
203
+ font-weight: bold;
204
+ color: #667eea;
205
+ display: block;
206
+ }
207
+
208
+ .stat-label {
209
+ color: #666;
210
+ font-size: 0.9em;
211
+ margin-top: 5px;
212
+ }
213
+
214
+ @media (max-width: 768px) {
215
+ .container {
216
+ padding: 10px;
217
+ }
218
+
219
+ .runs-container {
220
+ padding: 20px;
221
+ }
222
+
223
+ .run-details {
224
+ grid-template-columns: 1fr;
225
+ }
226
+
227
+ .run-actions {
228
+ flex-direction: column;
229
+ }
230
+
231
+ .btn {
232
+ text-align: center;
233
+ }
234
+ }
235
+ </style>
236
+ </head>
237
+ <body>
238
+ <div class="container">
239
+ <div class="header">
240
+ <h1>๐Ÿ”ฎ Aurora Prediction Runs</h1>
241
+ <p>Browse and manage your atmospheric prediction runs</p>
242
+ </div>
243
+
244
+ <a href="{{ url_for('index') }}" class="back-link">โ† Back to Dashboard</a>
245
+
246
+ <div class="runs-container">
247
+ {% if runs %}
248
+ <!-- Summary Statistics -->
249
+ <div class="summary-stats">
250
+ <div class="stat-card">
251
+ <span class="stat-number">{{ runs|length }}</span>
252
+ <div class="stat-label">Total Runs</div>
253
+ </div>
254
+ <div class="stat-card">
255
+ <span class="stat-number">{{ runs|selectattr("available")|list|length }}</span>
256
+ <div class="stat-label">Available</div>
257
+ </div>
258
+ <div class="stat-card">
259
+ <span class="stat-number">{{ (runs|map(attribute="steps")|sum) or 0 }}</span>
260
+ <div class="stat-label">Total Steps</div>
261
+ </div>
262
+ <div class="stat-card">
263
+ <span class="stat-number">{{ (runs|map(attribute="time_coverage_hours")|sum) or 0 }}h</span>
264
+ <div class="stat-label">Total Coverage</div>
265
+ </div>
266
+ </div>
267
+
268
+ <!-- Prediction Runs List -->
269
+ {% for run in runs %}
270
+ <div class="run-card">
271
+ <div class="run-header">
272
+ <div class="run-title">๐Ÿ“… {{ run.date }}</div>
273
+ <div class="run-status {{ 'status-available' if run.available else 'status-unavailable' }}">
274
+ {{ 'โœ… Available' if run.available else 'โŒ Missing' }}
275
+ </div>
276
+ </div>
277
+
278
+ <div class="run-details">
279
+ <div class="detail-item">
280
+ <span class="detail-icon">๐Ÿ•</span>
281
+ <span class="detail-label">Run Time:</span>
282
+ <span class="detail-value">{{ run.run_timestamp[:8] }} {{ run.run_timestamp[9:].replace('_', ':') }}</span>
283
+ </div>
284
+ <div class="detail-item">
285
+ <span class="detail-icon">๐Ÿ“Š</span>
286
+ <span class="detail-label">Steps:</span>
287
+ <span class="detail-value">{{ run.steps }} steps</span>
288
+ </div>
289
+ <div class="detail-item">
290
+ <span class="detail-icon">โฑ๏ธ</span>
291
+ <span class="detail-label">Coverage:</span>
292
+ <span class="detail-value">{{ run.time_coverage_hours }}h forward</span>
293
+ </div>
294
+ <div class="detail-item">
295
+ <span class="detail-icon">๐Ÿ“ฅ</span>
296
+ <span class="detail-label">Input Times:</span>
297
+ <span class="detail-value">{{ run.input_times|join(", ") }}</span>
298
+ </div>
299
+ </div>
300
+
301
+ <div class="run-actions">
302
+ {% if run.available %}
303
+ <a href="{{ url_for('aurora_variables', run_dir=run.relative_path) }}"
304
+ class="btn">
305
+ ๐Ÿ“Š View Variables
306
+ </a>
307
+ <a href="{{ url_for('download_prediction_netcdf', filename=run.relative_path) }}"
308
+ class="btn btn-secondary">
309
+ ๐Ÿ’พ Download Files
310
+ </a>
311
+ {% else %}
312
+ <button class="btn" disabled>
313
+ โŒ File Not Available
314
+ </button>
315
+ {% endif %}
316
+
317
+ <a href="{{ url_for('aurora_predict') }}" class="btn btn-secondary">
318
+ ๐Ÿ”„ Create New Run
319
+ </a>
320
+ </div>
321
+ </div>
322
+ {% endfor %}
323
+
324
+ {% else %}
325
+ <div class="no-runs">
326
+ <h3>๐Ÿ”ฎ No Aurora Prediction Runs Found</h3>
327
+ <p>You haven't created any Aurora predictions yet.</p>
328
+ <p style="margin-top: 20px;">
329
+ <a href="{{ url_for('aurora_predict') }}" class="btn">
330
+ ๐Ÿš€ Create Your First Prediction
331
+ </a>
332
+ </p>
333
+ </div>
334
+ {% endif %}
335
+ </div>
336
+ </div>
337
+ </body>
338
+ </html>
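The template above expects a `runs` list whose entries expose `date`, `steps`, `time_coverage_hours`, `input_times`, `available`, and `relative_path`; a hedged sketch of a matching view built on the `list_prediction_runs()` helper added earlier is shown below. The URL path is an assumption; only the `prediction_runs` endpoint name and template filename come from this diff.

# Hedged sketch -- the real view lives in the Flask application; shown here only to
# illustrate how the run metadata feeds this template.
from flask import Flask, render_template

from aurora_pipeline import AuroraPipeline

app = Flask(__name__)  # stand-in for the existing application object

@app.route('/prediction_runs')
def prediction_runs():
    runs = AuroraPipeline.list_prediction_runs("predictions")
    return render_template('prediction_runs.html', runs=runs)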
test_aurora_fix.py DELETED
@@ -1,61 +0,0 @@
1
- #!/usr/bin/env python3
2
- """
3
- Quick test for Aurora pipeline generator fix
4
- """
5
-
6
- import sys
7
- import os
8
- sys.path.append('.')
9
-
10
- def test_aurora_fix():
11
- """Test the Aurora pipeline generator fix"""
12
- print("๐Ÿงช Testing Aurora Pipeline Generator Fix")
13
- print("=" * 45)
14
-
15
- try:
16
- from aurora_pipeline import AuroraPipeline
17
- print("โœ… Aurora pipeline imported successfully")
18
-
19
- # Initialize pipeline
20
- pipeline = AuroraPipeline()
21
- print("โœ… Pipeline initialized")
22
-
23
- # Test with minimal configuration
24
- date = '2022-10-14'
25
- steps = 1
26
-
27
- print(f"\n๐Ÿš€ Running test prediction:")
28
- print(f" ๐Ÿ“… Date: {date}")
29
- print(f" ๐Ÿ”„ Steps: {steps}")
30
-
31
- result = pipeline.run_pipeline(date, steps=steps)
32
-
33
- print("โœ… SUCCESS! Aurora pipeline completed without errors")
34
-
35
- if isinstance(result, dict):
36
- print(f"๐Ÿ“Š Result contains: {list(result.keys())}")
37
-
38
- # Check if we have expected outputs
39
- if 'air_pollution' in result:
40
- pollution_data = result['air_pollution']
41
- print(f"๐ŸŒฌ๏ธ Air pollution predictions: {pollution_data.shape if hasattr(pollution_data, 'shape') else type(pollution_data)}")
42
-
43
- if 'metadata' in result:
44
- metadata = result['metadata']
45
- print(f"๐Ÿ“‹ Metadata: {metadata}")
46
-
47
- print("\n๐ŸŽ‰ Aurora pipeline is now working correctly!")
48
-
49
- except Exception as e:
50
- print(f"โŒ Error during test: {e}")
51
- import traceback
52
- print("\n๐Ÿ” Full traceback:")
53
- traceback.print_exc()
54
-
55
- print("\n๐Ÿ’ก Potential solutions:")
56
- print("1. Check if CAMS data exists for the test date")
57
- print("2. Verify Aurora model downloaded correctly")
58
- print("3. Ensure all dependencies are installed")
59
-
60
- if __name__ == "__main__":
61
- test_aurora_fix()
test_cpu_mode.py DELETED
@@ -1,124 +0,0 @@
1
- #!/usr/bin/env python3
2
- """
3
- CPU-friendly test for local development without GPU requirements
4
- """
5
-
6
- def test_system_readiness():
7
- """Test if the basic system is ready for local development"""
8
- print("๐Ÿ”ฌ Testing System for Local Development")
9
- print("=" * 50)
10
-
11
- issues = []
12
-
13
- # Test 1: Basic Python imports
14
- try:
15
- import sys
16
- print(f"โœ… Python version: {sys.version.split()[0]}")
17
- except Exception as e:
18
- issues.append(f"Python: {e}")
19
-
20
- # Test 2: Core dependencies
21
- try:
22
- import numpy as np
23
- print(f"โœ… NumPy: {np.__version__}")
24
- except ImportError:
25
- issues.append("NumPy not found")
26
-
27
- try:
28
- import xarray as xr
29
- print(f"โœ… xarray: {xr.__version__}")
30
- except ImportError:
31
- issues.append("xarray not found")
32
-
33
- try:
34
- import matplotlib
35
- print(f"โœ… Matplotlib: {matplotlib.__version__}")
36
- except ImportError:
37
- issues.append("Matplotlib not found")
38
-
39
- try:
40
- import flask
41
- print(f"โœ… Flask: {flask.__version__}")
42
- except ImportError:
43
- issues.append("Flask not found")
44
-
45
- # Test 3: Optional Aurora dependencies
46
- aurora_ready = True
47
- try:
48
- import torch
49
- print(f"โœ… PyTorch: {torch.__version__}")
50
- print(f" CUDA available: {torch.cuda.is_available()}")
51
- print(f" CPU cores: {torch.get_num_threads()}")
52
- except ImportError:
53
- print("โš ๏ธ PyTorch not found (Aurora unavailable)")
54
- aurora_ready = False
55
-
56
- try:
57
- from huggingface_hub import hf_hub_download
58
- print("โœ… Hugging Face Hub available")
59
- except ImportError:
60
- print("โš ๏ธ Hugging Face Hub not found (Aurora unavailable)")
61
- aurora_ready = False
62
-
63
- # Test 4: Aurora model
64
- try:
65
- from aurora import Batch, Metadata, AuroraAirPollution, rollout
66
- print("โœ… Aurora model available")
67
- except ImportError:
68
- print("โš ๏ธ Aurora model not found (predictions unavailable)")
69
- aurora_ready = False
70
-
71
- # Test 5: Custom modules
72
- try:
73
- from data_processor import NetCDFProcessor
74
- from plot_generator import IndiaMapPlotter
75
- from constants import NETCDF_VARIABLES
76
- print("โœ… Custom modules loaded")
77
- except ImportError as e:
78
- issues.append(f"Custom modules: {e}")
79
-
80
- # Test 6: GPU vs CPU detection
81
- gpu_info = "None"
82
- try:
83
- import subprocess
84
- result = subprocess.run(['nvidia-smi', '--query-gpu=name', '--format=csv,noheader,nounits'],
85
- capture_output=True, text=True, timeout=5)
86
- if result.returncode == 0:
87
- gpu_info = result.stdout.strip()
88
- print(f"๐ŸŽฎ GPU detected: {gpu_info}")
89
- else:
90
- print("๐Ÿ’ป No GPU detected (CPU mode)")
91
- except:
92
- print("๐Ÿ’ป No GPU utilities found (CPU mode)")
93
-
94
- print("\n" + "=" * 50)
95
- print("๐Ÿ“Š System Assessment:")
96
-
97
- if issues:
98
- print("โŒ Critical Issues Found:")
99
- for issue in issues:
100
- print(f" - {issue}")
101
- print("\n๐Ÿ”ง Please install missing dependencies")
102
- else:
103
- print("โœ… Core system ready!")
104
-
105
- if aurora_ready:
106
- print("๐Ÿ”ฎ Aurora ML predictions: Available")
107
- if "CPU mode" in gpu_info or gpu_info == "None":
108
- print("๐Ÿ’ป Recommended: Use CPU mode with max 2 steps")
109
- else:
110
- print("๐ŸŽฎ GPU available: Can use more prediction steps")
111
- else:
112
- print("โš ๏ธ Aurora ML predictions: Not available")
113
- print(" Install: torch, huggingface_hub, aurora-forecast")
114
-
115
- return len(issues) == 0
116
-
117
- if __name__ == "__main__":
118
- success = test_system_readiness()
119
-
120
- if success:
121
- print("\n๐Ÿš€ Ready to run CAMS visualization system!")
122
- print("๐Ÿ’ก Start with: python app.py")
123
- else:
124
- print("\nโŒ Please resolve issues before running the system")