#!/usr/bin/env python3
"""NEO Reader module"""
from __future__ import annotations
from pathlib import Path
from argparse import Namespace, ArgumentParser
from pprint import pprint
from natsort import natsorted
from loguru import logger
import numpy as np
from torch.utils.data import Dataset

try:
    from .neo_node import NEONode
except ImportError:
    from neo_node import NEONode


class NEOReader(Dataset):
    """
    NEO Reader implementation. Reads data from npz files and returns them as a dict.

    Parameters:
    - path: Path to the directory containing the npz files.
    - nodes: List of nodes that are present in the dataset.
    - handle_missing_data: Mode to handle missing data. Valid options are:
      - drop: Drop the data point if any of the nodes is missing.
      - fill_none: Fill the missing data with Nones.

    Expected directory structure:
    path/
    - node_1/0.npz, ..., N.npz
    - ...
    - node_n/0.npz, ..., N.npz

    Names can be in a different format (e.g. 2022-01-01.npz), but they must be consistent and
    identical across all nodes.
    """

    # The default names and their corresponding types for NEO datasets (1week or 1month)
    name_to_type = {
        "AOD": "AerosolOpticalDepth",
        "BS_ALBEDO": "Albedo",
        "CHLORA": "Chlorophyll",
        "CLD_FR": "CloudFraction",
        "CLD_RD": "CloudParticleRadius",
        "CLD_WP": "CloudWaterContent",
        "COT": "CloudOpticalThickness",
        "CO_M": "CarbonMonoxide",
        "FIRE": "Fire",
        "INSOL": "SolarInsolation",
        "LAI": "LeafAreaIndex",
        "LSTD": "Temperature",
        "LSTD_AN": "TemperatureAnomaly",
        "LSTN": "Temperature",
        "LSTN_AN": "TemperatureAnomaly",
        "LWFLUX": "OutgoingLongwaveRadiation",
        "NDVI": "Vegetation",
        "NETFLUX": "NetRadiation",
        "NO2": "NitrogenDioxide",
        "OZONE": "Ozone",
        "SNOWC": "SnowCover",
        "SST": "SeaSurfaceTemperature",
        "SWFLUX": "ReflectedShortwaveRadiation",
        "WV": "WaterVapor",
    }

    def __init__(self, path: Path, nodes: list[str] | None = None, handle_missing_data: str = "fill_none"):
        self.path = Path(path).absolute()
        assert self.path.exists(), f"Provided path '{self.path}' doesn't exist!"
        assert handle_missing_data in ("drop", "fill_none"), f"Invalid handle_missing_data mode: {handle_missing_data}"
        self.files_per_node, self.file_names = self._build_dataset(handle_missing_data)
        if nodes is None:
            nodes = list(self.files_per_node.keys())
            logger.debug("No nodes provided. Using all of them as read from the paths.")
        assert all(isinstance(x, str) for x in nodes), tuple(zip(nodes, (type(x) for x in nodes)))
        self.node_names = sorted(nodes)
        logger.info(f"Nodes used in this reader: {self.node_names}")
        self.nodes = [NEONode(NEOReader.name_to_type[x], x) for x in self.node_names]
        self.handle_missing_data = handle_missing_data
        self._images_shape: tuple[int, int, int] | None = None

    # Public methods and properties

    @property
    def images_shape(self) -> tuple[int, int, int]:
        """Returns the (H, W, C) shape of the images, assumed to be consistent across all data points"""
        if self._images_shape is None:
            # Under "fill_none", the first data points may be all-None, so scan forward until a
            # non-None image is found. Raises IndexError if no node has any data at all.
            i = 0
            while self._images_shape is None:
                for img in self[i][0].values():
                    if img is not None:
                        self._images_shape = img.shape
                        assert len(self._images_shape) == 3 and self._images_shape[-1] == 1, self._images_shape
                        break
                i += 1
        return self._images_shape

    # Private methods

    def _get_all_npz_files(self) -> dict[str, set[Path]]:
        """Returns the npz files of each node subdirectory, keyed by node name. Ordering is handled by the callers."""
        in_files = {}
        nodes = [x for x in self.path.iterdir() if x.is_dir()]
        for node in nodes:
            dir_name = self.path / node.name
            in_files[node.name] = set(dir_name.glob("*.npz"))
        assert not any(len(x) == 0 for x in in_files.values()), f"{ [k for k, v in in_files.items() if len(v) == 0] }"
        return in_files

    def _build_dataset_drop(self) -> tuple[dict[str, list[Path]], list[str]]:
        """Keeps only the data points that exist for every node"""
        in_files = self._get_all_npz_files()
        common = set(x.name for x in next(iter(in_files.values())))
        nodes = in_files.keys()
        for node in nodes:
            common = common.intersection([f.name for f in in_files[node]])
            assert len(common) > 0, f"Node '{node}' made the intersection null"
        common = natsorted(list(common))
        logger.info(f"Found {len(common)} data points for each node ({len(nodes)} nodes).")
        files_per_node = {node: [self.path / node / x for x in common] for node in nodes}
        return files_per_node, common

    def _build_dataset_fill_none(self) -> tuple[dict[str, list[Path]], list[str]]:
        """Keeps the union of all nodes' data points, filling the missing ones with None"""
        in_files = self._get_all_npz_files()
        all_files = set(x.name for x in next(iter(in_files.values())))
        nodes = in_files.keys()
        for node in nodes:
            all_files = all_files.union([f.name for f in in_files[node]])
        all_files = natsorted(list(all_files))
        logger.info(f"Found {len(all_files)} data points as union of all nodes' data ({len(nodes)} nodes).")

        files_per_node = {node: [] for node in nodes}
        # Sets for O(1) membership checks below
        in_file_names = {node: {f.name for f in in_files[node]} for node in nodes}
        for node in nodes:
            for file_name in all_files:
                file_path = self.path / node / file_name if file_name in in_file_names[node] else None
                files_per_node[node].append(file_path)
        return files_per_node, all_files

    def _build_dataset(self, handle_missing_data: str) -> tuple[dict[str, list[Path]], list[str]]:
        logger.debug(f"Building dataset from: '{self.path}'")
        if handle_missing_data == "drop":
            return self._build_dataset_drop()
        return self._build_dataset_fill_none()

    def _read_node_data(self, node: NEONode, index: int) -> np.ndarray | None:
        """Reads the npz data from the disk and transforms it properly"""
        file_path = self.files_per_node[node.name][index]
        if file_path is None:
            return None
        item = np.load(file_path, allow_pickle=True)["arr_0"]
        transformed_item = node.load_from_disk(item)
        return transformed_item

    # Python magic methods (pretty printing the reader object, reader[0], len(reader) etc.)
    def __getitem__(self, index: int) -> tuple[dict[str, np.ndarray], str, list[str]]:
        """Reads the data of all the desired nodes for one data point"""
        assert isinstance(index, int), type(index)
        res = {}
        item_name = self.file_names[index]
        for node in self.nodes:
            item = self._read_node_data(node, index)
            assert self.handle_missing_data == "fill_none" or item is not None, item_name
            res[node.name] = item
        return (res, item_name, self.node_names)

    def __len__(self) -> int:
        return len(self.files_per_node[self.node_names[0]])

    def __str__(self):
        f_str = "[NEO Reader]"
        f_str += f"\n - Path: '{self.path}'"
        f_str += f"\n - Only full data: {self.handle_missing_data == 'drop'}"
        f_str += f"\n - Nodes ({len(self.nodes)}): {self.nodes}"
        f_str += f"\n - Length: {len(self)}"
        return f_str

    def __repr__(self):
        return str(self)


def get_args() -> Namespace:
    """CLI args"""
    parser = ArgumentParser()
    parser.add_argument("dataset_path", type=Path)
    parser.add_argument("--handle_missing_data", choices=("drop", "fill_none"), default="fill_none")
    args = parser.parse_args()
    return args


def main(args: Namespace):
    """Main function: builds a reader over the given dataset and prints a random data point"""
    reader = NEOReader(args.dataset_path, nodes=None, handle_missing_data=args.handle_missing_data)
    print(reader)
    print(f"Shape: {reader.images_shape}")

    data, name, node_names = reader[np.random.randint(len(reader))]
    print(f"Name: {name}. Nodes: {node_names}")
    pprint({k: (v.shape if v is not None else None) for k, v in data.items()})


if __name__ == "__main__":
    main(get_args())
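
# Usage sketch. The paths and node names below are hypothetical; any NEO dataset laid out as
# documented in the NEOReader docstring should work, with node directories named after the
# keys of NEOReader.name_to_type.
#
# From the CLI:
#   python neo_reader.py /data/neo_1month --handle_missing_data drop
#
# Programmatically:
#   reader = NEOReader(Path("/data/neo_1month"), nodes=["AOD", "NDVI"])
#   data, name, node_names = reader[0]
#   aod = data["AOD"]  # (H, W, 1) np.ndarray, or None under "fill_none" if this file is missing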