Datasets:
				
			
			
	
			
	
		
			
	
		
		Delete loading script
Browse files- PGLearn-ExtraLarge-Midwest24k.py +0 -393
    	
        PGLearn-ExtraLarge-Midwest24k.py
    DELETED
    
    | @@ -1,393 +0,0 @@ | |
| 1 | 
            -
            from __future__ import annotations
         | 
| 2 | 
            -
            from dataclasses import dataclass
         | 
| 3 | 
            -
            from pathlib import Path
         | 
| 4 | 
            -
            import json
         | 
| 5 | 
            -
            import shutil
         | 
| 6 | 
            -
             | 
| 7 | 
            -
            import datasets as hfd
         | 
| 8 | 
            -
            import h5py
         | 
| 9 | 
            -
            import pgzip as gzip
         | 
| 10 | 
            -
            import pyarrow as pa
         | 
| 11 | 
            -
             | 
| 12 | 
            -
            # ┌──────────────┐
         | 
| 13 | 
            -
            # │   Metadata   │
         | 
| 14 | 
            -
            # └──────────────┘
         | 
| 15 | 
            -
             | 
| 16 | 
            -
@dataclass
class CaseSizes:
    """Component counts for a power grid case.

    These fix the length of every per-component feature array in the
    dataset schema (see `input_features` and the formulation feature builders).
    """
    n_bus:    int  # number of buses
    n_load:   int  # number of loads
    n_gen:    int  # number of generators
    n_branch: int  # number of branches (lines/transformers)
| 22 | 
            -
             | 
| 23 | 
            -
# Case identity and schema dimensions for this dataset.
CASENAME = "Midwest24k"
SIZES = CaseSizes(n_bus=23643, n_load=11731, n_gen=5646, n_branch=33739)
# Total number of samples in the single "data" split.
NUM_SAMPLES = 52699
# Files that exceed the Hub's single-file limits are stored as shards;
# maps the logical file path to its ordered shard paths.
SPLITFILES = {
    "data/SOCOPF/dual.h5.gz": ["data/SOCOPF/dual/xaa", "data/SOCOPF/dual/xab", "data/SOCOPF/dual/xac", "data/SOCOPF/dual/xad"],
}

URL = "https://huggingface.co/datasets/PGLearn/PGLearn-ExtraLarge-Midwest24k"
DESCRIPTION = """\
The Midwest24k PGLearn optimal power flow dataset, part of the PGLearn-ExtraLarge collection. \
"""
VERSION = hfd.Version("1.0.0")
# Description template for the default config; `{case}` is filled with CASENAME.
DEFAULT_CONFIG_DESCRIPTION="""\
This configuration contains input, primal solution, and dual solution data \
for the ACOPF, DCOPF, and SOCOPF formulations on the {case} system. For case data, \
download the case.json.gz file from the `script` branch of the repository. \
https://huggingface.co/datasets/PGLearn/PGLearn-ExtraLarge-Midwest24k/blob/script/case.json.gz
"""
# Logged once in _split_generators to steer users toward faster loading paths.
USE_ML4OPF_WARNING = """
================================================================================================
  Loading PGLearn-ExtraLarge-Midwest24k through the `datasets.load_dataset` function may be slow.

  Consider using ML4OPF to directly convert to `torch.Tensor`; for more info see:
    https://github.com/AI4OPT/ML4OPF?tab=readme-ov-file#manually-loading-data

  Or, use `huggingface_hub.snapshot_download` and an HDF5 reader; for more info see:
    https://huggingface.co/datasets/PGLearn/PGLearn-ExtraLarge-Midwest24k#downloading-individual-files
================================================================================================
"""
CITATION = """\
@article{klamkinpglearn,
  title={{PGLearn - An Open-Source Learning Toolkit for Optimal Power Flow}},
  author={Klamkin, Michael and Tanneau, Mathieu and Van Hentenryck, Pascal},
  year={2025},
}\
"""

# Whether the hosted files carry a ".gz" suffix (see Config.gz_ext).
IS_COMPRESSED = True
         | 
| 61 | 
            -
             | 
| 62 | 
            -
            # ┌──────────────────┐
         | 
| 63 | 
            -
            # │   Formulations   │
         | 
| 64 | 
            -
            # └──────────────────┘
         | 
| 65 | 
            -
             | 
| 66 | 
            -
            def acopf_features(sizes: CaseSizes, primal: bool, dual: bool, meta: bool):
         | 
| 67 | 
            -
                features = {}
         | 
| 68 | 
            -
                if primal: features.update(acopf_primal_features(sizes))
         | 
| 69 | 
            -
                if dual:   features.update(acopf_dual_features(sizes))
         | 
| 70 | 
            -
                if meta:   features.update({f"ACOPF/{k}": v for k, v in META_FEATURES.items()})
         | 
| 71 | 
            -
                return features
         | 
| 72 | 
            -
             | 
| 73 | 
            -
            def dcopf_features(sizes: CaseSizes, primal: bool, dual: bool, meta: bool):
         | 
| 74 | 
            -
                features = {}
         | 
| 75 | 
            -
                if primal: features.update(dcopf_primal_features(sizes))
         | 
| 76 | 
            -
                if dual:   features.update(dcopf_dual_features(sizes))
         | 
| 77 | 
            -
                if meta:   features.update({f"DCOPF/{k}": v for k, v in META_FEATURES.items()})
         | 
| 78 | 
            -
                return features
         | 
| 79 | 
            -
             | 
| 80 | 
            -
            def socopf_features(sizes: CaseSizes, primal: bool, dual: bool, meta: bool):
         | 
| 81 | 
            -
                features = {}
         | 
| 82 | 
            -
                if primal: features.update(socopf_primal_features(sizes))
         | 
| 83 | 
            -
                if dual:   features.update(socopf_dual_features(sizes))
         | 
| 84 | 
            -
                if meta:   features.update({f"SOCOPF/{k}": v for k, v in META_FEATURES.items()})
         | 
| 85 | 
            -
                return features
         | 
| 86 | 
            -
             | 
| 87 | 
            -
# Maps each available formulation name to its schema-builder function.
# NOTE(review): ACOPF is commented out here even though its feature builders
# exist below -- presumably disabled for this release; confirm before enabling.
FORMULATIONS_TO_FEATURES = {
    # "ACOPF": acopf_features,
    "DCOPF": dcopf_features,
    "SOCOPF": socopf_features,
}
         | 
| 92 | 
            -
             | 
| 93 | 
            -
            # ┌───────────────────┐
         | 
| 94 | 
            -
            # │   BuilderConfig   │
         | 
| 95 | 
            -
            # └───────────────────┘
         | 
| 96 | 
            -
             | 
| 97 | 
            -
            class PGLearnExtraLargeMidwest24kConfig(hfd.BuilderConfig):
         | 
| 98 | 
            -
                """BuilderConfig for PGLearn-ExtraLarge-Midwest24k. 
         | 
| 99 | 
            -
                By default, primal solution data, metadata, input, casejson, are included.
         | 
| 100 | 
            -
             | 
| 101 | 
            -
                To modify the default configuration, pass attributes of this class to `datasets.load_dataset`:
         | 
| 102 | 
            -
                
         | 
| 103 | 
            -
                Attributes:
         | 
| 104 | 
            -
                    formulations (list[str]): The formulation(s) to include, e.g. ["ACOPF", "DCOPF"]
         | 
| 105 | 
            -
                    primal (bool, optional): Include primal solution data. Defaults to True.
         | 
| 106 | 
            -
                    dual (bool, optional): Include dual solution data. Defaults to False.
         | 
| 107 | 
            -
                    meta (bool, optional): Include metadata. Defaults to True.
         | 
| 108 | 
            -
                    input (bool, optional): Include input data. Defaults to True.
         | 
| 109 | 
            -
                    casejson (bool, optional): Include case.json data. Defaults to True.
         | 
| 110 | 
            -
                """
         | 
| 111 | 
            -
                def __init__(self,
         | 
| 112 | 
            -
                        formulations: list[str],
         | 
| 113 | 
            -
                        primal: bool=True, dual: bool=False, meta: bool=True, input: bool = True, casejson: bool=True,
         | 
| 114 | 
            -
                        compressed: bool=IS_COMPRESSED, **kwargs
         | 
| 115 | 
            -
                    ):
         | 
| 116 | 
            -
                    super(PGLearnExtraLargeMidwest24kConfig, self).__init__(version=VERSION, **kwargs)
         | 
| 117 | 
            -
             | 
| 118 | 
            -
                    self.case = CASENAME
         | 
| 119 | 
            -
                    self.formulations = formulations
         | 
| 120 | 
            -
             | 
| 121 | 
            -
                    self.primal = primal
         | 
| 122 | 
            -
                    self.dual = dual
         | 
| 123 | 
            -
                    self.meta = meta
         | 
| 124 | 
            -
                    self.input = input
         | 
| 125 | 
            -
                    self.casejson = casejson
         | 
| 126 | 
            -
             | 
| 127 | 
            -
                    self.gz_ext = ".gz" if compressed else ""
         | 
| 128 | 
            -
             | 
| 129 | 
            -
                @property
         | 
| 130 | 
            -
                def size(self):
         | 
| 131 | 
            -
                    return SIZES
         | 
| 132 | 
            -
             | 
| 133 | 
            -
                @property
         | 
| 134 | 
            -
                def features(self):
         | 
| 135 | 
            -
                    features = {}
         | 
| 136 | 
            -
                    if self.casejson: features.update(case_features())
         | 
| 137 | 
            -
                    if self.input: features.update(input_features(SIZES))
         | 
| 138 | 
            -
                    for formulation in self.formulations:
         | 
| 139 | 
            -
                        features.update(FORMULATIONS_TO_FEATURES[formulation](SIZES, self.primal, self.dual, self.meta))
         | 
| 140 | 
            -
                    return hfd.Features(features)
         | 
| 141 | 
            -
                
         | 
| 142 | 
            -
                @property
         | 
| 143 | 
            -
                def splits(self):
         | 
| 144 | 
            -
                    splits: dict[hfd.Split, dict[str, str | int]] = {}
         | 
| 145 | 
            -
                    splits["data"] = {
         | 
| 146 | 
            -
                        "name": "data",
         | 
| 147 | 
            -
                        "num_examples": NUM_SAMPLES
         | 
| 148 | 
            -
                    }
         | 
| 149 | 
            -
                    return splits
         | 
| 150 | 
            -
                
         | 
| 151 | 
            -
                @property
         | 
| 152 | 
            -
                def urls(self):
         | 
| 153 | 
            -
                    urls: dict[str, None | str | list] = {
         | 
| 154 | 
            -
                        "case": None, "data": [],
         | 
| 155 | 
            -
                    }
         | 
| 156 | 
            -
             | 
| 157 | 
            -
                    if self.casejson:
         | 
| 158 | 
            -
                        urls["case"] = f"case.json" + self.gz_ext
         | 
| 159 | 
            -
                    else:
         | 
| 160 | 
            -
                        urls.pop("case")
         | 
| 161 | 
            -
             | 
| 162 | 
            -
                    split_names = ["data"]
         | 
| 163 | 
            -
             | 
| 164 | 
            -
                    for split in split_names:
         | 
| 165 | 
            -
                        if self.input: urls[split].append(f"{split}/input.h5" + self.gz_ext)
         | 
| 166 | 
            -
                        for formulation in self.formulations:
         | 
| 167 | 
            -
                            if self.primal:
         | 
| 168 | 
            -
                                filename = f"{split}/{formulation}/primal.h5" + self.gz_ext
         | 
| 169 | 
            -
                                if filename in SPLITFILES: urls[split].append(SPLITFILES[filename])
         | 
| 170 | 
            -
                                else: urls[split].append(filename)
         | 
| 171 | 
            -
                            if self.dual:
         | 
| 172 | 
            -
                                filename = f"{split}/{formulation}/dual.h5" + self.gz_ext
         | 
| 173 | 
            -
                                if filename in SPLITFILES: urls[split].append(SPLITFILES[filename])
         | 
| 174 | 
            -
                                else: urls[split].append(filename)
         | 
| 175 | 
            -
                            if self.meta:
         | 
| 176 | 
            -
                                filename = f"{split}/{formulation}/meta.h5" + self.gz_ext
         | 
| 177 | 
            -
                                if filename in SPLITFILES: urls[split].append(SPLITFILES[filename])
         | 
| 178 | 
            -
                                else: urls[split].append(filename)
         | 
| 179 | 
            -
                    return urls
         | 
| 180 | 
            -
             | 
| 181 | 
            -
            # ┌────────────────────┐
         | 
| 182 | 
            -
            # │   DatasetBuilder   │
         | 
| 183 | 
            -
            # └────────────────────┘
         | 
| 184 | 
            -
             | 
| 185 | 
            -
class PGLearnExtraLargeMidwest24k(hfd.ArrowBasedBuilder):
    """DatasetBuilder for PGLearn-ExtraLarge-Midwest24k.
    The main interface is `datasets.load_dataset` with `trust_remote_code=True`, e.g.

    ```python
    from datasets import load_dataset
    ds = load_dataset("PGLearn/PGLearn-ExtraLarge-Midwest24k", trust_remote_code=True,
        # modify the default configuration by passing kwargs
        formulations=["DCOPF"],
        dual=False,
        meta=False,
    )
    ```
    """

    DEFAULT_WRITER_BATCH_SIZE = 10000
    BUILDER_CONFIG_CLASS = PGLearnExtraLargeMidwest24kConfig
    DEFAULT_CONFIG_NAME=CASENAME
    # Single default config: every formulation, primal+dual+meta+input, no case.json.
    BUILDER_CONFIGS = [
        PGLearnExtraLargeMidwest24kConfig(
            name=CASENAME, description=DEFAULT_CONFIG_DESCRIPTION.format(case=CASENAME),
            formulations=list(FORMULATIONS_TO_FEATURES.keys()),
            primal=True, dual=True, meta=True, input=True, casejson=False,
        )
    ]

    def _info(self):
        # Schema and splits come from the active config; the dataset-level
        # DESCRIPTION is prepended to the config-specific description.
        return hfd.DatasetInfo(
            features=self.config.features, splits=self.config.splits,
            description=DESCRIPTION + self.config.description,
            homepage=URL, citation=CITATION,
        )

    def _split_generators(self, dl_manager: hfd.DownloadManager):
        # Steer users toward faster loading paths before a large download starts.
        hfd.logging.get_logger().warning(USE_ML4OPF_WARNING)

        filepaths = dl_manager.download_and_extract(self.config.urls)

        # Single "data" split; "case" is absent when casejson=False, hence .get.
        splits: list[hfd.SplitGenerator] = []
        splits.append(hfd.SplitGenerator(
            name=hfd.Split("data"),
            gen_kwargs=dict(case_file=filepaths.get("case", None), data_files=tuple(filepaths["data"]), n_samples=NUM_SAMPLES),
        ))
        return splits

    def _generate_tables(self, case_file: str | None, data_files: tuple[hfd.utils.track.tracked_str | list[hfd.utils.track.tracked_str]], n_samples: int):
        # NOTE(review): `open_maybe_gzip_cat` is defined elsewhere in this file;
        # presumably it opens a possibly-gzipped file, concatenating shard lists
        # into one stream -- confirm against its definition.
        # Round-trip through json so the case payload is a canonical JSON string.
        case_data: str | None = json.dumps(json.load(open_maybe_gzip_cat(case_file))) if case_file is not None else None
        data: dict[str, h5py.File] = {}
        for file in data_files:
            v = h5py.File(open_maybe_gzip_cat(file), "r")
            # Derive the feature-key prefix from the download's origin path:
            # sharded entries (lists) use the first shard's parent dirs, e.g.
            # ".../data/SOCOPF/dual/xaa" -> "SOCOPF/dual"; single files use the
            # last two parts, e.g. ".../data/SOCOPF/primal.h5.gz" -> "SOCOPF/primal".
            if isinstance(file, list):
                k = "/".join(Path(file[0].get_origin()).parts[-3:-1]).split(".")[0]
            else:
                k = "/".join(Path(file.get_origin()).parts[-2:]).split(".")[0]
            data[k] = v
        for k in list(data.keys()):
            # Input files key as "data/input"; strip the split dir -> "input"
            # so keys line up with the "input/..." feature names.
            if "/input" in k: data[k.split("/", 1)[1]] = data.pop(k)

        batch_size = self._writer_batch_size or self.DEFAULT_WRITER_BATCH_SIZE
        for i in range(0, n_samples, batch_size):
            # Last batch may be short.
            effective_batch_size = min(batch_size, n_samples - i)

            # One Arrow column per (file prefix, HDF5 dataset) pair that appears
            # in the configured schema; rows i..i+batch are sliced per dataset.
            sample_data = {
                f"{dk}/{k}":
                hfd.features.features.numpy_to_pyarrow_listarray(v[i:i + effective_batch_size, ...])
                for dk, d in data.items() for k, v in d.items() if f"{dk}/{k}" in self.config.features
            }

            if case_data is not None:
                # Same JSON string replicated across the batch.
                sample_data["case/json"] = pa.array([case_data] * effective_batch_size)

            yield i, pa.Table.from_pydict(sample_data)

        # Close the HDF5 handles once all batches have been yielded.
        for f in data.values():
            f.close()
         | 
| 260 | 
            -
             | 
| 261 | 
            -
            # ┌──────────────┐
         | 
| 262 | 
            -
            # │   Features   │
         | 
| 263 | 
            -
            # └──────────────┘
         | 
| 264 | 
            -
             | 
| 265 | 
            -
# Arrow/HF dtype names used by every feature declaration below.
FLOAT_TYPE = "float32"
INT_TYPE = "int64"
BOOL_TYPE = "bool"
STRING_TYPE = "string"
         | 
| 269 | 
            -
             | 
| 270 | 
            -
            def case_features():
         | 
| 271 | 
            -
                # FIXME: better way to share schema of case data -- need to treat jagged arrays
         | 
| 272 | 
            -
                return {
         | 
| 273 | 
            -
                    "case/json": hfd.Value(STRING_TYPE),
         | 
| 274 | 
            -
                }
         | 
| 275 | 
            -
             | 
| 276 | 
            -
# Per-sample solver metadata; prefixed with "<FORMULATION>/" by the
# *_features builders above.
META_FEATURES = {
    "meta/seed":                   hfd.Value(dtype=INT_TYPE),
    "meta/formulation":            hfd.Value(dtype=STRING_TYPE),
    "meta/primal_objective_value": hfd.Value(dtype=FLOAT_TYPE),
    "meta/dual_objective_value":   hfd.Value(dtype=FLOAT_TYPE),
    "meta/primal_status":          hfd.Value(dtype=STRING_TYPE),
    "meta/dual_status":            hfd.Value(dtype=STRING_TYPE),
    "meta/termination_status":     hfd.Value(dtype=STRING_TYPE),
    "meta/build_time":             hfd.Value(dtype=FLOAT_TYPE),
    "meta/extract_time":           hfd.Value(dtype=FLOAT_TYPE),
    "meta/solve_time":             hfd.Value(dtype=FLOAT_TYPE),
}
         | 
| 288 | 
            -
             | 
| 289 | 
            -
            def input_features(sizes: CaseSizes):
         | 
| 290 | 
            -
                return {
         | 
| 291 | 
            -
                    "input/pd":            hfd.Sequence(length=sizes.n_load,   feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 292 | 
            -
                    "input/qd":            hfd.Sequence(length=sizes.n_load,   feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 293 | 
            -
                    "input/gen_status":    hfd.Sequence(length=sizes.n_gen,    feature=hfd.Value(dtype=BOOL_TYPE)),
         | 
| 294 | 
            -
                    "input/branch_status": hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=BOOL_TYPE)),
         | 
| 295 | 
            -
                    "input/seed":          hfd.Value(dtype=INT_TYPE),
         | 
| 296 | 
            -
                }
         | 
| 297 | 
            -
             | 
| 298 | 
            -
            def acopf_primal_features(sizes: CaseSizes):
         | 
| 299 | 
            -
                return {
         | 
| 300 | 
            -
                    "ACOPF/primal/vm": hfd.Sequence(length=sizes.n_bus,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 301 | 
            -
                    "ACOPF/primal/va": hfd.Sequence(length=sizes.n_bus,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 302 | 
            -
                    "ACOPF/primal/pg": hfd.Sequence(length=sizes.n_gen,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 303 | 
            -
                    "ACOPF/primal/qg": hfd.Sequence(length=sizes.n_gen,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 304 | 
            -
                    "ACOPF/primal/pf": hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 305 | 
            -
                    "ACOPF/primal/pt": hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 306 | 
            -
                    "ACOPF/primal/qf": hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 307 | 
            -
                    "ACOPF/primal/qt": hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 308 | 
            -
                }
         | 
| 309 | 
            -
            def acopf_dual_features(sizes: CaseSizes):
         | 
| 310 | 
            -
                return {
         | 
| 311 | 
            -
                    "ACOPF/dual/kcl_p":     hfd.Sequence(length=sizes.n_bus,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 312 | 
            -
                    "ACOPF/dual/kcl_q":     hfd.Sequence(length=sizes.n_bus,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 313 | 
            -
                    "ACOPF/dual/vm":        hfd.Sequence(length=sizes.n_bus,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 314 | 
            -
                    "ACOPF/dual/pg":        hfd.Sequence(length=sizes.n_gen,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 315 | 
            -
                    "ACOPF/dual/qg":        hfd.Sequence(length=sizes.n_gen,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 316 | 
            -
                    "ACOPF/dual/ohm_pf":    hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 317 | 
            -
                    "ACOPF/dual/ohm_pt":    hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 318 | 
            -
                    "ACOPF/dual/ohm_qf":    hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 319 | 
            -
                    "ACOPF/dual/ohm_qt":    hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 320 | 
            -
                    "ACOPF/dual/pf":        hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 321 | 
            -
                    "ACOPF/dual/pt":        hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 322 | 
            -
                    "ACOPF/dual/qf":        hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 323 | 
            -
                    "ACOPF/dual/qt":        hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 324 | 
            -
                    "ACOPF/dual/va_diff":   hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 325 | 
            -
                    "ACOPF/dual/sm_fr":     hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 326 | 
            -
                    "ACOPF/dual/sm_to":     hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 327 | 
            -
                    "ACOPF/dual/slack_bus": hfd.Value(dtype=FLOAT_TYPE),
         | 
| 328 | 
            -
                }
         | 
| 329 | 
            -
            def dcopf_primal_features(sizes: CaseSizes):
         | 
| 330 | 
            -
                return {
         | 
| 331 | 
            -
                    "DCOPF/primal/va": hfd.Sequence(length=sizes.n_bus,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 332 | 
            -
                    "DCOPF/primal/pg": hfd.Sequence(length=sizes.n_gen,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 333 | 
            -
                    "DCOPF/primal/pf": hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 334 | 
            -
                }
         | 
| 335 | 
            -
            def dcopf_dual_features(sizes: CaseSizes):
         | 
| 336 | 
            -
                return {
         | 
| 337 | 
            -
                    "DCOPF/dual/kcl_p":     hfd.Sequence(length=sizes.n_bus,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 338 | 
            -
                    "DCOPF/dual/pg":        hfd.Sequence(length=sizes.n_gen,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 339 | 
            -
                    "DCOPF/dual/ohm_pf":    hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 340 | 
            -
                    "DCOPF/dual/pf":        hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 341 | 
            -
                    "DCOPF/dual/va_diff":   hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 342 | 
            -
                    "DCOPF/dual/slack_bus": hfd.Value(dtype=FLOAT_TYPE),
         | 
| 343 | 
            -
                }
         | 
| 344 | 
            -
            def socopf_primal_features(sizes: CaseSizes):
         | 
| 345 | 
            -
                return {
         | 
| 346 | 
            -
                    "SOCOPF/primal/w":  hfd.Sequence(length=sizes.n_bus,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 347 | 
            -
                    "SOCOPF/primal/pg": hfd.Sequence(length=sizes.n_gen,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 348 | 
            -
                    "SOCOPF/primal/qg": hfd.Sequence(length=sizes.n_gen,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 349 | 
            -
                    "SOCOPF/primal/pf": hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 350 | 
            -
                    "SOCOPF/primal/pt": hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 351 | 
            -
                    "SOCOPF/primal/qf": hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 352 | 
            -
                    "SOCOPF/primal/qt": hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 353 | 
            -
                    "SOCOPF/primal/wr": hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 354 | 
            -
                    "SOCOPF/primal/wi": hfd.Sequence(length=sizes.n_branch, feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 355 | 
            -
                }
         | 
| 356 | 
            -
            def socopf_dual_features(sizes: CaseSizes):
         | 
| 357 | 
            -
                return {
         | 
| 358 | 
            -
                    "SOCOPF/dual/kcl_p":   hfd.Sequence(length=sizes.n_bus,       feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 359 | 
            -
                    "SOCOPF/dual/kcl_q":   hfd.Sequence(length=sizes.n_bus,       feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 360 | 
            -
                    "SOCOPF/dual/w":       hfd.Sequence(length=sizes.n_bus,       feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 361 | 
            -
                    "SOCOPF/dual/pg":      hfd.Sequence(length=sizes.n_gen,       feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 362 | 
            -
                    "SOCOPF/dual/qg":      hfd.Sequence(length=sizes.n_gen,       feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 363 | 
            -
                    "SOCOPF/dual/ohm_pf":  hfd.Sequence(length=sizes.n_branch,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 364 | 
            -
                    "SOCOPF/dual/ohm_pt":  hfd.Sequence(length=sizes.n_branch,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 365 | 
            -
                    "SOCOPF/dual/ohm_qf":  hfd.Sequence(length=sizes.n_branch,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 366 | 
            -
                    "SOCOPF/dual/ohm_qt":  hfd.Sequence(length=sizes.n_branch,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 367 | 
            -
                    "SOCOPF/dual/jabr":    hfd.Array2D(shape=(sizes.n_branch, 4), dtype=FLOAT_TYPE),
         | 
| 368 | 
            -
                    "SOCOPF/dual/sm_fr":   hfd.Array2D(shape=(sizes.n_branch, 3), dtype=FLOAT_TYPE),
         | 
| 369 | 
            -
                    "SOCOPF/dual/sm_to":   hfd.Array2D(shape=(sizes.n_branch, 3), dtype=FLOAT_TYPE),
         | 
| 370 | 
            -
                    "SOCOPF/dual/va_diff": hfd.Sequence(length=sizes.n_branch,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 371 | 
            -
                    "SOCOPF/dual/wr":      hfd.Sequence(length=sizes.n_branch,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 372 | 
            -
                    "SOCOPF/dual/wi":      hfd.Sequence(length=sizes.n_branch,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 373 | 
            -
                    "SOCOPF/dual/pf":      hfd.Sequence(length=sizes.n_branch,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 374 | 
            -
                    "SOCOPF/dual/pt":      hfd.Sequence(length=sizes.n_branch,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 375 | 
            -
                    "SOCOPF/dual/qf":      hfd.Sequence(length=sizes.n_branch,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 376 | 
            -
                    "SOCOPF/dual/qt":      hfd.Sequence(length=sizes.n_branch,    feature=hfd.Value(dtype=FLOAT_TYPE)),
         | 
| 377 | 
            -
                }
         | 
| 378 | 
            -
             | 
| 379 | 
            -
            # ┌───────────────┐
         | 
| 380 | 
            -
            # │   Utilities   │
         | 
| 381 | 
            -
            # └───────────────┘
         | 
| 382 | 
            -
             | 
| 383 | 
            -
            def open_maybe_gzip_cat(path: str | list):
         | 
| 384 | 
            -
                if isinstance(path, list):
         | 
| 385 | 
            -
                    dest = Path(path[0]).parent.with_suffix(".h5")
         | 
| 386 | 
            -
                    if not dest.exists():
         | 
| 387 | 
            -
                        with open(dest, "wb") as dest_f:
         | 
| 388 | 
            -
                            for piece in path:
         | 
| 389 | 
            -
                                with open(piece, "rb") as piece_f:
         | 
| 390 | 
            -
                                    shutil.copyfileobj(piece_f, dest_f)
         | 
| 391 | 
            -
                        # shutil.rmtree(Path(piece).parent)
         | 
| 392 | 
            -
                    path = dest.as_posix()
         | 
| 393 | 
            -
                return gzip.open(path, "rb") if path.endswith(".gz") else open(path, "rb")
         | 
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | 
