File size: 2,708 Bytes
90eacf5
 
 
 
 
 
 
 
 
2370f7d
 
 
 
 
90eacf5
 
 
64e52f8
 
 
90eacf5
 
 
 
2370f7d
90eacf5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2370f7d
90eacf5
 
 
 
 
2370f7d
 
90eacf5
 
 
 
2370f7d
90eacf5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
# time_mmd_multi.py
import os, json, datasets

# Human-readable description surfaced in the generated DatasetInfo.
_DESCRIPTION = (
    "Time-MMD-style multi-config dataset. One config per subfolder under data/."
)
# BibTeX citation string; intentionally empty until a citation is published.
_CITATION = ""


def _config_names():
    """Return the config names listed in configs.json next to this script."""
    script_dir = os.path.dirname(__file__)
    cfg_file = os.path.join(script_dir, "configs.json")
    with open(cfg_file, "r", encoding="utf-8") as fh:
        names = json.load(fh)
    return names


class _Cfg(datasets.BuilderConfig):
    """BuilderConfig whose name doubles as the data subdirectory under data/."""

    def __init__(self, name, **kwargs):
        # Every subset shares the same fixed dataset version.
        version = datasets.Version("1.0.0")
        super().__init__(name=name, version=version, **kwargs)
        # Remember which data/<subdir> this config reads from.
        self.subdir = name


class TimeMMDMulti(datasets.GeneratorBasedBuilder):
    """Multi-config builder: one config per subfolder under data/.

    Each subfolder may hold up to three JSON-Lines files (train.jsonl,
    validation.jsonl, test.jsonl); splits whose file is absent are
    simply omitted from the generated dataset.
    """

    # One config per name listed in configs.json (read at import time).
    BUILDER_CONFIGS = [
        _Cfg(name=n, description=f"Subset: {n}") for n in _config_names()
    ]
    # Fall back to None when configs.json lists no subsets.
    DEFAULT_CONFIG_NAME = BUILDER_CONFIGS[0].name if BUILDER_CONFIGS else None

    def _info(self):
        """Declare the feature schema shared by every config."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # 2-D float data: a sequence of rows of float32 values.
                    "batch_x": datasets.Sequence(
                        datasets.Sequence(datasets.Value("float32"))
                    ),
                    "batch_y": datasets.Sequence(
                        datasets.Sequence(datasets.Value("float32"))
                    ),
                    "pred_len": datasets.Value("int32"),
                    "split": datasets.Value("string"),
                    "domain": datasets.Value("string"),
                }
            ),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Emit one SplitGenerator per split file that exists on disk.

        `dl_manager` is unused: the data ships alongside the script, so
        nothing needs to be downloaded.
        """
        root = os.path.dirname(__file__)
        d = os.path.join(root, "data", self.config.subdir)
        paths = {
            "train": os.path.join(d, "train.jsonl"),
            "validation": os.path.join(d, "validation.jsonl"),
            "test": os.path.join(d, "test.jsonl"),
        }
        gens = []
        for name, p in paths.items():
            if os.path.exists(p):
                gens.append(
                    datasets.SplitGenerator(
                        # "train" -> datasets.Split.TRAIN, etc.
                        name=getattr(datasets.Split, name.upper()),
                        gen_kwargs={
                            "filepath": p,
                            "split_name": name,
                            "domain": self.config.subdir,
                        },
                    )
                )
        return gens

    def _generate_examples(self, filepath, split_name, domain):
        """Yield (key, example) pairs from one JSON-Lines split file.

        Fix: whitespace-only lines (e.g. a trailing newline at EOF) are
        skipped instead of crashing json.loads. Keys remain unique
        because they come from the raw line index.
        """
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                if not line.strip():
                    continue  # tolerate blank / trailing lines
                obj = json.loads(line)
                # Derive pred_len from batch_y's row count when absent.
                obj.setdefault("pred_len", len(obj.get("batch_y", [])))
                obj.setdefault("split", split_name)
                obj.setdefault("domain", domain)
                yield i, obj