updated configuration. Test ok

- Readme.md +78 -3
- SynStOp.py +76 -54
- usage.py +0 -16
Readme.md
CHANGED

@@ -29,18 +29,93 @@ dataset_info:
   - name: input
     dtype: string
   - name: output
     dtype: string
   - name: code
     dtype: string
   - name: res_var
     dtype: string
   - name: operation
     dtype: string
+  - name: id
+    dtype: int32
   splits:
-  - name: test
-    num_examples: 14661
-  - name: train
-    num_examples: 33939
+  - name: train
+    num_bytes: 3222948
+    num_examples: 33939
+  - name: test
+    num_bytes: 1392252
+    num_examples: 14661
+  download_size: 1178254
+  dataset_size: 4615200
+- config_name: small10
+  features:
+  - name: input
+    dtype: string
+  - name: output
+    dtype: string
+  - name: code
+    dtype: string
+  - name: res_var
+    dtype: string
+  - name: operation
+    dtype: string
+  - name: id
+    dtype: int32
+  splits:
+  - name: train
+    num_bytes: 956996
+    num_examples: 11313
+  - name: test
+    num_bytes: 413404
+    num_examples: 4887
+  download_size: 312419
+  dataset_size: 1370400
+- config_name: small15
+  features:
+  - name: input
+    dtype: string
+  - name: output
+    dtype: string
+  - name: code
+    dtype: string
+  - name: res_var
+    dtype: string
+  - name: operation
+    dtype: string
+  - name: id
+    dtype: int32
+  splits:
+  - name: train
+    num_bytes: 1074316
+    num_examples: 11313
+  - name: test
+    num_bytes: 464084
+    num_examples: 4887
+  download_size: 393420
+  dataset_size: 1538400
+- config_name: small20
+  features:
+  - name: input
+    dtype: string
+  - name: output
+    dtype: string
+  - name: code
+    dtype: string
+  - name: res_var
+    dtype: string
+  - name: operation
+    dtype: string
+  - name: id
+    dtype: int32
+  splits:
+  - name: train
+    num_bytes: 1191636
+    num_examples: 11313
+  - name: test
+    num_bytes: 514764
+    num_examples: 4887
+  download_size: 472415
+  dataset_size: 1706400
 ---
 
 # Dataset Card for Small String Operations Dataset
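With these configurations in place, loading looks roughly like this (a minimal sketch; it assumes the dataset is published under `PaDaS-Lab/SynStOp`, the homepage set in this commit, while the old usage.py pushed to `PaDaS-Lab/stop-small`, so adjust the repo id to wherever the data actually lives):

```python
from datasets import load_dataset

# Full "small" config: 33939 train / 14661 test examples per the YAML above.
ds = load_dataset("PaDaS-Lab/SynStOp", "small")

# Length-restricted configs added in this commit.
ds10 = load_dataset("PaDaS-Lab/SynStOp", "small10")
print(ds10["train"].num_rows, ds10["test"].num_rows)  # 11313 4887 per the split metadata
```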
SynStOp.py
CHANGED

@@ -35,11 +35,42 @@ year={2023}
 # TODO: Add description of the dataset here
 # You can copy an official description
 _DESCRIPTION = """\
-Minimal dataset for intended for LM development and testing using python string operations.
+Minimal dataset intended for LM development and testing using Python string operations.
+The dataset is created by running different one-line Python string operations on random strings.
+The idea is that a transformer implementation can learn the string operations, and that this task is
+a good proxy task for other transformer operations on real languages and real tasks. Consequently,
+the dataset is small and can be used in the development process without large-scale infrastructure.
+
+There are different configurations for the dataset:
+
+- `small`: fewer than 50k instances of various string lengths, containing only slicing operations, i.e. all Python operations expressible as `s[i:j:k]` (which also includes string reversal).
+  - You can further choose subsets according to either length or the kind of operation.
+- `small10`: like `small`, but only strings up to length 10
+- `small15`: like `small`, but only strings up to length 15
+- `small20`: like `small`, but only strings up to length 20
+
+The fields have the following meaning:
+
+- `input`: input string, i.e. the string and the string operation
+- `output`: output of the string operation
+- `code`: code for running the string operation in Python
+- `res_var`: name of the result variable
+- `operation`: kind of operation:
+  - `step_x` for `s[::x]`
+  - `char_at_x` for `s[x]`
+  - `slice_x:y` for `s[x:y]`
+  - `slice_step_x:y:z` for `s[x:y:z]`
+  - `slice_reverse_i:j:k` for `s[i:i+j][::k]`
+
+Siblings of `data` contain additional metadata about the dataset:
+
+- `prompt` describes possible prompts based on that data, split into input prompts and output prompts.
 """
 
 # TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = ""
+_HOMEPAGE = "https://huggingface.co/PaDaS-Lab/SynStOp"
 
 # TODO: Add the licence for the dataset here if you can find it
 _LICENSE = "Apache 2.0 License"
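The operation labels in the new description map directly onto Python slicing; a small illustrative sketch (the example string is hypothetical, the mapping follows the list above):

```python
s = "abcdefghij"  # hypothetical 10-character input string

assert s[::2] == "acegi"            # operation "step_2"
assert s[3] == "d"                  # operation "char_at_3"
assert s[2:7] == "cdefg"            # operation "slice_2:7"
assert s[1:9:3] == "beh"            # operation "slice_step_1:9:3"
assert s[2:2 + 5][::-1] == "gfedc"  # operation "slice_reverse_2:5:-1"
```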

@@ -48,19 +79,44 @@ _LICENSE = "Apache 2.0 License"
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
-    "small":
+    "small": {
+        "train": ["./small/stop_10_train.json.gz", "./small/stop_20_train.json.gz", "./small/stop_15_train.json.gz"],
+        "test": ["./small/stop_10_test.json.gz", "./small/stop_20_test.json.gz", "./small/stop_15_test.json.gz"]
+    },
+    "small15": {
+        "train": ["./small/stop_15_train.json.gz"],
+        "test": ["./small/stop_15_test.json.gz"]
+    },
+    "small10": {
+        "train": ["./small/stop_10_train.json.gz"],
+        "test": ["./small/stop_10_test.json.gz"]
+    },
+    "small20": {
+        "train": ["./small/stop_20_train.json.gz"],
+        "test": ["./small/stop_20_test.json.gz"]
+    }
 }
 
 
+class SynStOpDatasetConfig(datasets.BuilderConfig):
+
+    def __init__(self, subset="small", length=(10, 15, 20), **kwargs):
+        """BuilderConfig for SynStOpDataset.
+        Args:
+          subset: name of the data subdirectory, e.g. "small".
+          length: tuple of string lengths to include.
+          **kwargs: keyword arguments forwarded to super.
+        """
+        super(SynStOpDatasetConfig, self).__init__(**kwargs)
+        self.subset = subset
+        self.length = length
+        self.files = {
+            "train": ["./{subset}/stop_{length}_train.json.gz".format(subset=subset, length=l) for l in length],
+            "test": ["./{subset}/stop_{length}_test.json.gz".format(subset=subset, length=l) for l in length],
+        }
+
+
 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-class StopDataset(datasets.GeneratorBasedBuilder):
+class SynStOpDataset(datasets.GeneratorBasedBuilder):
     """TODO: Short description of my dataset."""
 
     VERSION = datasets.Version("0.0.1")
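As a quick check of what the new config class computes, a sketch (the values follow directly from the constructor above and match the `_URLS` entries):

```python
cfg = SynStOpDatasetConfig(name="small15", length=(15,))
print(cfg.files)
# {'train': ['./small/stop_15_train.json.gz'],
#  'test': ['./small/stop_15_test.json.gz']}
```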
@@ -72,31 +128,24 @@ class StopDataset(datasets.GeneratorBasedBuilder):
     # You will be able to load one or the other configurations in the following list with
     # data = datasets.load_dataset('my_dataset', 'small')
     # data = datasets.load_dataset('my_dataset', 'second_domain')
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="small[filter]", version=VERSION, description="Small string operations dataset with string slices only. [] allows to specify a comma separated list of filters on the length (i.e. l=X) and operations (i.e. o=y)"),
-    ]
+    BUILDER_CONFIGS = [SynStOpDatasetConfig(name="small", length=(10, 15, 20), version=VERSION, description="Small set of string operations with string slices only")] + \
+        [SynStOpDatasetConfig(name=f"small{l1}", length=(l1,), version=datasets.Version("0.0.1"), description="Small set of string operations with string slices only") for l1 in [10, 15, 20]]
 
     DEFAULT_CONFIG_NAME = "small"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
 
     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
         features = datasets.Features(
             {
                 "input": datasets.Value("string"),
                 "output": datasets.Value("string"),
                 "code": datasets.Value("string"),
                 "res_var": datasets.Value("string"),
-                "operation": datasets.Value("string")
+                "operation": datasets.Value("string"),
+                "id": datasets.Value("int32"),
                 # These are the features of your dataset like images, labels ...
             }
         )
-        self._init_filters(self.config.name[len("small"):].strip("[]").split(","))
-        self.config.name = self.config.name[:len("small")]
-        else:
-            raise NotImplementedError()
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
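The `BUILDER_CONFIGS` expression above yields exactly the four configuration names declared in the README front matter; a one-line sanity check of the name construction:

```python
names = ["small"] + [f"small{l1}" for l1 in [10, 15, 20]]
assert names == ["small", "small10", "small15", "small20"]
```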
@@ -112,18 +161,6 @@ class StopDataset(datasets.GeneratorBasedBuilder):
         # Citation for the dataset
         citation=_CITATION,
     )
-
-    def _init_filters(self, filters):
-        self.filter_operations = []
-        self.filter_len = []
-        for filter in filters:
-            if filter == "": continue
-            k, v = filter.split("=")
-            if k == "l":
-                self.filter_len.append(int(v))
-            elif k == "o":
-                self.filter_operations.append(re.compile(v))
-
     def _split_generators(self, dl_manager):
         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
@@ -131,16 +168,14 @@ class StopDataset(datasets.GeneratorBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        urls =
-        if len(self.filter_len)>0:
-            urls = [url for url in urls if any([f"stop_{str(len)}_t" in url for len in self.filter_len])]
+        urls = self.config.files
         data_dir = dl_manager.download_and_extract(urls)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath":
+                    "filepath": data_dir["train"],
                     "split": "train",
                 },
             ),
@@ -149,22 +184,12 @@ class StopDataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath":
+                    "filepath": data_dir["test"],
                     "split": "test",
                 },
             ),
         ]
 
-    def _match_operations_filter(self, operation):
-        if self.filter_operations is not None:
-            matches = False
-            for filter in self.filter_operations:
-                if filter.matches(operation):
-                    matches = True
-                    break
-            return matches
-        else: return True
-
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, filepath, split):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
@@ -176,10 +201,6 @@ class StopDataset(datasets.GeneratorBasedBuilder):
         for ix, data in enumerate(dataset):
 
             if self.config.name.startswith("small"):
-
-                if self._match_operations_filter(data["operation"]):
-                    continue
-
                 # Yields examples as (key, example) tuples
                 id = data["id"] if "id" in data else count
                 count = count + 1
@@ -188,6 +209,7 @@ class StopDataset(datasets.GeneratorBasedBuilder):
                     "output": data["output"],
                     "code": data["code"],
                     "res_var": data["res_var"],
+                    "id": id,
                     "operation": data["operation"]
                 }
             else:
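For concreteness, a sketch of one `(key, example)` tuple yielded after this change; the field values here are hypothetical, but the keys match the `features` declared in `_info`:

```python
key, example = 7, {
    "input": "'abcdefghij'[::2]",     # hypothetical rendering of the input field
    "output": "acegi",
    "code": "res = 'abcdefghij'[::2]",
    "res_var": "res",
    "id": 7,                          # taken from data["id"] if present, else a running count
    "operation": "step_2",
}
```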
usage.py
DELETED

@@ -1,16 +0,0 @@
-import datasets
-
-if __name__=="__main__":
-    # load locally from this repo
-    ds = datasets.load_dataset("./stop.py", "small")
-
-    ds.push_to_hub("PaDaS-Lab/stop-small")
-
-    from datasets import load_dataset
-
-    dataset = load_dataset("PaDaS-Lab/stop-small")
-    print(dataset)
-
-    # load locally from this repo
-    # load locally from this repo
-