isbondarev committed
Commit 714d7cc · verified · 1 Parent(s): 8e3fcc8

Upload tokenizer

chat_template.jinja ADDED
@@ -0,0 +1 @@
+ {% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ '<unk>' + system_message }}{% endif %}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ 'reserved_0' + content + 'reserved_1' }}{% elif message['role'] == 'assistant' %}{{ content }}{% endif %}{% endfor %}
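
For reference, this template prepends the system message (when present) after a <unk> token, wraps each user turn between reserved_0 and reserved_1, and emits assistant turns verbatim. Below is a minimal rendering sketch using apply_chat_template; the repository id is a placeholder, and the tokenizer must be loaded with trust_remote_code=True because it ships the custom tokenizer class defined further down.

from transformers import AutoTokenizer

# Placeholder repo id; substitute the actual model path.
tok = AutoTokenizer.from_pretrained("IndexTeam/Index-1.9B-Chat", trust_remote_code=True)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello"},
    {"role": "assistant", "content": "Hi there!"},
]

text = tok.apply_chat_template(messages, tokenize=False)
# Expected output per the template above:
# "<unk>You are a helpful assistant.reserved_0Helloreserved_1Hi there!"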
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": true
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
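
A quick sanity check of how these special tokens surface on the loaded tokenizer (a sketch; the ids shown assume the added_tokens_decoder block in tokenizer_config.json further down, where <unk> is 0, <s> is 1 and </s> is 2, the usual LLaMA-style SentencePiece layout):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("IndexTeam/Index-1.9B-Chat", trust_remote_code=True)  # placeholder repo id

# Special tokens as declared in special_tokens_map.json; padding reuses <unk>.
print(tok.bos_token, tok.eos_token, tok.unk_token, tok.pad_token)   # <s> </s> <unk> <unk>
print(tok.unk_token_id, tok.bos_token_id, tok.eos_token_id)         # expected 0 1 2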
tokenization_index.py ADDED
@@ -0,0 +1,266 @@
+ # coding=utf-8
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Tokenization classes for Index, modified from the LLaMA tokenizer."""
+ import os
+ from shutil import copyfile
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import sentencepiece as spm
+
+ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+ PRETRAINED_VOCAB_FILES_MAP = {}
+
+ SPIECE_UNDERLINE = "▁"
+
+
+ class IndexTokenizer(PreTrainedTokenizer):
+     """
+     Construct an Index tokenizer, based on a SentencePiece byte-pair-encoding model.
+
+     Args:
+         vocab_file (`str`):
+             Path to the vocabulary file.
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+     model_input_names = ["input_ids", "attention_mask"]
+
+     def __init__(
+         self,
+         vocab_file,
+         unk_token="<unk>",
+         bos_token="<s>",
+         eos_token="</s>",
+         pad_token=None,
+         sp_model_kwargs: Optional[Dict[str, Any]] = None,
+         add_bos_token=False,
+         add_eos_token=False,
+         decode_with_prefix_space=False,
+         clean_up_tokenization_spaces=False,
+         legacy=False,
+         **kwargs,
+     ):
+         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+         bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+         eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+         unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+         pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+         self.legacy = legacy
+
+         self.vocab_file = vocab_file
+         self.add_bos_token = add_bos_token
+         self.add_eos_token = add_eos_token
+         self.decode_with_prefix_space = decode_with_prefix_space
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(vocab_file)
+         self._no_prefix_space_tokens = None
+
+         super().__init__(
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             pad_token=pad_token,
+             add_bos_token=add_bos_token,
+             add_eos_token=add_eos_token,
+             sp_model_kwargs=self.sp_model_kwargs,
+             decode_with_prefix_space=decode_with_prefix_space,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             legacy=legacy,
+             **kwargs,
+         )
+
+     @property
+     def no_prefix_space_tokens(self):
+         if self._no_prefix_space_tokens is None:
+             vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
+             self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith("▁")}
+         return self._no_prefix_space_tokens
+
+     @property
+     def vocab_size(self):
+         """Returns the vocab size."""
+         return self.sp_model.get_piece_size()
+
+     @property
+     def bos_token_id(self) -> Optional[int]:
+         return self.sp_model.bos_id()
+
+     @property
+     def eos_token_id(self) -> Optional[int]:
+         return self.sp_model.eos_id()
+
+     def get_vocab(self):
+         """Returns the vocab as a dict."""
+         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+         vocab.update(self.added_tokens_encoder)
+         return vocab
+
+     def _tokenize(self, text):
+         """Returns a tokenized string."""
+         return self.sp_model.encode(text, out_type=str)
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) to an id using the vocab."""
+         return self.sp_model.piece_to_id(token)
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) to a token (str) using the vocab."""
+         token = self.sp_model.IdToPiece(index)
+         return token
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (strings) to a single string."""
+         # since we manually add the prefix space, we have to remove it when decoding
+         if tokens[0].startswith(SPIECE_UNDERLINE):
+             tokens[0] = tokens[0][1:]
+
+         current_sub_tokens = []
+         out_string = ""
+         prev_is_special = False
+         for i, token in enumerate(tokens):
+             # make sure that special tokens are not decoded using the sentencepiece model
+             if token in self.all_special_tokens:
+                 if not prev_is_special and i != 0 and self.legacy:
+                     out_string += " "
+                 out_string += self.sp_model.decode(current_sub_tokens) + token
+                 prev_is_special = True
+                 current_sub_tokens = []
+             else:
+                 current_sub_tokens.append(token)
+                 prev_is_special = False
+         out_string += self.sp_model.decode(current_sub_tokens)
+         return out_string
+
+     def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         """
+         Save the vocabulary and special tokens file to a directory.
+
+         Args:
+             save_directory (`str`):
+                 The directory in which to save the vocabulary.
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         out_vocab_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+         )
+
+         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+             copyfile(self.vocab_file, out_vocab_file)
+         elif not os.path.isfile(self.vocab_file):
+             with open(out_vocab_file, "wb") as fi:
+                 content_spiece_model = self.sp_model.serialized_model_proto()
+                 fi.write(content_spiece_model)
+
+         return (out_vocab_file,)
+
+     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+         output = bos_token_id + token_ids_0 + eos_token_id
+
+         if token_ids_1 is not None:
+             output = output + bos_token_id + token_ids_1 + eos_token_id
+
+         return output
+
+     def get_special_tokens_mask(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+     ) -> List[int]:
+         """
+         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` method.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+             )
+
+         bos_token_id = [1] if self.add_bos_token else []
+         eos_token_id = [1] if self.add_eos_token else []
+
+         if token_ids_1 is None:
+             return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+         return (
+             bos_token_id
+             + ([0] * len(token_ids_0))
+             + eos_token_id
+             + bos_token_id
+             + ([0] * len(token_ids_1))
+             + eos_token_id
+         )
+
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+         sequence pair mask has the following format:
+
+         ```
+         0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+         | first sequence    | second sequence |
+         ```
+
+         If token_ids_1 is None, only returns the first portion of the mask (0s).
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+         """
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+         output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+         if token_ids_1 is not None:
+             output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+         return output
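
A minimal usage sketch for the class above (the vocab path is a placeholder standing in for the tokenizer.model file uploaded alongside it). Because add_bos_token and add_eos_token default to False, build_inputs_with_special_tokens adds nothing around the ids, matching the settings in tokenizer_config.json below.

from tokenization_index import IndexTokenizer

# Placeholder path to the SentencePiece model file from this commit.
tok = IndexTokenizer(vocab_file="tokenizer.model")

enc = tok("Hello, world!")   # no BOS/EOS appended by default
ids = enc["input_ids"]
print(ids)
print(tok.decode(ids))       # round-trips back through convert_tokens_to_string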
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a11626b3c80cbfdd5f4b5585fce8938d178f3a0df739c3dd1c708ce944878af
+ size 1010047
tokenizer_config.json ADDED
@@ -0,0 +1,50 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_index.IndexTokenizer",
+       null
+     ]
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "decode_with_prefix_space": false,
+   "eos_token": "</s>",
+   "extra_special_tokens": {},
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<unk>",
+   "padding_side": "left",
+   "sp_model_kwargs": {},
+   "split_special_tokens": false,
+   "tokenizer_class": "IndexTokenizer",
+   "unk_token": "<unk>"
+ }
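
Two settings worth noting in this config: padding_side is "left" and pad_token reuses <unk>, the usual setup for batched generation with a decoder-only model, and model_max_length is the sentinel value meaning the tokenizer enforces no truncation limit. A hedged sketch of what batched encoding then produces (placeholder repo id again; return_tensors="pt" assumes torch is installed):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("IndexTeam/Index-1.9B-Chat", trust_remote_code=True)  # placeholder

batch = tok(["short prompt", "a somewhat longer prompt"], padding=True, return_tensors="pt")
# The shorter sequence is padded on the left with the <unk>/pad id,
# and attention_mask marks those padded positions with 0.
print(batch["input_ids"].shape)
print(batch["attention_mask"][0])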