redfernstech committed
Commit: 5800bf7 (verified)
Parent: 76bbdb3

Upload tokenizer

special_tokens_map.json CHANGED
@@ -6,13 +6,6 @@
     "rstrip": false,
     "single_word": false
   },
-  "cls_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
   "eos_token": {
     "content": "</s>",
     "lstrip": false,
@@ -20,21 +13,7 @@
     "rstrip": false,
     "single_word": false
   },
-  "mask_token": {
-    "content": "<mask>",
-    "lstrip": true,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
   "pad_token": {
-    "content": "<pad>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "sep_token": {
     "content": "</s>",
     "lstrip": false,
     "normalized": false,
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
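
tokenizer.model is tracked with Git LFS, so the three lines committed here are only a pointer; the actual SentencePiece model is fetched at checkout. A minimal sketch for checking a local copy against the pointer's oid and size (the file path is an assumption):

import hashlib
import os

# Expected values copied from the LFS pointer above.
EXPECTED_OID = "9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347"
EXPECTED_SIZE = 499723

path = "tokenizer.model"  # assumed local download location

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == EXPECTED_OID, "sha256 mismatch"
print("tokenizer.model matches its LFS pointer")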
tokenizer_config.json CHANGED
@@ -1,8 +1,10 @@
 {
-  "add_prefix_space": false,
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "add_prefix_space": null,
   "added_tokens_decoder": {
     "0": {
-      "content": "<s>",
+      "content": "<unk>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -10,7 +12,7 @@
       "special": true
     },
     "1": {
-      "content": "<pad>",
+      "content": "<s>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -24,35 +26,24 @@
       "rstrip": false,
       "single_word": false,
       "special": true
-    },
-    "3": {
-      "content": "<unk>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "50264": {
-      "content": "<mask>",
-      "lstrip": true,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
     }
   },
   "bos_token": "<s>",
   "clean_up_tokenization_spaces": false,
-  "cls_token": "<s>",
   "eos_token": "</s>",
-  "errors": "replace",
   "extra_special_tokens": {},
-  "mask_token": "<mask>",
-  "model_max_length": 512,
-  "pad_token": "<pad>",
-  "sep_token": "</s>",
-  "tokenizer_class": "RobertaTokenizer",
-  "trim_offsets": true,
-  "unk_token": "<unk>"
+  "legacy": false,
+  "max_length": 256,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_to_multiple_of": null,
+  "pad_token": "</s>",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "stride": 0,
+  "tokenizer_class": "LlamaTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
 }
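
Taken together, the changes swap a RoBERTa-style BPE configuration for a Llama SentencePiece one: tokenizer_class becomes LlamaTokenizer, <unk> and <s> move to IDs 0 and 1 (with </s> following), a BOS token is prepended to every encoding (add_bos_token: true) while no EOS is appended (add_eos_token: false), and </s> is reused for right-side padding. A minimal sketch of exercising that behaviour (the repo id is again a placeholder):

from transformers import AutoTokenizer

# Placeholder repo id -- substitute the real repository name.
tok = AutoTokenizer.from_pretrained("redfernstech/<repo-name>")

# add_bos_token: true  -> every encoding starts with <s>;
# add_eos_token: false -> nothing is appended at the end.
ids = tok("hello world").input_ids
assert ids[0] == tok.bos_token_id
assert ids[-1] != tok.eos_token_id

# Padding reuses </s> and goes on the right, per the new config.
batch = tok(["a", "a longer example"], padding=True)
print(batch.input_ids)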