Pieter Delobelle committed on
Commit 1d99c40
1 Parent(s): 30fc97e

Initial commit

Files changed (5)
  1. config.json +23 -0
  2. merges.txt +0 -0
  3. parameters.json +51 -0
  4. pytorch_model.bin +3 -0
  5. vocab.json +0 -0
config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "architectures": [
+     "RobertaForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "output_hidden_states": true,
+   "pad_token_id": 1,
+   "type_vocab_size": 1,
+   "vocab_size": 39985
+ }
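
This is the configuration of a 6-layer RoBERTa student (a distilled RobBERT, per parameters.json below). As a minimal sketch, the model can be rebuilt from these values with Hugging Face transformers; everything besides the config values themselves is assumed, not taken from this repo:

```python
from transformers import RobertaConfig, RobertaForMaskedLM

# Values copied from the config.json above.
config = RobertaConfig(
    vocab_size=39985,
    hidden_size=768,
    num_hidden_layers=6,
    num_attention_heads=12,
    intermediate_size=3072,
    max_position_embeddings=514,
    type_vocab_size=1,
    layer_norm_eps=1e-05,
    bos_token_id=0,
    pad_token_id=1,
    eos_token_id=2,
    output_hidden_states=True,
)
model = RobertaForMaskedLM(config)

# Roughly 74M parameters, consistent with the ~297 MB fp32 checkpoint below.
print(sum(p.numel() for p in model.parameters()))
```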
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
parameters.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "force": false,
+   "dump_path": "serialization_dir/distilrobbert-v2-mini",
+   "data_file": "data/binarized_text_mini.pdelobelle-robbert-v2-dutch-base.pickle",
+   "student_type": "roberta",
+   "student_config": "training_configs/distilrobbert-base.json",
+   "student_pretrained_weights": null,
+   "teacher_type": "roberta",
+   "teacher_name": "pdelobelle/robbert-v2-dutch-base",
+   "temperature": 2.0,
+   "alpha_ce": 5.0,
+   "alpha_mlm": 2.0,
+   "alpha_clm": 0.0,
+   "alpha_mse": 0.0,
+   "alpha_cos": 1.0,
+   "mlm": true,
+   "mlm_mask_prop": 0.15,
+   "word_mask": 0.8,
+   "word_keep": 0.1,
+   "word_rand": 0.1,
+   "mlm_smoothing": 0.7,
+   "token_counts": "data/token_counts_mini.pdelobelle-robbert-v2-dutch-base.pickle",
+   "restrict_ce_to_mask": false,
+   "freeze_pos_embs": true,
+   "freeze_token_type_embds": false,
+   "n_epoch": 3,
+   "batch_size": 5,
+   "group_by_size": true,
+   "gradient_accumulation_steps": 100,
+   "warmup_prop": 0.05,
+   "weight_decay": 0.01,
+   "learning_rate": 0.0005,
+   "adam_epsilon": 1e-06,
+   "max_grad_norm": 5.0,
+   "initializer_range": 0.02,
+   "fp16": false,
+   "fp16_opt_level": "O1",
+   "gpus": 1,
+   "local_rank": 0,
+   "seed": 56,
+   "log_interval": 500,
+   "checkpoint_interval": 4000,
+   "n_nodes": 1,
+   "node_id": 0,
+   "global_rank": 0,
+   "world_size": 1,
+   "n_gpu_per_node": 1,
+   "multi_gpu": false,
+   "is_master": true,
+   "multi_node": false
+ }
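
These argument names match the DistilBERT-style distillation recipe: the student is trained against the teacher pdelobelle/robbert-v2-dutch-base with a weighted sum of a soft-target cross-entropy at temperature 2.0 (alpha_ce), a masked-LM loss (alpha_mlm), and a hidden-state cosine loss (alpha_cos); alpha_clm and alpha_mse are disabled. A minimal sketch of how those weights combine into one loss; tensor names and shapes are illustrative, not code from this repo:

```python
import torch
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits,   # (B, T, V)
                      student_hidden, teacher_hidden,   # (B, T, H)
                      labels,                           # (B, T), -100 on unmasked positions
                      temperature=2.0,
                      alpha_ce=5.0, alpha_mlm=2.0, alpha_cos=1.0):
    # Soft-target cross-entropy between student and teacher distributions,
    # scaled by T^2 as usual for temperature-based distillation.
    loss_ce = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction="batchmean",
    ) * temperature ** 2
    # Standard MLM loss; cross_entropy ignores the -100 labels by default.
    loss_mlm = F.cross_entropy(
        student_logits.view(-1, student_logits.size(-1)), labels.view(-1)
    )
    # Cosine alignment between student and teacher hidden states.
    target = torch.ones(
        student_hidden.size(0) * student_hidden.size(1),
        device=student_hidden.device,
    )
    loss_cos = F.cosine_embedding_loss(
        student_hidden.view(-1, student_hidden.size(-1)),
        teacher_hidden.view(-1, teacher_hidden.size(-1)),
        target,
    )
    return alpha_ce * loss_ce + alpha_mlm * loss_mlm + alpha_cos * loss_cos
```

Note the effective batch size: with batch_size 5 and gradient_accumulation_steps 100 on a single GPU, each optimizer step sees 500 sequences.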
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:263923cc00aeac74bc11d38a11ea59cc4eb18d0cc72e5d93b05f492b7f5096fd
+ size 297109362
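
The entry above is a Git LFS pointer rather than the weights themselves; cloning with git-lfs replaces it with the full ~297 MB checkpoint. A small sketch for verifying a downloaded file against the pointer, with the oid and size copied from above and the local file name assumed:

```python
import hashlib
import os

# Expected values copied from the LFS pointer above.
EXPECTED_OID = "263923cc00aeac74bc11d38a11ea59cc4eb18d0cc72e5d93b05f492b7f5096fd"
EXPECTED_SIZE = 297109362

assert os.path.getsize("pytorch_model.bin") == EXPECTED_SIZE

# Hash the file in 1 MiB chunks to avoid loading it all into memory.
h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == EXPECTED_OID
```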
vocab.json ADDED
The diff for this file is too large to render. See raw diff
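
vocab.json and merges.txt, both too large to render here, are the byte-level BPE vocabulary and merge rules for the tokenizer. A minimal sketch of loading them directly, assuming both files have been fetched into the working directory:

```python
from transformers import RobertaTokenizer

# File names come from this commit's file list.
tokenizer = RobertaTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
print(tokenizer.tokenize("Dit is een voorbeeldzin."))
```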