MesserMMP committed on
Commit
6ca0278
·
verified ·
1 Parent(s): a6152bb

Training in progress, epoch 1

Browse files
Files changed (3) hide show
  1. config.json +82 -0
  2. model.safetensors +3 -0
  3. training_args.bin +3 -0
config.json ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation_function": "gelu",
3
+ "allow_embedding_resizing": true,
4
+ "architectures": [
5
+ "ModernBertForSequenceClassification"
6
+ ],
7
+ "attention_bias": false,
8
+ "attention_dropout": 0.0,
9
+ "attention_layer": "rope",
10
+ "attention_probs_dropout_prob": 0.0,
11
+ "attn_out_bias": false,
12
+ "attn_out_dropout_prob": 0.1,
13
+ "attn_qkv_bias": false,
14
+ "bert_layer": "prenorm",
15
+ "bos_token_id": 50281,
16
+ "classifier_activation": "gelu",
17
+ "classifier_bias": false,
18
+ "classifier_dropout": 0.0,
19
+ "classifier_pooling": "cls",
20
+ "cls_token_id": 50281,
21
+ "compile_model": true,
22
+ "decoder_bias": true,
23
+ "deterministic_flash_attn": false,
24
+ "embed_dropout_prob": 0.0,
25
+ "embed_norm": true,
26
+ "embedding_dropout": 0.0,
27
+ "embedding_layer": "sans_pos",
28
+ "eos_token_id": 50282,
29
+ "final_norm": true,
30
+ "global_attn_every_n_layers": 3,
31
+ "global_rope_theta": 160000.0,
32
+ "head_pred_act": "gelu",
33
+ "hidden_act": "gelu",
34
+ "hidden_activation": "gelu",
35
+ "hidden_size": 768,
36
+ "init_method": "full_megatron",
37
+ "initializer_cutoff_factor": 2.0,
38
+ "initializer_range": 0.02,
39
+ "intermediate_size": 1152,
40
+ "local_attention": 128,
41
+ "local_attn_rotary_emb_base": 10000.0,
42
+ "local_rope_theta": 10000.0,
43
+ "loss_function": "fa_cross_entropy",
44
+ "loss_kwargs": {
45
+ "reduction": "mean"
46
+ },
47
+ "masked_prediction": true,
48
+ "max_position_embeddings": 8192,
49
+ "mlp_bias": false,
50
+ "mlp_dropout": 0.0,
51
+ "mlp_dropout_prob": 0.0,
52
+ "mlp_in_bias": false,
53
+ "mlp_layer": "glu",
54
+ "mlp_out_bias": false,
55
+ "model_type": "modernbert",
56
+ "norm_bias": false,
57
+ "norm_eps": 1e-05,
58
+ "norm_kwargs": {
59
+ "bias": false,
60
+ "eps": 1e-05
61
+ },
62
+ "normalization": "layernorm",
63
+ "num_attention_heads": 12,
64
+ "num_hidden_layers": 22,
65
+ "pad_token_id": 50283,
66
+ "padding": "unpadded",
67
+ "problem_type": "single_label_classification",
68
+ "repad_logits_with_grad": false,
69
+ "rotary_emb_base": 160000.0,
70
+ "rotary_emb_dim": null,
71
+ "rotary_emb_interleaved": false,
72
+ "rotary_emb_scale_base": null,
73
+ "sep_token_id": 50282,
74
+ "skip_first_prenorm": true,
75
+ "sliding_window": 128,
76
+ "sparse_pred_ignore_index": -100,
77
+ "sparse_prediction": false,
78
+ "torch_dtype": "float32",
79
+ "transformers_version": "4.51.3",
80
+ "unpad_embeddings": true,
81
+ "vocab_size": 50368
82
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e504c2ce129125897d2d0f00ad8c1bf15ee32a69dbf6340c02b603cbefc750b
3
+ size 598439784
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:068a581faa5f24a80920b90e265edf5afd44075e108030a01a5bf14054b6ad59
3
+ size 5240