sharkMeow committed
Commit e7a52c8 · verified · 1 parent: c8ab44d

Training in progress, step 13600

Files changed (3):
  1. config.json +31 -3
  2. model.safetensors +1 -1
  3. training_args.bin +2 -2
config.json CHANGED
@@ -1,5 +1,4 @@
 {
-  "_name_or_path": "OFA-Sys/chinese-clip-vit-base-patch16",
   "architectures": [
     "ChineseCLIPDualTextModel"
   ],
@@ -9,26 +8,55 @@
   "model_type": "chinese_clip",
   "projection_dim": 512,
   "text_config": {
+    "_attn_implementation_autoset": true,
     "architectures": [
       "ChineseCLIPTextModel"
     ],
+    "attention_probs_dropout_prob": 0.1,
     "bos_token_id": 0,
     "directionality": "bidi",
     "eos_token_id": 2,
+    "hidden_act": "gelu",
+    "hidden_dropout_prob": 0.1,
+    "hidden_size": 768,
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 3072,
+    "layer_norm_eps": 1e-12,
+    "max_position_embeddings": 512,
     "model_type": "chinese_clip_text_model",
+    "num_attention_heads": 12,
+    "num_hidden_layers": 12,
     "output_past": true,
     "pooler_fc_size": 768,
     "pooler_num_attention_heads": 12,
     "pooler_num_fc_layers": 3,
     "pooler_size_per_head": 128,
     "pooler_type": "first_token_transform",
+    "position_embedding_type": "absolute",
+    "torch_dtype": "float32",
+    "type_vocab_size": 2,
+    "use_cache": true,
     "vocab_size": 21128
   },
   "torch_dtype": "float32",
-  "transformers_version": "4.45.0",
+  "transformers_version": "4.50.0",
   "vision_config": {
+    "attention_dropout": 0.0,
     "dropout": 0.0,
+    "hidden_act": "quick_gelu",
+    "hidden_size": 768,
+    "image_size": 224,
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 3072,
+    "layer_norm_eps": 1e-05,
     "model_type": "chinese_clip_vision_model",
+    "num_attention_heads": 12,
+    "num_channels": 3,
+    "num_hidden_layers": 12,
-    "patch_size": 16
+    "patch_size": 16,
+    "projection_dim": 512,
+    "torch_dtype": "float32"
   }
 }
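
The new config.json pins every text- and vision-tower hyperparameter explicitly instead of relying on library defaults, and drops the `_name_or_path` pointer to the base checkpoint. Below is a minimal sketch of how the updated file could be pulled and inspected; the repo id is a hypothetical placeholder and the revision handling is an assumption, not part of this commit.

```python
# Sketch only: fetch the updated config.json from the Hub and print a few of the
# hyperparameters that this commit writes out explicitly.
import json

from huggingface_hub import hf_hub_download

config_path = hf_hub_download(
    repo_id="sharkMeow/your-repo-name",  # hypothetical repo id, replace with the real one
    filename="config.json",
    revision="e7a52c8",                  # the commit shown above (short hash; full hash also works)
)

with open(config_path, encoding="utf-8") as f:
    config = json.load(f)

print(config["transformers_version"])           # expected: 4.50.0
print(config["text_config"]["hidden_size"])     # 768
print(config["vision_config"]["patch_size"])    # 16
```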
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9b82630c8b787df58b407d612433aaf91ed9a2132fda3564c3a9d62367008d21
+oid sha256:8290529a13185094079b8790cfe81ab7b8423a8a83e1eb750610ec9a6a94f765
 size 816611916
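
Only the LFS pointer changed here: the checkpoint keeps its 816611916-byte size and gets a new sha256 oid for the weights saved at step 13600. A minimal sketch for checking that a locally downloaded model.safetensors matches the new oid, assuming the file sits in the current directory:

```python
# Sketch only: stream the file and compare its sha256 digest to the LFS pointer oid.
import hashlib

EXPECTED_OID = "8290529a13185094079b8790cfe81ab7b8423a8a83e1eb750610ec9a6a94f765"

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in 1 MiB chunks so the ~816 MB checkpoint is never fully in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

actual = sha256_of("model.safetensors")  # local path is an assumption
print("match" if actual == EXPECTED_OID else "mismatch", actual)
```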
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3b0180fe093698eb55e17573a57b410ec9e92d7ba108a1a4c3d585a948d09421
-size 5240
+oid sha256:e84f3d31103c491a87c4ddfd45c281d6b93b4b3769b8841dbe516755a9947f4a
+size 5368
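
training_args.bin is the pickled transformers TrainingArguments object saved by the Trainer; the small size change (5240 to 5368 bytes) suggests it was simply re-serialized under the newer transformers version. A minimal sketch for inspecting it locally, assuming you trust the file (it is a pickle) and have a compatible transformers install:

```python
# Sketch only: load the pickled TrainingArguments to see the run settings.
import torch

# weights_only=False is needed on newer PyTorch releases because the file stores an
# arbitrary Python object, not just tensors. Local path is an assumption; download
# training_args.bin from the repo first.
args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)               # typically "TrainingArguments"
print(args.save_steps, args.max_steps)   # checkpointing cadence vs. total steps
```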