yaya36095 committed on
Commit
67be71e
·
verified ·
1 Parent(s): 0895697

Update config.json

Browse files
Files changed (1) hide show
  1. config.json +31 -127
config.json CHANGED
@@ -1,131 +1,35 @@
1
  {
2
  "model_type": "xlm-roberta",
3
-
4
- "model_config": {
5
- "_name_or_path": "xlm-roberta-base",
6
- "architectures": [
7
- "XLMRobertaForSequenceClassification"
8
- ],
9
- "attention_probs_dropout_prob": 0.1,
10
- "bos_token_id": 0,
11
- "classifier_dropout": null,
12
- "eos_token_id": 2,
13
- "hidden_act": "gelu",
14
- "hidden_dropout_prob": 0.1,
15
- "hidden_size": 768,
16
- "initializer_range": 0.02,
17
- "intermediate_size": 3072,
18
- "layer_norm_eps": 1e-05,
19
- "max_position_embeddings": 514,
20
- "model_type": "xlm-roberta",
21
- "num_attention_heads": 12,
22
- "num_hidden_layers": 12,
23
- "output_past": true,
24
- "pad_token_id": 1,
25
- "position_embedding_type": "absolute",
26
- "problem_type": "single_label_classification",
27
- "torch_dtype": "float32",
28
- "transformers_version": "4.44.2",
29
- "type_vocab_size": 1,
30
- "use_cache": true,
31
- "vocab_size": 250002
32
- },
33
-
34
- "training_config": {
35
- "learning_rate": 2e-5,
36
- "num_train_epochs": 3,
37
- "per_device_train_batch_size": 16,
38
- "per_device_eval_batch_size": 16,
39
- "warmup_steps": 500,
40
- "weight_decay": 0.01,
41
- "logging_dir": "./logs",
42
- "evaluation_strategy": "epoch"
43
- },
44
-
45
- "tokenizer_config": {
46
- "model_max_length": 512,
47
- "padding_side": "right",
48
- "truncation_side": "right",
49
- "special_tokens": {
50
- "pad_token": "<pad>",
51
- "unk_token": "<unk>",
52
- "bos_token": "<s>",
53
- "eos_token": "</s>"
54
- }
55
- },
56
-
57
- "inference_config": {
58
- "task": "text-classification",
59
- "labels": ["HUMAN", "AI"],
60
- "threshold": 0.5,
61
- "max_length": 512,
62
- "batch_size": 32,
63
- "options": {
64
- "wait_for_model": true,
65
- "use_cache": true
66
- }
67
- },
68
-
69
- "api_config": {
70
- "endpoint": "https://api-inference.huggingface.co/models/yaya36095/text-detector",
71
- "headers": {
72
- "Content-Type": "application/json"
73
- },
74
- "cors": {
75
- "allow_origin": "*",
76
- "allow_headers": [
77
- "authorization",
78
- "x-client-info",
79
- "apikey",
80
- "content-type"
81
- ]
82
- }
83
- },
84
-
85
- "model_info": {
86
- "name": "text-detector",
87
- "version": "1.0.0",
88
- "author": "yaya36095",
89
- "description": "A model for detecting AI-generated vs human-written text",
90
- "license": "MIT",
91
- "repository": "https://huggingface.co/yaya36095/text-detector",
92
- "languages": ["multilingual"],
93
- "tags": [
94
- "text-classification",
95
- "ai-detection",
96
- "xlm-roberta"
97
- ]
98
- },
99
-
100
- "environment": {
101
- "framework": "transformers",
102
- "framework_version": "4.44.2",
103
- "python_version": ">=3.8.0",
104
- "cuda_support": true,
105
- "required_packages": {
106
- "torch": ">=1.10.0",
107
- "transformers": ">=4.44.2",
108
- "numpy": ">=1.19.0"
109
- }
110
- },
111
-
112
- "logging_config": {
113
- "level": "INFO",
114
- "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
115
- "development_mode": {
116
- "debug": true,
117
- "verbose": true
118
- },
119
- "production_mode": {
120
- "debug": false,
121
- "verbose": false
122
- }
123
- },
124
-
125
- "performance_metrics": {
126
- "accuracy_threshold": 0.85,
127
- "latency_threshold_ms": 500,
128
- "max_batch_size": 64,
129
- "memory_limit_mb": 4096
130
  }
131
  }
 
1
  {
2
  "model_type": "xlm-roberta",
3
+ "architectures": [
4
+ "XLMRobertaForSequenceClassification"
5
+ ],
6
+ "problem_type": "single_label_classification",
7
+ "num_labels": 2,
8
+ "torch_dtype": "float32",
9
+ "vocab_size": 250002,
10
+ "hidden_size": 768,
11
+ "num_hidden_layers": 12,
12
+ "num_attention_heads": 12,
13
+ "intermediate_size": 3072,
14
+ "hidden_act": "gelu",
15
+ "hidden_dropout_prob": 0.1,
16
+ "attention_probs_dropout_prob": 0.1,
17
+ "max_position_embeddings": 514,
18
+ "type_vocab_size": 1,
19
+ "initializer_range": 0.02,
20
+ "layer_norm_eps": 1e-05,
21
+ "pad_token_id": 1,
22
+ "bos_token_id": 0,
23
+ "eos_token_id": 2,
24
+ "use_cache": true,
25
+ "transformers_version": "4.44.2",
26
+ "tokenizer_class": "XLMRobertaTokenizer",
27
+ "label2id": {
28
+ "HUMAN": 0,
29
+ "AI": 1
30
+ },
31
+ "id2label": {
32
+ "0": "HUMAN",
33
+ "1": "AI"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  }
35
  }