model update
- README.md +10 -10
- config.json +1 -1
- eval/metric.first.answer.paragraph.questions_answers.lmqg_qag_tweetqa.default.json +1 -1
- eval/samples.test.hyp.paragraph.questions_answers.lmqg_qag_tweetqa.default.txt +0 -0
- eval/samples.validation.hyp.paragraph.questions_answers.lmqg_qag_tweetqa.default.txt +0 -0
- pytorch_model.bin +2 -2
- tokenizer_config.json +1 -1
- trainer_config.json +1 -1
README.md
CHANGED
@@ -14,7 +14,7 @@ pipeline_tag: text2text-generation
 tags:
 - questions and answers generation
 widget:
-- text: " Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records."
+- text: "generate question and answer: Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records."
   example_title: "Questions & Answers Generation Example 1"
 model-index:
 - name: lmqg/t5-base-tweetqa-qag
@@ -29,19 +29,19 @@ model-index:
 metrics:
 - name: BLEU4
   type: bleu4
-  value: 0.
+  value: 0.13263946554139405
 - name: ROUGE-L
   type: rouge-l
-  value: 0.
+  value: 0.36935780155247455
 - name: METEOR
   type: meteor
-  value: 0.
+  value: 0.3081166404528711
 - name: BERTScore
   type: bertscore
-  value: 0.
+  value: 0.9085398159959508
 - name: MoverScore
   type: moverscore
-  value: 0.
+  value: 0.6231917023300243
 ---
 
 # Model Card of `lmqg/t5-base-tweetqa-qag`
@@ -94,7 +94,7 @@ from transformers import pipeline
 # initialize model
 pipe = pipeline("text2text-generation", 'lmqg/t5-base-tweetqa-qag')
 # question generation
-question = pipe(' Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records.')
+question = pipe('generate question and answer: Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records.')
 
 ```
 
@@ -105,7 +105,7 @@ question = pipe(' Beyonce further expanded her acting career, starring as blues
 
 | Dataset | Type | BLEU4 | ROUGE-L | METEOR | BERTScore | MoverScore | Link |
 |:--------|:-----|------:|--------:|-------:|----------:|-----------:|-----:|
-| [lmqg/qag_tweetqa](https://huggingface.co/datasets/lmqg/qag_tweetqa) | default | 0.
+| [lmqg/qag_tweetqa](https://huggingface.co/datasets/lmqg/qag_tweetqa) | default | 0.133 | 0.369 | 0.308 | 0.909 | 0.623 | [link](https://huggingface.co/lmqg/t5-base-tweetqa-qag/raw/main/eval/metric.first.answer.paragraph.questions_answers.lmqg_qag_tweetqa.default.json) |
 
 
 
@@ -117,11 +117,11 @@ The following hyperparameters were used during fine-tuning:
 - dataset_name: default
 - input_types: ['paragraph']
 - output_types: ['questions_answers']
-- prefix_types:
+- prefix_types: ['qag']
 - model: t5-base
 - max_length: 256
 - max_length_output: 128
-- epoch:
+- epoch: 14
 - batch: 32
 - lr: 0.0001
 - fp16: False
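For convenience, the updated usage snippet from the README above, assembled into a self-contained form (model id and prompt prefix taken from the diff; output handling is illustrative):

```python
from transformers import pipeline

# load the question & answer generation model from the Hub
pipe = pipeline("text2text-generation", "lmqg/t5-base-tweetqa-qag")

# this update prepends the task prefix "generate question and answer: " to the input
output = pipe(
    "generate question and answer: Beyonce further expanded her acting career, "
    "starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records."
)
print(output)  # list of dicts with a "generated_text" field
```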
config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "lmqg_output/t5_base_tweetqa/
+  "_name_or_path": "lmqg_output/t5_base_tweetqa/model_mzgdpa/epoch_10",
   "add_prefix": true,
   "architectures": [
     "T5ForConditionalGeneration"
eval/metric.first.answer.paragraph.questions_answers.lmqg_qag_tweetqa.default.json
CHANGED
@@ -1 +1 @@
-{"validation": {"Bleu_1": 0.
+{"validation": {"Bleu_1": 0.3738463313336194, "Bleu_2": 0.25187710741309555, "Bleu_3": 0.17301367663064196, "Bleu_4": 0.12056853153991282, "METEOR": 0.3347661830653345, "ROUGE_L": 0.3823696390369866, "BERTScore": 0.9027718756456522, "MoverScore": 0.6215475528953677}, "test": {"Bleu_1": 0.4051769369789305, "Bleu_2": 0.2754558448773594, "Bleu_3": 0.18928799859905374, "Bleu_4": 0.13263946554139405, "METEOR": 0.3081166404528711, "ROUGE_L": 0.36935780155247455, "BERTScore": 0.9085398159959508, "MoverScore": 0.6231917023300243}}
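The rounded scores in the README results table come from the "test" block of this file; a minimal sketch of recovering them (file path as committed in this update):

```python
import json

# load the evaluation metrics committed in this update
with open("eval/metric.first.answer.paragraph.questions_answers.lmqg_qag_tweetqa.default.json") as f:
    metrics = json.load(f)

# the README table reports the test split, rounded to three decimals
test = metrics["test"]
for name in ("Bleu_4", "ROUGE_L", "METEOR", "BERTScore", "MoverScore"):
    print(name, round(test[name], 3))
# Bleu_4 0.133, ROUGE_L 0.369, METEOR 0.308, BERTScore 0.909, MoverScore 0.623
```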
eval/samples.test.hyp.paragraph.questions_answers.lmqg_qag_tweetqa.default.txt
CHANGED
The diff for this file is too large to render; see the raw diff.
eval/samples.validation.hyp.paragraph.questions_answers.lmqg_qag_tweetqa.default.txt
CHANGED
The diff for this file is too large to render; see the raw diff.
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:455ccd84b8bf52107ea8a0f778dd49f4872385736cf28f55160802d5b314e03e
+size 891647935
tokenizer_config.json
CHANGED
@@ -1 +1 @@
-{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "model_max_length": 512, "name_or_path": "lmqg_output/t5_base_tweetqa/
+{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "model_max_length": 512, "name_or_path": "lmqg_output/t5_base_tweetqa/model_mzgdpa/epoch_10", "special_tokens_map_file": "lmqg_output/t5_base_tweetqa/model_mzgdpa/epoch_10/special_tokens_map.json", "tokenizer_class": "T5Tokenizer"}
trainer_config.json
CHANGED
@@ -1 +1 @@
-{"dataset_path": "lmqg/qag_tweetqa", "dataset_name": "default", "input_types": ["paragraph"], "output_types": ["questions_answers"], "prefix_types":
+{"dataset_path": "lmqg/qag_tweetqa", "dataset_name": "default", "input_types": ["paragraph"], "output_types": ["questions_answers"], "prefix_types": ["qag"], "model": "t5-base", "max_length": 256, "max_length_output": 128, "epoch": 14, "batch": 32, "lr": 0.0001, "fp16": false, "random_seed": 1, "gradient_accumulation_steps": 2, "label_smoothing": 0.0}
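The fields filled in here mirror the hyperparameter list in the README; a small sketch of reading them back (path as committed in this repository):

```python
import json

# trainer_config.json carries the fine-tuning hyperparameters listed in the README
with open("trainer_config.json") as f:
    cfg = json.load(f)

print(cfg["prefix_types"])  # ['qag'] -- prefix type added in this update
print(cfg["epoch"])         # 14
print(cfg["model"], cfg["lr"], cfg["batch"])  # t5-base 0.0001 32
```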