WayenVan commited on
Commit
3e15bc4
·
verified ·
1 Parent(s): aeffd06

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -1,35 +1,9 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
  *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
2
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
 
 
 
 
4
  *.h5 filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  *.tflite filter=lfs diff=lfs merge=lfs -text
6
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.ot filter=lfs diff=lfs merge=lfs -text
8
+ *.onnx filter=lfs diff=lfs merge=lfs -text
9
+ model.safetensors filter=lfs diff=lfs merge=lfs -text
 
 
README.md ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ - de
5
+ tags:
6
+ - translation
7
+ - wmt19
8
+ - facebook
9
+ license: apache-2.0
10
+ datasets:
11
+ - wmt19
12
+ metrics:
13
+ - bleu
14
+ thumbnail: https://huggingface.co/front/thumbnails/facebook.png
15
+ ---
16
+
17
+ # FSMT
18
+
19
+ ## Model description
20
+
21
+ This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for en-de.
22
+
23
+ For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
24
+
25
+ The abbreviation FSMT stands for FairSeqMachineTranslation.
26
+
27
+ All four models are available:
28
+
29
+ * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
30
+ * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
31
+ * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
32
+ * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
33
+
34
+ ## Intended uses & limitations
35
+
36
+ #### How to use
37
+
38
+ ```python
39
+ from transformers import FSMTForConditionalGeneration, FSMTTokenizer
40
+ mname = "facebook/wmt19-en-de"
41
+ tokenizer = FSMTTokenizer.from_pretrained(mname)
42
+ model = FSMTForConditionalGeneration.from_pretrained(mname)
43
+
44
+ input = "Machine learning is great, isn't it?"
45
+ input_ids = tokenizer.encode(input, return_tensors="pt")
46
+ outputs = model.generate(input_ids)
47
+ decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
48
+ print(decoded) # Maschinelles Lernen ist großartig, oder?
49
+
50
+ ```
51
+
52
+ #### Limitations and bias
53
+
54
+ - The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
55
+
56
+ ## Training data
57
+
58
+ Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
59
+
60
+ ## Eval results
61
+
62
+ pair | fairseq | transformers
63
+ -------|---------|----------
64
+ en-de | [43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862) | 42.83
65
+
66
+ The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
67
+ - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
68
+ - re-ranking
69
+
70
+ The score was calculated using this code:
71
+
72
+ ```bash
73
+ git clone https://github.com/huggingface/transformers
74
+ cd transformers
75
+ export PAIR=en-de
76
+ export DATA_DIR=data/$PAIR
77
+ export SAVE_DIR=data/$PAIR
78
+ export BS=8
79
+ export NUM_BEAMS=15
80
+ mkdir -p $DATA_DIR
81
+ sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
82
+ sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
83
+ echo $PAIR
84
+ PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
85
+ ```
86
+ note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
87
+
88
+ ## Data Sources
89
+
90
+ - [training, etc.](http://www.statmt.org/wmt19/)
91
+ - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
92
+
93
+
94
+ ### BibTeX entry and citation info
95
+
96
+ ```bibtex
97
+ @inproceedings{...,
98
+ year={2020},
99
+ title={Facebook FAIR's WMT19 News Translation Task Submission},
100
+ author={Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey},
101
+ booktitle={Proc. of WMT},
102
+ }
103
+ ```
104
+
105
+
106
+ ## TODO
107
+
108
+ - port model ensemble (fairseq uses 4 model checkpoints)
109
+
config.json ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "FSMTForConditionalGeneration"
4
+ ],
5
+ "model_type": "fsmt",
6
+ "activation_dropout": 0.0,
7
+ "activation_function": "relu",
8
+ "attention_dropout": 0.1,
9
+ "d_model": 1024,
10
+ "dropout": 0.2,
11
+ "init_std": 0.02,
12
+ "max_position_embeddings": 1024,
13
+ "num_hidden_layers": 6,
14
+ "src_vocab_size": 42024,
15
+ "tgt_vocab_size": 42024,
16
+ "langs": [
17
+ "en",
18
+ "de"
19
+ ],
20
+ "encoder_attention_heads": 16,
21
+ "encoder_ffn_dim": 8192,
22
+ "encoder_layerdrop": 0,
23
+ "encoder_layers": 6,
24
+ "decoder_attention_heads": 16,
25
+ "decoder_ffn_dim": 4096,
26
+ "decoder_layerdrop": 0,
27
+ "decoder_layers": 6,
28
+ "bos_token_id": 0,
29
+ "pad_token_id": 1,
30
+ "eos_token_id": 2,
31
+ "is_encoder_decoder": true,
32
+ "scale_embedding": true,
33
+ "tie_word_embeddings": true,
34
+ "num_beams": 5,
35
+ "early_stopping": false,
36
+ "length_penalty": 1.0
37
+ }
generation_config.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 0,
4
+ "decoder_start_token_id": 2,
5
+ "eos_token_id": 2,
6
+ "forced_eos_token_id": 2,
7
+ "max_length": 200,
8
+ "num_beams": 5,
9
+ "pad_token_id": 1,
10
+ "transformers_version": "4.27.0.dev0"
11
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7aaf73b048c34ac68ec1622b8f3f053425eecdc64c2d224713342b900f6e0d3
3
+ size 1079014272
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9976b462c4c42afd89e718f44639de30167fdb059fe2b3ad603579eaab73d66c
3
+ size 1079071572
tokenizer_config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "langs": [
3
+ "en",
4
+ "de"
5
+ ],
6
+ "model_max_length": 1024
7
+ }
vocab-src.json ADDED
The diff for this file is too large to render. See raw diff
 
vocab-tgt.json ADDED
The diff for this file is too large to render. See raw diff