#!/usr/bin/env python

# Usage:
# ./gen-card-allenai-wmt19.py

import os
from pathlib import Path

def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):

    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores, as "pair": [fairseq, transformers]; the fairseq column is
    # left at 0 for these ports, and only the transformers score (index 1)
    # is used in the card below.
    scores = {
        "wmt19-de-en-6-6-base": [0, 38.37],
        "wmt19-de-en-6-6-big": [0, 39.90],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---

language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- allenai
license: apache-2.0
datasets:
- http://www.statmt.org/wmt19/ ([test-set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561))
metrics:
- http://www.statmt.org/wmt19/metrics-task.html
---

# FSMT

## Model description

This is a ported version of fairseq-based [wmt19 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.

For more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).

Two models are available:

* [wmt19-de-en-6-6-big](https://huggingface.co/allenai/wmt19-de-en-6-6-big)
* [wmt19-de-en-6-6-base](https://huggingface.co/allenai/wmt19-de-en-6-6-base)

If you use these models, please cite:
```
@misc{{kasai2020deep,
    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
    year={{2020}},
    eprint={{2006.10369}},
    archivePrefix={{arXiv}},
    primaryClass={{cs.CL}}
}}
```

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input_text = "{texts[src_lang]}"
input_ids = tokenizer.encode(input_text, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded)  # {texts[tgt_lang]}
```
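
Reusing `tokenizer` and `model` from above, a minimal batched variant (a hypothetical sketch; `num_beams=5` is an illustrative choice, not a tuned default):

```python
# Hypothetical batched usage: translate several sentences at once.
batch = tokenizer(["{texts[src_lang]}"], return_tensors="pt", padding=True)
generated = model.generate(**batch, num_beams=5)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```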

#### Limitations and bias


## Training data

Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).

## Eval results

Here are the BLEU scores:

model   |  transformers
--------|--------------
{model_name}  |  {scores[model_name][1]}

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
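
Alternatively, the generated translations can be re-scored with the sacrebleu Python API; a minimal sketch (paths assume the run above):

```python
# Hypothetical: re-score the generated translations with sacrebleu's Python API.
import sacrebleu

with open("data/{pair}/test_translations.txt") as f:
    sys_lines = [line.rstrip() for line in f]
with open("data/{pair}/val.target") as f:
    ref_lines = [line.rstrip() for line in f]

print(sacrebleu.corpus_bleu(sys_lines, [ref_lines]).score)
```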

"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)

# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="de", tgt_lang="en", model_name=model_name)
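
# A hypothetical quick check (not part of the original script): render one card
# into a temporary directory so the repository tree stays untouched. The
# GEN_CARD_DRY_RUN environment flag is an assumption made for this sketch.
if os.environ.get("GEN_CARD_DRY_RUN"):
    import tempfile

    tmp_dir = Path(tempfile.mkdtemp()) / "allenai" / "wmt19-de-en-6-6-base"
    write_model_card(tmp_dir, src_lang="de", tgt_lang="en", model_name="wmt19-de-en-6-6-base")
    print((tmp_dir / "README.md").read_text(encoding="utf-8"))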