# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import itertools
import os

from fairseq import options, utils
from fairseq.data import (
    ConcatDataset,
    data_utils,
    indexed_dataset,
    LanguagePairDataset,
    PrependTokenDataset,
)

from . import FairseqTask, register_task


def load_langpair_dataset(
    data_path, split,
    src, src_dict,
    tgt, tgt_dict,
    combine, dataset_impl, upsample_primary,
    left_pad_source, left_pad_target, max_source_positions,
    max_target_positions, prepend_bos=False, load_alignments=False,
):
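    """Load the source/target bitext for `split` from `data_path`, combining
    extra shards (e.g. ``train1``, ``train2``, ...) when `combine` is True,
    and return it wrapped in a :class:`LanguagePairDataset`."""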
    def split_exists(split, src, tgt, lang, data_path):
        filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
        return indexed_dataset.dataset_exists(filename, impl=dataset_impl)

    src_datasets = []
    tgt_datasets = []

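    # Shards beyond the first are suffixed with an index ('train1',
    # 'train2', ...); keep loading until one is missing, or stop after
    # the first shard when combine is False.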
    for k in itertools.count():
        split_k = split + (str(k) if k > 0 else '')

        # infer langcode
        if split_exists(split_k, src, tgt, src, data_path):
            prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
        elif split_exists(split_k, tgt, src, src, data_path):
            prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
        else:
            if k > 0:
                break
            else:
                raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))

        src_datasets.append(
            data_utils.load_indexed_dataset(prefix + src, src_dict, dataset_impl)
        )
        tgt_datasets.append(
            data_utils.load_indexed_dataset(prefix + tgt, tgt_dict, dataset_impl)
        )

        print('| {} {} {}-{} {} examples'.format(data_path, split_k, src, tgt, len(src_datasets[-1])))

        if not combine:
            break

    assert len(src_datasets) == len(tgt_datasets)

    if len(src_datasets) == 1:
        src_dataset, tgt_dataset = src_datasets[0], tgt_datasets[0]
    else:
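        # Multiple shards were loaded: concatenate them, upsampling the
        # first (primary) shard by `upsample_primary`.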
        sample_ratios = [1] * len(src_datasets)
        sample_ratios[0] = upsample_primary
        src_dataset = ConcatDataset(src_datasets, sample_ratios)
        tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)

    if prepend_bos:
        assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
        src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
        tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())

    align_dataset = None
    if load_alignments:
        align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt))
        if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
            align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl)

    return LanguagePairDataset(
        src_dataset, src_dataset.sizes, src_dict,
        tgt_dataset, tgt_dataset.sizes, tgt_dict,
        left_pad_source=left_pad_source,
        left_pad_target=left_pad_target,
        max_source_positions=max_source_positions,
        max_target_positions=max_target_positions,
        align_dataset=align_dataset,
    )
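
# Illustrative direct use of load_langpair_dataset (a sketch, not from the
# original file; in practice TranslationTask.load_dataset below calls it with
# values taken from `args`, and 'mmap' is just one possible dataset_impl):
#
#   dataset = load_langpair_dataset(
#       'data-bin/wmt14_en_de', 'train',
#       'en', src_dict, 'de', tgt_dict,
#       combine=True, dataset_impl='mmap', upsample_primary=1,
#       left_pad_source=True, left_pad_target=False,
#       max_source_positions=1024, max_target_positions=1024,
#   )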


@register_task('translation')
class TranslationTask(FairseqTask):
    """
    Translate from one (source) language to another (target) language.

    Args:
        src_dict (~fairseq.data.Dictionary): dictionary for the source language
        tgt_dict (~fairseq.data.Dictionary): dictionary for the target language

    .. note::

        The translation task is compatible with :mod:`fairseq-train`,
        :mod:`fairseq-generate` and :mod:`fairseq-interactive`.
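
        For example, an illustrative invocation (the data path and
        ``--arch`` value are placeholders)::

            $ fairseq-train data-bin/wmt14_en_de --task translation \
                --source-lang en --target-lang de --arch transformer ...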

    The translation task provides the following additional command-line
    arguments:

    .. argparse::
        :ref: fairseq.tasks.translation_parser
        :prog:
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('data', help='colon-separated list of data directories, '
                                         'iterated over in round-robin order across epochs')
        parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
                            help='source language')
        parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
                            help='target language')
        parser.add_argument('--lazy-load', action='store_true',
                            help='load the dataset lazily')
        parser.add_argument('--raw-text', action='store_true',
                            help='load raw text dataset')
        parser.add_argument('--load-alignments', action='store_true',
                            help='load the binarized alignments')
        parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
                            help='pad the source on the left')
        parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
                            help='pad the target on the left')
        parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the target sequence')
        parser.add_argument('--upsample-primary', default=1, type=int,
                            help='amount to upsample primary dataset')
        # fmt: on

    def __init__(self, args, src_dict, tgt_dict):
        super().__init__(args)
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task (e.g., load dictionaries).

        Args:
            args (argparse.Namespace): parsed command-line arguments
        """
        args.left_pad_source = options.eval_bool(args.left_pad_source)
        args.left_pad_target = options.eval_bool(args.left_pad_target)
        if getattr(args, 'raw_text', False):
            utils.deprecation_warning('--raw-text is deprecated, please use --dataset-impl=raw')
            args.dataset_impl = 'raw'
        elif getattr(args, 'lazy_load', False):
            utils.deprecation_warning('--lazy-load is deprecated, please use --dataset-impl=lazy')
            args.dataset_impl = 'lazy'

        paths = args.data.split(':')
        assert len(paths) > 0
        # find language pair automatically
        if args.source_lang is None or args.target_lang is None:
            args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0])
        if args.source_lang is None or args.target_lang is None:
            raise Exception('Could not infer language pair, please provide it explicitly')

        # load dictionaries
        src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang)))
        tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.target_lang)))
        assert src_dict.pad() == tgt_dict.pad()
        assert src_dict.eos() == tgt_dict.eos()
        assert src_dict.unk() == tgt_dict.unk()
        print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
        print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))

        return cls(args, src_dict, tgt_dict)

    def load_dataset(self, split, epoch=0, combine=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        paths = self.args.data.split(':')
        assert len(paths) > 0
        data_path = paths[epoch % len(paths)]

        # infer langcode
        src, tgt = self.args.source_lang, self.args.target_lang

        self.datasets[split] = load_langpair_dataset(
            data_path, split, src, self.src_dict, tgt, self.tgt_dict,
            combine=combine, dataset_impl=self.args.dataset_impl,
            upsample_primary=self.args.upsample_primary,
            left_pad_source=self.args.left_pad_source,
            left_pad_target=self.args.left_pad_target,
            max_source_positions=self.args.max_source_positions,
            max_target_positions=self.args.max_target_positions,
            load_alignments=self.args.load_alignments,
        )

    def build_dataset_for_inference(self, src_tokens, src_lengths):
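        """Build a source-only dataset for decoding raw inputs (no target side)."""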
        return LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary)

    def max_positions(self):
        """Return the max sentence length allowed by the task."""
        return (self.args.max_source_positions, self.args.max_target_positions)

    @property
    def source_dictionary(self):
        """Return the source :class:`~fairseq.data.Dictionary`."""
        return self.src_dict

    @property
    def target_dictionary(self):
        """Return the target :class:`~fairseq.data.Dictionary`."""
        return self.tgt_dict