# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import itertools
import os

from fairseq import options, utils
from fairseq.data import (
    ConcatDataset,
    data_utils,
    indexed_dataset,
    LanguagePairDataset,
)

from . import FairseqTask, register_task


@register_task('translation')
class TranslationTask(FairseqTask):
    """
    Translate from one (source) language to another (target) language.

    Args:
        src_dict (~fairseq.data.Dictionary): dictionary for the source language
        tgt_dict (~fairseq.data.Dictionary): dictionary for the target language

    .. note::

        The translation task is compatible with :mod:`fairseq-train`,
        :mod:`fairseq-generate` and :mod:`fairseq-interactive`.

    The translation task provides the following additional command-line
    arguments:

    .. argparse::
        :ref: fairseq.tasks.translation_parser
        :prog:
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('data', help='colon-separated list of data directories, \
                            iterated over in round-robin order across epochs')
        parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
                            help='source language')
        parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
                            help='target language')
        parser.add_argument('--lazy-load', action='store_true',
                            help='load the dataset lazily')
        parser.add_argument('--raw-text', default=False, action='store_true',
                            help='load raw text dataset')
        parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
                            help='pad the source on the left')
        parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
                            help='pad the target on the left')
        parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the target sequence')
        parser.add_argument('--upsample-primary', default=1, type=int,
                            help='amount to upsample primary dataset')
        # fmt: on

    def __init__(self, args, src_dict, tgt_dict):
        super().__init__(args)
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task (e.g., load dictionaries).

        Args:
            args (argparse.Namespace): parsed command-line arguments
        """
        args.left_pad_source = options.eval_bool(args.left_pad_source)
        args.left_pad_target = options.eval_bool(args.left_pad_target)
        if getattr(args, 'raw_text', False):
            utils.deprecation_warning('--raw-text is deprecated, please use --dataset-impl=raw')
            args.dataset_impl = 'raw'
        elif getattr(args, 'lazy_load', False):
            utils.deprecation_warning('--lazy-load is deprecated, please use --dataset-impl=lazy')
            args.dataset_impl = 'lazy'

        paths = args.data.split(':')
        assert len(paths) > 0
        # find language pair automatically
        if args.source_lang is None or args.target_lang is None:
            args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0])
        if args.source_lang is None or args.target_lang is None:
            raise Exception('Could not infer language pair, please provide it explicitly')

        # load dictionaries
        src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang)))
        tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.target_lang)))
        assert src_dict.pad() == tgt_dict.pad()
        assert src_dict.eos() == tgt_dict.eos()
        assert src_dict.unk() == tgt_dict.unk()
        print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
        print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))

        return cls(args, src_dict, tgt_dict)

    def load_dataset(self, split, epoch=0, combine=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        paths = self.args.data.split(':')
        assert len(paths) > 0
        data_path = paths[epoch % len(paths)]
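        # e.g. with --data /path/a:/path/b (hypothetical paths), epoch 0 loads
        # /path/a, epoch 1 loads /path/b, epoch 2 wraps around to /path/a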

        def split_exists(split, src, tgt, lang, data_path):
            filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
            return indexed_dataset.dataset_exists(filename, impl=self.args.dataset_impl)

        src_datasets = []
        tgt_datasets = []

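        # Shards of a split are discovered by suffixing the split name with an
        # index, e.g. for a hypothetical de-en pair:
        #   k == 0: train.de-en.de  / train.de-en.en
        #   k == 1: train1.de-en.de / train1.de-en.en
        # The loop below stops at the first missing shard (or after the first
        # shard if combine is False).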
        for k in itertools.count():
            split_k = split + (str(k) if k > 0 else '')

            # infer langcode
            src, tgt = self.args.source_lang, self.args.target_lang
            if split_exists(split_k, src, tgt, src, data_path):
                prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
            elif split_exists(split_k, tgt, src, src, data_path):
                prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
            else:
                if k > 0:
                    break
                else:
                    raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))

            src_datasets.append(indexed_dataset.make_dataset(prefix + src, impl=self.args.dataset_impl,
                                                             fix_lua_indexing=True, dictionary=self.src_dict))
            tgt_datasets.append(indexed_dataset.make_dataset(prefix + tgt, impl=self.args.dataset_impl,
                                                             fix_lua_indexing=True, dictionary=self.tgt_dict))

            print('| {} {} {} examples'.format(data_path, split_k, len(src_datasets[-1])))

            if not combine:
                break

        assert len(src_datasets) == len(tgt_datasets)

        if len(src_datasets) == 1:
            src_dataset, tgt_dataset = src_datasets[0], tgt_datasets[0]
        else:
            sample_ratios = [1] * len(src_datasets)
            sample_ratios[0] = self.args.upsample_primary
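            # e.g. with three shards and --upsample-primary 2, sample_ratios
            # is [2, 1, 1], so examples from the first (primary) shard appear
            # twice as often in the concatenated dataset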
            src_dataset = ConcatDataset(src_datasets, sample_ratios)
            tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)

        self.datasets[split] = LanguagePairDataset(
            src_dataset, src_dataset.sizes, self.src_dict,
            tgt_dataset, tgt_dataset.sizes, self.tgt_dict,
            left_pad_source=self.args.left_pad_source,
            left_pad_target=self.args.left_pad_target,
            max_source_positions=self.args.max_source_positions,
            max_target_positions=self.args.max_target_positions,
        )

    def build_dataset_for_inference(self, src_tokens, src_lengths):
        """Build a source-only dataset (no targets) for inference."""
        return LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary)

    def max_positions(self):
        """Return the max sentence length allowed by the task."""
        return (self.args.max_source_positions, self.args.max_target_positions)

    @property
    def source_dictionary(self):
        """Return the source :class:`~fairseq.data.Dictionary`."""
        return self.src_dict

    @property
    def target_dictionary(self):
        """Return the target :class:`~fairseq.data.Dictionary`."""
        return self.tgt_dict