"src/routes/vscode:/vscode.git/clone" did not exist on "28a3d599553007e42f709b8162e2a3b9f80b284b"
translation.py 8.31 KB
Newer Older
1
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import itertools
import os

from fairseq import options, utils
from fairseq.data import (
    ConcatDataset,
    data_utils,
    indexed_dataset,
    LanguagePairDataset,
    PrependTokenDataset,
)

from . import FairseqTask, register_task


def load_langpair_dataset(
    data_path, split,
    src, src_dict,
    tgt, tgt_dict,
    combine, dataset_impl, upsample_primary,
    left_pad_source, left_pad_target, max_source_positions,
    max_target_positions, prepend_bos=False,
):
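    """Load a parallel {src}-{tgt} dataset for `split`.

    Data files are expected to be named like ``{split}.{src}-{tgt}.{lang}``.
    When ``combine`` is True, additional shards named ``{split}1``,
    ``{split}2``, ... are concatenated into a single dataset, with the
    primary (first) shard upsampled by ``upsample_primary``.
    """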
    def split_exists(split, src, tgt, lang, data_path):
        filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
        return indexed_dataset.dataset_exists(filename, impl=dataset_impl)

    src_datasets = []
    tgt_datasets = []

    for k in itertools.count():
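        # look for shards named e.g. 'train', 'train1', 'train2', ...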
        split_k = split + (str(k) if k > 0 else '')

        # infer langcode
        if split_exists(split_k, src, tgt, src, data_path):
            prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
        elif split_exists(split_k, tgt, src, src, data_path):
            prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
        else:
            if k > 0:
                break
            else:
                raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))

        src_datasets.append(
            data_utils.load_indexed_dataset(prefix + src, src_dict, dataset_impl)
        )
        tgt_datasets.append(
            data_utils.load_indexed_dataset(prefix + tgt, tgt_dict, dataset_impl)
        )

        print('| {} {} {}-{} {} examples'.format(data_path, split_k, src, tgt, len(src_datasets[-1])))

        if not combine:
            break

    assert len(src_datasets) == len(tgt_datasets)

    if len(src_datasets) == 1:
        src_dataset, tgt_dataset = src_datasets[0], tgt_datasets[0]
    else:
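        # several shards were loaded: concatenate them, repeating the
        # primary (first) shard `upsample_primary` times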
        sample_ratios = [1] * len(src_datasets)
        sample_ratios[0] = upsample_primary
        src_dataset = ConcatDataset(src_datasets, sample_ratios)
        tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)

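    # optionally prepend the beginning-of-sentence token to each example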
    if prepend_bos:
        assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
        src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
        tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())

    return LanguagePairDataset(
        src_dataset, src_dataset.sizes, src_dict,
        tgt_dataset, tgt_dataset.sizes, tgt_dict,
        left_pad_source=left_pad_source,
        left_pad_target=left_pad_target,
        max_source_positions=max_source_positions,
        max_target_positions=max_target_positions,
    )


@register_task('translation')
class TranslationTask(FairseqTask):
    """
    Translate from one (source) language to another (target) language.

    Args:
        src_dict (~fairseq.data.Dictionary): dictionary for the source language
        tgt_dict (~fairseq.data.Dictionary): dictionary for the target language

    .. note::

        The translation task is compatible with :mod:`fairseq-train`,
        :mod:`fairseq-generate` and :mod:`fairseq-interactive`.

    The translation task provides the following additional command-line
    arguments:

    .. argparse::
        :ref: fairseq.tasks.translation_parser
        :prog:
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('data', help='colon-separated list of data directories, \
                            iterated over in round-robin order across epochs')
        parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
                            help='source language')
        parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
                            help='target language')
        parser.add_argument('--lazy-load', action='store_true',
                            help='load the dataset lazily')
        parser.add_argument('--raw-text', action='store_true',
                            help='load raw text dataset')
        parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
                            help='pad the source on the left')
        parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
                            help='pad the target on the left')
        parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the target sequence')
        parser.add_argument('--upsample-primary', default=1, type=int,
                            help='amount to upsample primary dataset')
        # fmt: on

    def __init__(self, args, src_dict, tgt_dict):
        super().__init__(args)
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task (e.g., load dictionaries).

        Args:
            args (argparse.Namespace): parsed command-line arguments
        """
        args.left_pad_source = options.eval_bool(args.left_pad_source)
        args.left_pad_target = options.eval_bool(args.left_pad_target)
        if getattr(args, 'raw_text', False):
            utils.deprecation_warning('--raw-text is deprecated, please use --dataset-impl=raw')
            args.dataset_impl = 'raw'
        elif getattr(args, 'lazy_load', False):
            utils.deprecation_warning('--lazy-load is deprecated, please use --dataset-impl=lazy')
            args.dataset_impl = 'lazy'

        paths = args.data.split(':')
        assert len(paths) > 0
        # find language pair automatically
        if args.source_lang is None or args.target_lang is None:
            args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0])
        if args.source_lang is None or args.target_lang is None:
            raise Exception('Could not infer language pair, please provide it explicitly')

        # load dictionaries
        src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang)))
        tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.target_lang)))
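        # the dictionaries must agree on the indices of the special symbols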
        assert src_dict.pad() == tgt_dict.pad()
        assert src_dict.eos() == tgt_dict.eos()
        assert src_dict.unk() == tgt_dict.unk()
        print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
        print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))

        return cls(args, src_dict, tgt_dict)

    def load_dataset(self, split, epoch=0, combine=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        paths = self.args.data.split(':')
        assert len(paths) > 0
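        # cycle through the data directories round-robin across epochs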
        data_path = paths[epoch % len(paths)]

        # infer langcode
        src, tgt = self.args.source_lang, self.args.target_lang

        self.datasets[split] = load_langpair_dataset(
            data_path, split, src, self.src_dict, tgt, self.tgt_dict,
            combine=combine, dataset_impl=self.args.dataset_impl,
            upsample_primary=self.args.upsample_primary,
            left_pad_source=self.args.left_pad_source,
            left_pad_target=self.args.left_pad_target,
            max_source_positions=self.args.max_source_positions,
            max_target_positions=self.args.max_target_positions,
        )

    def build_dataset_for_inference(self, src_tokens, src_lengths):
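        # wrap pre-tokenized source sentences in a target-less dataset
        # (used at inference time, e.g. by fairseq-interactive)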
        return LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary)

    def max_positions(self):
        """Return the max sentence length allowed by the task."""
        return (self.args.max_source_positions, self.args.max_target_positions)

    @property
    def source_dictionary(self):
        """Return the source :class:`~fairseq.data.Dictionary`."""
        return self.src_dict

    @property
    def target_dictionary(self):
        """Return the target :class:`~fairseq.data.Dictionary`."""
        return self.tgt_dict