"vscode:/vscode.git/clone" did not exist on "3ef01faef2492b3e650f44ecc510f3a8f2426783"
preprocess.py 9.17 KB
Newer Older
Louis Martin's avatar
Louis Martin committed
1
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
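# Example invocation (illustrative paths; adjust to your own corpus layout):
#
#   python preprocess.py --source-lang de --target-lang en \
#       --trainpref data/train --validpref data/valid --testpref data/test \
#       --destdir data-bin/de-en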

import argparse
from itertools import zip_longest
import os
import shutil

from fairseq import dictionary, indexed_dataset
from fairseq.tokenizer import Tokenizer, tokenize_line


def get_parser():
    parser = argparse.ArgumentParser(
        description='Data pre-processing: Create dictionary and store data in binary format')
    parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language')
    parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language')
    parser.add_argument('--trainpref', metavar='FP', default=None, help='train file prefix (also used to build dictionaries)')
    parser.add_argument('--validpref', metavar='FP', default=None, help='comma-separated valid file prefixes')
    parser.add_argument('--testpref', metavar='FP', default=None, help='comma-separated test file prefixes')
    parser.add_argument('--destdir', metavar='DIR', default='data-bin', help='destination dir')
    parser.add_argument('--thresholdtgt', metavar='N', default=0, type=int,
                        help='map words appearing less than threshold times to unknown')
    parser.add_argument('--thresholdsrc', metavar='N', default=0, type=int,
                        help='map words appearing less than threshold times to unknown')
    parser.add_argument('--tgtdict', metavar='FP', help='reuse given target dictionary')
    parser.add_argument('--srcdict', metavar='FP', help='reuse given source dictionary')
    parser.add_argument('--nwordstgt', metavar='N', default=-1, type=int, help='number of target words to retain')
    parser.add_argument('--nwordssrc', metavar='N', default=-1, type=int, help='number of source words to retain')
    parser.add_argument('--alignfile', metavar='ALIGN', default=None, help='an alignment file (optional)')
    parser.add_argument('--output-format', metavar='FORMAT', default='binary', choices=['binary', 'raw'],
                        help='output format (optional)')
    parser.add_argument('--joined-dictionary', action='store_true', help='generate joined dictionary')
    parser.add_argument('--only-source', action='store_true', help='only process the source language')
    parser.add_argument('--padding-factor', metavar='N', default=8, type=int,
                        help='pad dictionary size to be a multiple of N')
    return parser


def main(args):
    print(args)
    os.makedirs(args.destdir, exist_ok=True)
    target = not args.only_source

    def pad_dictionary(d):
        """Pad dictionary to be a multiple of args.padding_factor.

        Keeping the dictionary size a multiple of 8 improves performance on some
        architectures, e.g., Nvidia Tensor Cores.
        """
        if args.padding_factor > 1:
            i = 0
            while len(d) % args.padding_factor != 0:
                d.add_symbol('madeupword{:04d}'.format(i))
                i += 1
            assert len(d) % args.padding_factor == 0

    def build_dictionary(filenames):
        d = dictionary.Dictionary()
        for filename in filenames:
            Tokenizer.add_file_to_dictionary(filename, d, tokenize_line)
        pad_dictionary(d)
        d.finalize()
        return d

    if args.joined_dictionary:
        assert not args.srcdict, 'cannot combine --srcdict and --joined-dictionary'
        assert not args.tgtdict, 'cannot combine --tgtdict and --joined-dictionary'
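        # Build a single dictionary over the concatenation of the source and
        # target training files, so both languages share one symbol table.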
        src_dict = build_dictionary([
            '{}.{}'.format(args.trainpref, lang)
            for lang in [args.source_lang, args.target_lang]
        ])
        tgt_dict = src_dict
    else:
        if args.srcdict:
            src_dict = dictionary.Dictionary.load(args.srcdict)
        else:
            assert args.trainpref, "--trainpref must be set if --srcdict is not specified"
            src_dict = build_dictionary(['{}.{}'.format(args.trainpref, args.source_lang)])
        if target:
            if args.tgtdict:
                tgt_dict = dictionary.Dictionary.load(args.tgtdict)
            else:
                assert args.trainpref, "--trainpref must be set if --tgtdict is not specified"
                tgt_dict = build_dictionary(['{}.{}'.format(args.trainpref, args.target_lang)])

    src_dict.save(os.path.join(args.destdir, 'dict.{}.txt'.format(args.source_lang)),
                  threshold=args.thresholdsrc, nwords=args.nwordssrc)
    if target:
        tgt_dict.save(os.path.join(args.destdir, 'dict.{}.txt'.format(args.target_lang)),
                      threshold=args.thresholdtgt, nwords=args.nwordstgt)

    def make_binary_dataset(input_prefix, output_prefix, lang):
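        # Binarize one split: token IDs are written to
        # <destdir>/<prefix>.<src>-<tgt>.<lang>.bin with a matching .idx index.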
        dict = dictionary.Dictionary.load(os.path.join(args.destdir, 'dict.{}.txt'.format(lang)))
        print('| [{}] Dictionary: {} types'.format(lang, len(dict) - 1))

        ds = indexed_dataset.IndexedDatasetBuilder(
            '{}/{}.{}-{}.{}.bin'.format(args.destdir, output_prefix, args.source_lang,
                                        args.target_lang, lang)
        )

        def consumer(tensor):
            ds.add_item(tensor)

        input_file = '{}.{}'.format(input_prefix, lang)
        res = Tokenizer.binarize(input_file, dict, consumer)
        print('| [{}] {}: {} sents, {} tokens, {:.3}% replaced by {}'.format(
            lang, input_file, res['nseq'], res['ntok'],
            100 * res['nunk'] / res['ntok'], dict.unk_word))
        ds.finalize('{}/{}.{}-{}.{}.idx'.format(
            args.destdir, output_prefix,
            args.source_lang, args.target_lang, lang))

    def make_dataset(input_prefix, output_prefix, lang, output_format='binary'):
        if output_format == 'binary':
            make_binary_dataset(input_prefix, output_prefix, lang)
        elif output_format == 'raw':
            # Copy original text file to destination folder
            output_text_file = os.path.join(args.destdir, '{}.{}'.format(output_prefix, lang))
            shutil.copyfile('{}.{}'.format(input_prefix, lang), output_text_file)

    def make_all(args, make_dataset, lang):
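        # Process every requested split; extra comma-separated valid/test
        # prefixes get numbered output names (valid1, valid2, ..., test1, ...).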
        if args.trainpref:
            make_dataset(args.trainpref, 'train', lang, args.output_format)
        if args.validpref:
            for k, validpref in enumerate(args.validpref.split(',')):
                outprefix = 'valid{}'.format(k) if k > 0 else 'valid'
                make_dataset(validpref, outprefix, lang, args.output_format)
        if args.testpref:
            for k, testpref in enumerate(args.testpref.split(',')):
                outprefix = 'test{}'.format(k) if k > 0 else 'test'
                make_dataset(testpref, outprefix, lang, args.output_format)

    make_all(args, make_dataset, args.source_lang)
    if target:
        make_all(args, make_dataset, args.target_lang)

    print('| Wrote preprocessed data to {}'.format(args.destdir))

    if args.alignfile:
        assert args.trainpref, "--trainpref must be set if --alignfile is specified"
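        # Build a word-alignment dictionary from the training data: for each
        # aligned (source word, target word) pair, count co-occurrences, then
        # keep only the most frequent target word per source word.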
        src_file_name = '{}.{}'.format(args.trainpref, args.source_lang)
        tgt_file_name = '{}.{}'.format(args.trainpref, args.target_lang)
        src_dict = dictionary.Dictionary.load(os.path.join(args.destdir, 'dict.{}.txt'.format(args.source_lang)))
        tgt_dict = dictionary.Dictionary.load(os.path.join(args.destdir, 'dict.{}.txt'.format(args.target_lang)))
        freq_map = {}
        with open(args.alignfile, 'r') as align_file:
            with open(src_file_name, 'r') as src_file:
                with open(tgt_file_name, 'r') as tgt_file:
                    for a, s, t in zip_longest(align_file, src_file, tgt_file):
                        si = Tokenizer.tokenize(s, src_dict, add_if_not_exist=False)
                        ti = Tokenizer.tokenize(t, tgt_dict, add_if_not_exist=False)
                        ai = list(map(lambda x: tuple(x.split('-')), a.split()))
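                        # each entry on the alignment line is a 'srcpos-tgtpos'
                        # pair of token positions within the sentence pair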
                        for sai, tai in ai:
                            srcidx = si[int(sai)]
                            tgtidx = ti[int(tai)]
                            if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
                                assert srcidx != src_dict.pad()
                                assert srcidx != src_dict.eos()
                                assert tgtidx != tgt_dict.pad()
                                assert tgtidx != tgt_dict.eos()

                                if srcidx not in freq_map:
                                    freq_map[srcidx] = {}
                                if tgtidx not in freq_map[srcidx]:
                                    freq_map[srcidx][tgtidx] = 1
                                else:
                                    freq_map[srcidx][tgtidx] += 1

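        # For each source word, keep the most frequently aligned target word.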
        align_dict = {}
        for srcidx in freq_map.keys():
            align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)

        with open(os.path.join(args.destdir, 'alignment.{}-{}.txt'.format(
                args.source_lang, args.target_lang)), 'w') as f:
            for k, v in align_dict.items():
                print('{} {}'.format(src_dict[k], tgt_dict[v]), file=f)


if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()
    main(args)