"comfy/text_encoders/sd2_clip.py" did not exist on "f87ec10a97664c4a8e00d856c4c48836cfbfcbdf"
run_xnli.py 15.7 KB
Newer Older
1
#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning multilingual models on XNLI (e.g. BERT, DistilBERT, XLM).
    Adapted from `examples/text-classification/run_glue.py`"""
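# Illustrative invocation (a sketch, not the only supported configuration; the model name,
# output path and hyper-parameter values are placeholders, and the flags mirror the argument
# dataclasses defined below plus the standard `TrainingArguments`):
#
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de \
#     --train_language en \
#     --do_train \
#     --do_eval \
#     --per_device_train_batch_size 32 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 2.0 \
#     --max_seq_length 128 \
#     --output_dir /tmp/debug_xnli/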

import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
from datasets import load_dataset, load_metric

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.9.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
            "value if set."
        },
    )
    server_ip: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})
    server_port: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to lowercase the input when tokenizing (passed to AutoTokenizer.from_pretrained())."},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup distant debugging if needed
    if data_args.server_ip and data_args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(data_args.server_ip, data_args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if training_args.should_log else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )

    # Set the verbosity to info of the Transformers logger (on main process only):
    if training_args.should_log:
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f"Training/evaluation parameters {training_args}")
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset("xnli", model_args.language, split="train", cache_dir=model_args.cache_dir)
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset("xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir)
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset("xnli", model_args.language, split="test", cache_dir=model_args.cache_dir)
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
        train_dataset = train_dataset.map(
            preprocess_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on train dataset",
        )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
        eval_dataset = eval_dataset.map(
            preprocess_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on validation dataset",
        )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
        predict_dataset = predict_dataset.map(
            preprocess_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on prediction dataset",
        )

    # Get the metric function
    metric = load_metric("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
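        # Padding to a multiple of 8 keeps sequence lengths tensor-core friendly under fp16
        # (a throughput optimization, not a correctness requirement).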
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()