".github/vscode:/vscode.git/clone" did not exist on "bf2c36a92091a5aa28c34fc944d020a76a92a922"
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integrations with other Python libraries.
"""
import importlib.util
import math
import numbers
import os
import re
import tempfile
from pathlib import Path

from .utils import logging


logger = logging.get_logger(__name__)


# comet_ml must be imported before any ML frameworks
_has_comet = importlib.util.find_spec("comet_ml") is not None and os.getenv("COMET_MODE", "").upper() != "DISABLED"
if _has_comet:
    try:
        import comet_ml  # noqa: F401

        if hasattr(comet_ml, "config") and comet_ml.config.get_config("comet.api_key"):
            _has_comet = True
        else:
            if os.getenv("COMET_MODE", "").upper() != "DISABLED":
                logger.warning("comet_ml is installed but `COMET_API_KEY` is not set.")
            _has_comet = False
    except (ImportError, ValueError):
        _has_comet = False


from .file_utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available  # noqa: E402
from .trainer_callback import TrainerCallback  # noqa: E402
from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, EvaluationStrategy  # noqa: E402


# Integration functions:
def is_wandb_available():
    if os.getenv("WANDB_DISABLED"):
        return False
    return importlib.util.find_spec("wandb") is not None


def is_comet_available():
    return _has_comet


def is_tensorboard_available():
    return importlib.util.find_spec("tensorboard") is not None or importlib.util.find_spec("tensorboardX") is not None


def is_optuna_available():
    return importlib.util.find_spec("optuna") is not None


def is_ray_available():
    return importlib.util.find_spec("ray") is not None


def is_ray_tune_available():
    if not is_ray_available():
        return False
    return importlib.util.find_spec("ray.tune") is not None


def is_azureml_available():
    if importlib.util.find_spec("azureml") is None:
        return False
    if importlib.util.find_spec("azureml.core") is None:
        return False
    return importlib.util.find_spec("azureml.core.run") is not None


def is_mlflow_available():
    return importlib.util.find_spec("mlflow") is not None


def is_fairscale_available():
    return importlib.util.find_spec("fairscale") is not None


def hp_params(trial):
    if is_optuna_available():
        import optuna

        if isinstance(trial, optuna.Trial):
            return trial.params
    if is_ray_tune_available():
        if isinstance(trial, dict):
            return trial

    raise RuntimeError(f"Unknown type for trial {trial.__class__}")


def default_hp_search_backend():
    if is_optuna_available():
        return "optuna"
    elif is_ray_tune_available():
        return "ray"


def run_hp_search_optuna(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
    import optuna

    def _objective(trial, checkpoint_dir=None):
        model_path = None
        if checkpoint_dir:
            for subdir in os.listdir(checkpoint_dir):
                if subdir.startswith(PREFIX_CHECKPOINT_DIR):
                    model_path = os.path.join(checkpoint_dir, subdir)
        trainer.objective = None
        trainer.train(model_path=model_path, trial=trial)
        # If there hasn't been any evaluation during the training loop.
        if getattr(trainer, "objective", None) is None:
            metrics = trainer.evaluate()
            trainer.objective = trainer.compute_objective(metrics)
        return trainer.objective

    timeout = kwargs.pop("timeout", None)
    n_jobs = kwargs.pop("n_jobs", 1)
    study = optuna.create_study(direction=direction, **kwargs)
    study.optimize(_objective, n_trials=n_trials, timeout=timeout, n_jobs=n_jobs)
    best_trial = study.best_trial
    return BestRun(str(best_trial.number), best_trial.value, best_trial.params)


def run_hp_search_ray(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
    import ray

    def _objective(trial, checkpoint_dir=None):
        model_path = None
        if checkpoint_dir:
            for subdir in os.listdir(checkpoint_dir):
                if subdir.startswith(PREFIX_CHECKPOINT_DIR):
                    model_path = os.path.join(checkpoint_dir, subdir)
        trainer.objective = None
        trainer.train(model_path=model_path, trial=trial)
        # If there hasn't been any evaluation during the training loop.
        if getattr(trainer, "objective", None) is None:
            metrics = trainer.evaluate()
            trainer.objective = trainer.compute_objective(metrics)
            trainer._tune_save_checkpoint()
            ray.tune.report(objective=trainer.objective, **metrics, done=True)

    # The model and TensorBoard writer do not pickle so we have to remove them (if they exist)
    # while doing the ray hp search.
    _tb_writer = trainer.pop_callback(TensorBoardCallback)
    trainer.model = None
    # Setup default `resources_per_trial` and `reporter`.
    if "resources_per_trial" not in kwargs and trainer.args.n_gpu > 0:
        # `args.n_gpu` is considered the total number of GPUs that will be split
        # among the `n_jobs`
        n_jobs = int(kwargs.pop("n_jobs", 1))
        num_gpus_per_trial = trainer.args.n_gpu
        if num_gpus_per_trial / n_jobs >= 1:
            num_gpus_per_trial = int(math.ceil(num_gpus_per_trial / n_jobs))
        kwargs["resources_per_trial"] = {"gpu": num_gpus_per_trial}

    if "progress_reporter" not in kwargs:
        from ray.tune import CLIReporter

        kwargs["progress_reporter"] = CLIReporter(metric_columns=["objective"])
    if "keep_checkpoints_num" in kwargs and kwargs["keep_checkpoints_num"] > 0:
        # `keep_checkpoints_num=0` would disable checkpointing
        trainer.use_tune_checkpoints = True
        if kwargs["keep_checkpoints_num"] > 1:
            logger.warning(
                "Currently keeping {} checkpoints for each trial. Checkpoints are usually huge, "
                "consider setting `keep_checkpoints_num=1`.".format(kwargs["keep_checkpoints_num"])
            )
    if "scheduler" in kwargs:
        from ray.tune.schedulers import ASHAScheduler, HyperBandForBOHB, MedianStoppingRule, PopulationBasedTraining

        # Check if checkpointing is enabled for PopulationBasedTraining
        if isinstance(kwargs["scheduler"], PopulationBasedTraining):
            if not trainer.use_tune_checkpoints:
                logger.warning(
                    "You are using PopulationBasedTraining but you haven't enabled checkpointing. "
                    "This means your trials will train from scratch every time they are exploiting "
                    "new configurations. Consider enabling checkpointing by passing "
                    "`keep_checkpoints_num=1` as an additional argument to `Trainer.hyperparameter_search`."
                )

        # Check for `do_eval` and `evaluation_strategy` for schedulers that require intermediate reporting.
        if isinstance(
            kwargs["scheduler"], (ASHAScheduler, MedianStoppingRule, HyperBandForBOHB, PopulationBasedTraining)
        ) and (not trainer.args.do_eval or trainer.args.evaluation_strategy == EvaluationStrategy.NO):
            raise RuntimeError(
                "You are using {cls} as a scheduler but you haven't enabled evaluation during training. "
                "This means your trials will not report intermediate results to Ray Tune, and "
                "can thus not be stopped early or used to exploit other trials parameters. "
                "If this is what you want, do not use {cls}. If you would like to use {cls}, "
                "make sure you pass `do_eval=True` and `evaluation_strategy='steps'` in the "
                "Trainer `args`.".format(cls=type(kwargs["scheduler"]).__name__)
            )

    analysis = ray.tune.run(_objective, config=trainer.hp_space(None), num_samples=n_trials, **kwargs)
    best_trial = analysis.get_best_trial(metric="objective", mode=direction[:3])
    best_run = BestRun(best_trial.trial_id, best_trial.last_result["objective"], best_trial.config)
    if _tb_writer is not None:
        trainer.add_callback(_tb_writer)
    return best_run


def rewrite_logs(d):
    new_d = {}
    eval_prefix = "eval_"
    eval_prefix_len = len(eval_prefix)
    for k, v in d.items():
        if k.startswith(eval_prefix):
            new_d["eval/" + k[eval_prefix_len:]] = v
        else:
            new_d["train/" + k] = v
    return new_d


class TensorBoardCallback(TrainerCallback):
    """
    A :class:`~transformers.TrainerCallback` that sends the logs to `TensorBoard
    <https://www.tensorflow.org/tensorboard>`__.

    Args:
        tb_writer (:obj:`SummaryWriter`, `optional`):
            The writer to use. Will instantiate one if not set.
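
    Example (a minimal sketch; assumes an existing ``trainer`` instance, a working tensorboard
    install, and a hypothetical ``log_dir``)::

        from torch.utils.tensorboard import SummaryWriter

        # reuse a pre-configured writer instead of letting the callback create one
        tb_callback = TensorBoardCallback(tb_writer=SummaryWriter(log_dir="runs/my-run"))
        trainer.add_callback(tb_callback)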
    """

    def __init__(self, tb_writer=None):
        has_tensorboard = is_tensorboard_available()
        assert (
            has_tensorboard
        ), "TensorBoardCallback requires tensorboard to be installed. Either update your PyTorch version or install tensorboardX."
        if has_tensorboard:
            try:
                from torch.utils.tensorboard import SummaryWriter  # noqa: F401

                self._SummaryWriter = SummaryWriter
            except ImportError:
                try:
                    from tensorboardX import SummaryWriter

                    self._SummaryWriter = SummaryWriter
                except ImportError:
                    self._SummaryWriter = None
        self.tb_writer = tb_writer

    def _init_summary_writer(self, args, log_dir=None):
        log_dir = log_dir or args.logging_dir
        if self._SummaryWriter is not None:
            self.tb_writer = self._SummaryWriter(log_dir=log_dir)

    def on_train_begin(self, args, state, control, **kwargs):
        if not state.is_world_process_zero:
            return

        log_dir = None

        if state.is_hyper_param_search:
            trial_name = state.trial_name
            if trial_name is not None:
                log_dir = os.path.join(args.logging_dir, trial_name)

        self._init_summary_writer(args, log_dir)

        if self.tb_writer is not None:
            self.tb_writer.add_text("args", args.to_json_string())
            if "model" in kwargs:
                model = kwargs["model"]
                if hasattr(model, "config") and model.config is not None:
                    model_config_json = model.config.to_json_string()
                    self.tb_writer.add_text("model_config", model_config_json)
            # Version of TensorBoard coming from tensorboardX does not have this method.
            if hasattr(self.tb_writer, "add_hparams"):
                self.tb_writer.add_hparams(args.to_sanitized_dict(), metric_dict={})

    def on_log(self, args, state, control, logs=None, **kwargs):
        if state.is_world_process_zero:
            if self.tb_writer is None:
                self._init_summary_writer(args)

        if self.tb_writer is not None:
            logs = rewrite_logs(logs)
            for k, v in logs.items():
                if isinstance(v, (int, float)):
                    self.tb_writer.add_scalar(k, v, state.global_step)
                else:
                    logger.warning(
                        "Trainer is attempting to log a value of "
                        '"%s" of type %s for key "%s" as a scalar. '
                        "This invocation of Tensorboard's writer.add_scalar() "
                        "is incorrect so we dropped this attribute.",
                        v,
                        type(v),
                        k,
                    )
            self.tb_writer.flush()

    def on_train_end(self, args, state, control, **kwargs):
        if self.tb_writer:
            self.tb_writer.close()


class WandbCallback(TrainerCallback):
    """
    A :class:`~transformers.TrainerCallback` that sends the logs to `Weights & Biases <https://www.wandb.com/>`__.
    """

    def __init__(self):
        has_wandb = is_wandb_available()
        assert has_wandb, "WandbCallback requires wandb to be installed. Run `pip install wandb`."
        if has_wandb:
            import wandb

            wandb.ensure_configured()
            if wandb.api.api_key is None:
                has_wandb = False
                logger.warning(
                    "W&B installed but not logged in. Run `wandb login` or set the WANDB_API_KEY env variable."
                )
                self._wandb = None
            else:
                self._wandb = wandb
        self._initialized = False

    def setup(self, args, state, model, reinit, **kwargs):
        """
        Setup the optional Weights & Biases (`wandb`) integration.

        One can subclass and override this method to customize the setup if needed. Find more information `here
        <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:

        Environment:
            WANDB_LOG_MODEL (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to log model as artifact at the end of training.
            WANDB_WATCH (:obj:`str`, `optional`, defaults to :obj:`"gradients"`):
                Can be :obj:`"gradients"`, :obj:`"all"` or :obj:`"false"`. Set to :obj:`"false"` to disable gradient
                logging or :obj:`"all"` to log gradients and parameters.
            WANDB_PROJECT (:obj:`str`, `optional`, defaults to :obj:`"huggingface"`):
                Set this to a custom string to store results in a different project.
            WANDB_DISABLED (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to disable wandb entirely.
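
        Example (a minimal sketch; assumes a standard :class:`~transformers.Trainer` script, with
        a hypothetical project name)::

            import os

            os.environ["WANDB_PROJECT"] = "my-project"  # hypothetical project name
            os.environ["WANDB_LOG_MODEL"] = "true"      # upload the final model as an artifact
            # WandbCallback is added automatically by Trainer when wandb is installed and logged in,
            # so training can then proceed as usual, e.g. trainer.train()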
        """
        if self._wandb is None:
            return
        self._initialized = True
        if state.is_world_process_zero:
            logger.info(
                'Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"'
            )
            combined_dict = {**args.to_sanitized_dict()}

            if hasattr(model, "config") and model.config is not None:
                model_config = model.config.to_dict()
                combined_dict = {**model_config, **combined_dict}
            trial_name = state.trial_name
            init_args = {}
            if trial_name is not None:
                run_name = trial_name
                init_args["group"] = args.run_name
            else:
                run_name = args.run_name

            self._wandb.init(
                project=os.getenv("WANDB_PROJECT", "huggingface"),
                config=combined_dict,
                name=run_name,
                reinit=reinit,
                **init_args,
            )

            # keep track of model topology and gradients, unsupported on TPU
            if not is_torch_tpu_available() and os.getenv("WANDB_WATCH") != "false":
                self._wandb.watch(
                    model, log=os.getenv("WANDB_WATCH", "gradients"), log_freq=max(100, args.logging_steps)
                )

            # log outputs
            self._log_model = os.getenv("WANDB_LOG_MODEL", "FALSE").upper() in ENV_VARS_TRUE_VALUES.union({"TRUE"})

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        if self._wandb is None:
            return
        hp_search = state.is_hyper_param_search
        if not self._initialized or hp_search:
            self.setup(args, state, model, reinit=hp_search, **kwargs)

    def on_train_end(self, args, state, control, model=None, tokenizer=None, **kwargs):
        if self._wandb is None:
            return
        # commit last step
        self._wandb.log({})
        if self._log_model and self._initialized and state.is_world_process_zero:
            from .trainer import Trainer

            fake_trainer = Trainer(args=args, model=model, tokenizer=tokenizer)
            with tempfile.TemporaryDirectory() as temp_dir:
                fake_trainer.save_model(temp_dir)
                # use run name and ensure it's a valid Artifact name
                artifact_name = re.sub(r"[^a-zA-Z0-9_\.\-]", "", self._wandb.run.name)
                metadata = (
                    {
                        k: v
                        for k, v in dict(self._wandb.summary).items()
                        if isinstance(v, numbers.Number) and not k.startswith("_")
                    }
                    if not args.load_best_model_at_end
                    else {
                        f"eval/{args.metric_for_best_model}": state.best_metric,
                        "train/total_flos": state.total_flos,
                    }
                )
                artifact = self._wandb.Artifact(name=f"run-{artifact_name}", type="model", metadata=metadata)
                for f in Path(temp_dir).glob("*"):
                    if f.is_file():
                        with artifact.new_file(f.name, mode="wb") as fa:
                            fa.write(f.read_bytes())
                self._wandb.run.log_artifact(artifact)

    def on_log(self, args, state, control, model=None, logs=None, **kwargs):
        if self._wandb is None:
            return
        if not self._initialized:
            self.setup(args, state, model, reinit=False)
        if state.is_world_process_zero:
            logs = rewrite_logs(logs)
            self._wandb.log(logs, step=state.global_step)


class CometCallback(TrainerCallback):
    """
    A :class:`~transformers.TrainerCallback` that sends the logs to `Comet ML <https://www.comet.ml/site/>`__.
    """

    def __init__(self):
        assert _has_comet, "CometCallback requires comet-ml to be installed. Run `pip install comet-ml`."
        self._initialized = False

    def setup(self, args, state, model):
        """
        Setup the optional Comet.ml integration.

        Environment:
            COMET_MODE (:obj:`str`, `optional`):
                "OFFLINE", "ONLINE", or "DISABLED"
            COMET_PROJECT_NAME (:obj:`str`, `optional`):
                Comet.ml project name for experiments
            COMET_OFFLINE_DIRECTORY (:obj:`str`, `optional`):
                Folder to use for saving offline experiments when :obj:`COMET_MODE` is "OFFLINE"

        For a number of configurable items in the environment, see `here
        <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__.
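
        Example (a minimal sketch; assumes comet_ml is installed, and the project name and
        directory below are illustrative)::

            import os

            os.environ["COMET_MODE"] = "OFFLINE"
            os.environ["COMET_PROJECT_NAME"] = "my-experiments"     # hypothetical project name
            os.environ["COMET_OFFLINE_DIRECTORY"] = "./comet-logs"  # hypothetical directory
            # the experiment is then created by this callback at the start of Trainer.train()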
        """
        self._initialized = True
        if state.is_world_process_zero:
            comet_mode = os.getenv("COMET_MODE", "ONLINE").upper()
            experiment_kwargs = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")}
            experiment = None
            if comet_mode == "ONLINE":
                experiment = comet_ml.Experiment(**experiment_kwargs)
                logger.info("Automatic Comet.ml online logging enabled")
            elif comet_mode == "OFFLINE":
                experiment_kwargs["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./")
                experiment = comet_ml.OfflineExperiment(**experiment_kwargs)
                logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished")
            if experiment is not None:
                experiment._set_model_graph(model, framework="transformers")
                experiment._log_parameters(args, prefix="args/", framework="transformers")
                if hasattr(model, "config"):
                    experiment._log_parameters(model.config, prefix="config/", framework="transformers")

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        if not self._initialized:
            self.setup(args, state, model)

    def on_log(self, args, state, control, model=None, logs=None, **kwargs):
        if not self._initialized:
            self.setup(args, state, model)
        if state.is_world_process_zero:
            experiment = comet_ml.config.get_global_experiment()
            if experiment is not None:
                experiment._log_metrics(logs, step=state.global_step, epoch=state.epoch, framework="transformers")


class AzureMLCallback(TrainerCallback):
    """
    A :class:`~transformers.TrainerCallback` that sends the logs to `AzureML
    <https://pypi.org/project/azureml-sdk/>`__.
    """

    def __init__(self, azureml_run=None):
        assert (
            is_azureml_available()
        ), "AzureMLCallback requires azureml to be installed. Run `pip install azureml-sdk`."
        self.azureml_run = azureml_run

    def on_init_end(self, args, state, control, **kwargs):
        from azureml.core.run import Run

        if self.azureml_run is None and state.is_world_process_zero:
            self.azureml_run = Run.get_context()

    def on_log(self, args, state, control, logs=None, **kwargs):
        if self.azureml_run:
            for k, v in logs.items():
                if isinstance(v, (int, float)):
                    self.azureml_run.log(k, v, description=k)


class MLflowCallback(TrainerCallback):
    """
    A :class:`~transformers.TrainerCallback` that sends the logs to `MLflow <https://www.mlflow.org/>`__.
    """

    MAX_LOG_SIZE = 100

    def __init__(self):
        assert is_mlflow_available(), "MLflowCallback requires mlflow to be installed. Run `pip install mlflow`."
        import mlflow

        self._initialized = False
        self._log_artifacts = False
        self._ml_flow = mlflow

    def setup(self, args, state, model):
        """
        Setup the optional MLflow integration.

        Environment:
            HF_MLFLOW_LOG_ARTIFACTS (:obj:`str`, `optional`):
                Whether to use MLflow .log_artifact() facility to log artifacts.

                This only makes sense if logging to a remote server, e.g. s3 or GCS. If set to `True` or `1`, will copy
                whatever is in TrainingArguments's output_dir to the local or remote artifact storage. Using it without a
                remote storage will just copy the files to your artifact location.
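
        Example (a minimal sketch; assumes the tracking server is configured via MLflow's own
        ``MLFLOW_TRACKING_URI`` environment variable)::

            import os

            os.environ["MLFLOW_TRACKING_URI"] = "http://localhost:5000"  # hypothetical server
            os.environ["HF_MLFLOW_LOG_ARTIFACTS"] = "TRUE"  # copy output_dir to the artifact store
            # parameters and metrics are then logged by this callback during Trainer.train()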
        """
        log_artifacts = os.getenv("HF_MLFLOW_LOG_ARTIFACTS", "FALSE").upper()
        if log_artifacts in {"TRUE", "1"}:
            self._log_artifacts = True
        if state.is_world_process_zero:
            self._ml_flow.start_run()
            combined_dict = args.to_dict()
            if hasattr(model, "config") and model.config is not None:
                model_config = model.config.to_dict()
                combined_dict = {**model_config, **combined_dict}
            # MLflow cannot log more than 100 values in one go, so we have to split it
            combined_dict_items = list(combined_dict.items())
            for i in range(0, len(combined_dict_items), MLflowCallback.MAX_LOG_SIZE):
                self._ml_flow.log_params(dict(combined_dict_items[i : i + MLflowCallback.MAX_LOG_SIZE]))
        self._initialized = True

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        if not self._initialized:
            self.setup(args, state, model)

    def on_log(self, args, state, control, logs, model=None, **kwargs):
        if not self._initialized:
            self.setup(args, state, model)
        if state.is_world_process_zero:
            for k, v in logs.items():
                if isinstance(v, (int, float)):
                    self._ml_flow.log_metric(k, v, step=state.global_step)
                else:
                    logger.warning(
                        "Trainer is attempting to log a value of "
                        '"%s" of type %s for key "%s" as a metric. '
                        "MLflow's log_metric() only accepts float and "
                        "int types so we dropped this attribute.",
                        v,
                        type(v),
                        k,
                    )

    def on_train_end(self, args, state, control, **kwargs):
        if self._initialized and state.is_world_process_zero:
            if self._log_artifacts:
                logger.info("Logging artifacts. This may take time.")
                self._ml_flow.log_artifacts(args.output_dir)
            self._ml_flow.end_run()

    def __del__(self):
        # if the previous run is not terminated correctly, the fluent API will
        # not let you start a new run before the previous one is killed
        if self._ml_flow.active_run() is not None:
            self._ml_flow.end_run(status="KILLED")