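"""Training entry point for OpenFold, a trainable PyTorch reproduction of
AlphaFold 2 built on PyTorch Lightning.

Example invocation (paths and flag values are illustrative; see the argparse
options below for the full set of flags):

    python train_openfold.py mmcif_dir/ alignment_dir/ template_mmcif_dir/ \
        output_dir/ 2021-10-10 --config_preset initial_training \
        --gpus 2 --seed 42
"""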
import argparse
import logging
import os
import sys
import json

import pytorch_lightning as pl
from pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.strategies import DeepSpeedStrategy, DDPStrategy
from pytorch_lightning import seed_everything
import torch

from openfold.config import model_config
from openfold.data.data_modules import OpenFoldDataModule, OpenFoldMultimerDataModule
from openfold.model.model import AlphaFold
from openfold.model.torchscript import script_preset_
from openfold.np import residue_constants
from openfold.utils.argparse_utils import remove_arguments
from openfold.utils.callbacks import (
    EarlyStoppingVerbose,
)
from openfold.utils.exponential_moving_average import ExponentialMovingAverage
from openfold.utils.loss import AlphaFoldLoss, lddt_ca
from openfold.utils.lr_schedulers import AlphaFoldLRScheduler
from openfold.utils.multi_chain_permutation import multi_chain_permutation_align
from openfold.utils.superimposition import superimpose
from openfold.utils.tensor_utils import tensor_tree_map
from openfold.utils.validation_metrics import (
    drmsd,
    gdt_ts,
    gdt_ha,
)
from openfold.utils.import_weights import (
    import_jax_weights_,
    import_openfold_weights_
)
from scripts.zero_to_fp32 import (
    get_fp32_state_dict_from_zero_checkpoint,
    get_global_step_from_zero_checkpoint
)

from openfold.utils.logger import PerformanceLoggingCallback


class OpenFoldWrapper(pl.LightningModule):
    def __init__(self, config):
        super(OpenFoldWrapper, self).__init__()
        self.config = config
        self.model = AlphaFold(config)
        self.is_multimer = self.config.globals.is_multimer

        self.loss = AlphaFoldLoss(config.loss)

        self.ema = ExponentialMovingAverage(
            model=self.model, decay=config.ema.decay
        )
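        # An exponentially-averaged copy of the weights, swapped in for
        # validation and persisted in checkpoints (see on_save_checkpoint)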

        self.cached_weights = None
        self.last_lr_step = -1
        self.save_hyperparameters()

    def forward(self, batch):
        return self.model(batch)

    def _log(self, loss_breakdown, batch, outputs, train=True):
        phase = "train" if train else "val"
        for loss_name, indiv_loss in loss_breakdown.items():
            self.log(
                f"{phase}/{loss_name}",
                indiv_loss,
                on_step=train, on_epoch=(not train), logger=True,
            )

            if (train):
                self.log(
                    f"{phase}/{loss_name}_epoch",
                    indiv_loss,
                    on_step=False, on_epoch=True, logger=True,
                )

        with torch.no_grad():
            other_metrics = self._compute_validation_metrics(
                batch,
                outputs,
                superimposition_metrics=(not train)
            )

        for k, v in other_metrics.items():
            self.log(
                f"{phase}/{k}",
                torch.mean(v),
                on_step=False, on_epoch=True, logger=True
            )

    def training_step(self, batch, batch_idx):
        if (self.ema.device != batch["aatype"].device):
            self.ema.to(batch["aatype"].device)

        ground_truth = batch.pop('gt_features', None)

        # Run the model
        outputs = self(batch)

        # Remove the recycling dimension
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

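        # For multimer targets, permute the ground-truth chains to best
        # match the prediction before computing the loss (resolves
        # chain-assignment ambiguity between identical chains)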
        if self.is_multimer:
            batch = multi_chain_permutation_align(out=outputs,
                                                  features=batch,
                                                  ground_truth=ground_truth)

        # Compute loss
        loss, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        # Log it
        self._log(loss_breakdown, batch, outputs)

        return loss

    def on_before_zero_grad(self, *args, **kwargs):
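        # Called after optimizer.step() and before optimizer.zero_grad(),
        # so the EMA tracks the freshly updated weights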
        self.ema.update(self.model)

    def validation_step(self, batch, batch_idx):
        # At the start of validation, load the EMA weights
        if (self.cached_weights is None):
            # model.state_dict() contains references to model weights rather
            # than copies. Therefore, we need to clone them before calling
            # load_state_dict().
            def clone_param(t): return t.detach().clone()
            self.cached_weights = tensor_tree_map(
                clone_param, self.model.state_dict())
            self.model.load_state_dict(self.ema.state_dict()["params"])

        ground_truth = batch.pop('gt_features', None)

        # Run the model
        outputs = self(batch)
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

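        # Validation always scores with the unclamped FAPE loss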
        batch["use_clamped_fape"] = 0.

        if self.is_multimer:
            batch = multi_chain_permutation_align(out=outputs,
                                                  features=batch,
                                                  ground_truth=ground_truth)

        # Compute loss and other metrics
        _, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        self._log(loss_breakdown, batch, outputs, train=False)

    def on_validation_epoch_end(self):
        # Restore the model weights to normal
        self.model.load_state_dict(self.cached_weights)
        self.cached_weights = None

    def _compute_validation_metrics(self,
                                    batch,
                                    outputs,
                                    superimposition_metrics=False
                                    ):
        metrics = {}

        gt_coords = batch["all_atom_positions"]
        pred_coords = outputs["final_atom_positions"]
        all_atom_mask = batch["all_atom_mask"]

        # This is super janky for superimposition. Fix later
        gt_coords_masked = gt_coords * all_atom_mask[..., None]
        pred_coords_masked = pred_coords * all_atom_mask[..., None]
        ca_pos = residue_constants.atom_order["CA"]
        gt_coords_masked_ca = gt_coords_masked[..., ca_pos, :]
        pred_coords_masked_ca = pred_coords_masked[..., ca_pos, :]
        all_atom_mask_ca = all_atom_mask[..., ca_pos]

        lddt_ca_score = lddt_ca(
            pred_coords,
            gt_coords,
            all_atom_mask,
            eps=self.config.globals.eps,
            per_residue=False,
        )

        metrics["lddt_ca"] = lddt_ca_score

        drmsd_ca_score = drmsd(
            pred_coords_masked_ca,
            gt_coords_masked_ca,
            mask=all_atom_mask_ca,  # still required here to compute n
        )

        metrics["drmsd_ca"] = drmsd_ca_score

        if (superimposition_metrics):
            superimposed_pred, alignment_rmsd = superimpose(
                gt_coords_masked_ca, pred_coords_masked_ca, all_atom_mask_ca,
            )
            gdt_ts_score = gdt_ts(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )
            gdt_ha_score = gdt_ha(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )

            metrics["alignment_rmsd"] = alignment_rmsd
            metrics["gdt_ts"] = gdt_ts_score
            metrics["gdt_ha"] = gdt_ha_score

        return metrics

    def configure_optimizers(self,
                             learning_rate: float = 1e-3,
                             eps: float = 1e-5,
                             ) -> torch.optim.Adam:
        #        return torch.optim.Adam(
        #            self.model.parameters(),
        #            lr=learning_rate,
        #            eps=eps
        #        )
        # Ignored as long as a DeepSpeed optimizer is configured
        optimizer = torch.optim.Adam(
            self.model.parameters(),
            lr=learning_rate,
            eps=eps
        )
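        # When resuming, restored param groups may be missing the
        # 'initial_lr' key that the LR scheduler expects, so backfill it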

        if self.last_lr_step != -1:
            for group in optimizer.param_groups:
                if 'initial_lr' not in group:
                    group['initial_lr'] = learning_rate

        lr_scheduler = AlphaFoldLRScheduler(
            optimizer,
            last_epoch=self.last_lr_step
        )

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": lr_scheduler,
                "interval": "step",
                "name": "AlphaFoldLRScheduler",
            }
        }

    def on_load_checkpoint(self, checkpoint):
        ema = checkpoint["ema"]
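        # If the template stack is disabled, drop template parameters from
        # the checkpointed EMA state to avoid unexpected-key errors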
        if (not self.model.template_config.enabled):
            ema["params"] = {
                k: v for k, v in ema["params"].items() if "template" not in k
            }
        self.ema.load_state_dict(ema)

    def on_save_checkpoint(self, checkpoint):
        checkpoint["ema"] = self.ema.state_dict()

    def resume_last_lr_step(self, lr_step):
        self.last_lr_step = lr_step

    def load_from_jax(self, jax_path):
        model_basename = os.path.splitext(
            os.path.basename(
                os.path.normpath(jax_path)
            )
        )[0]
        model_version = "_".join(model_basename.split("_")[1:])
        import_jax_weights_(
            self.model, jax_path, version=model_version
        )


def main(args):
    if (args.seed is not None):
        seed_everything(args.seed, workers=True)

    config = model_config(
        args.config_preset,
        train=True,
        low_prec=(str(args.precision) == "16")
    )

    if args.experiment_config_json:
        with open(args.experiment_config_json, 'r') as f:
            custom_config_dict = json.load(f)
        config.update_from_flattened_dict(custom_config_dict)

    model_module = OpenFoldWrapper(config)

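    # Checkpoints may come in several layouts: DeepSpeed ZeRO checkpoint
    # directories, consolidated checkpoints with a 'module' key, Lightning
    # checkpoints with a 'state_dict' key, and bare pretrained state dicts;
    # each is handled below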
    if args.resume_from_ckpt:
        if args.resume_model_weights_only:
            # Load the checkpoint
            if os.path.isdir(args.resume_from_ckpt):
                sd = get_fp32_state_dict_from_zero_checkpoint(
                    args.resume_from_ckpt)
            else:
                sd = torch.load(args.resume_from_ckpt)
            # Process the state dict
            if 'module' in sd:
                sd = {k[len('module.'):]: v for k, v in sd['module'].items()}
                import_openfold_weights_(model=model_module, state_dict=sd)
            elif 'state_dict' in sd:
                import_openfold_weights_(
                    model=model_module, state_dict=sd['state_dict'])
            else:
                # Loading from pre-trained model
                sd = {'model.'+k: v for k, v in sd.items()}
                import_openfold_weights_(model=model_module, state_dict=sd)
            logging.info("Successfully loaded model weights...")

        else:  # Loads a checkpoint to start from a specific time step
            if os.path.isdir(args.resume_from_ckpt):
                last_global_step = get_global_step_from_zero_checkpoint(
                    args.resume_from_ckpt)
            else:
                sd = torch.load(args.resume_from_ckpt)
                last_global_step = int(sd['global_step'])
            model_module.resume_last_lr_step(last_global_step)
            logging.info("Successfully loaded last lr step...")

    if args.resume_from_jax_params:
        model_module.load_from_jax(args.resume_from_jax_params)
        logging.info(
            f"Successfully loaded JAX parameters at {args.resume_from_jax_params}...")

    # TorchScript components of the model
    if (args.script_modules):
        script_preset_(model_module)

    if "multimer" in args.config_preset:
        data_module = OpenFoldMultimerDataModule(
            config=config.data,
            batch_seed=args.seed,
            **vars(args)
        )
    else:
        data_module = OpenFoldDataModule(
            config=config.data,
            batch_seed=args.seed,
            **vars(args)
        )

    data_module.prepare_data()
    data_module.setup()

    callbacks = []
    if (args.checkpoint_every_epoch):
        mc = ModelCheckpoint(
            every_n_epochs=1,
            auto_insert_metric_name=False,
            save_top_k=-1,
        )
        callbacks.append(mc)

    if (args.early_stopping):
        es = EarlyStoppingVerbose(
            monitor="val/lddt_ca",
            min_delta=args.min_delta,
            patience=args.patience,
            verbose=False,
            mode="max",
            check_finite=True,
            strict=True,
        )
        callbacks.append(es)

    if (args.log_performance):
        global_batch_size = args.num_nodes * args.gpus
        perf = PerformanceLoggingCallback(
            log_file=os.path.join(args.output_dir, "performance_log.json"),
            global_batch_size=global_batch_size,
        )
        callbacks.append(perf)

    if (args.log_lr):
        lr_monitor = LearningRateMonitor(logging_interval="step")
        callbacks.append(lr_monitor)

    loggers = []
    if (args.wandb):
        wdb_logger = WandbLogger(
            name=args.experiment_name,
            save_dir=args.output_dir,
            id=args.wandb_id,
            project=args.wandb_project,
            config=config.to_dict(),
            **{"entity": args.wandb_entity}
        )
        loggers.append(wdb_logger)

    if (args.deepspeed_config_path is not None):
        strategy = DeepSpeedStrategy(
            config=args.deepspeed_config_path,
        )
        if (args.wandb):
            wdb_logger.experiment.save(args.deepspeed_config_path)
            wdb_logger.experiment.save("openfold/config.py")
    elif (args.gpus is not None and args.gpus > 1) or args.num_nodes > 1:
        strategy = DDPStrategy(find_unused_parameters=False)
    else:
        strategy = None

    if (args.wandb):
        freeze_path = f"{wdb_logger.experiment.dir}/package_versions.txt"
        os.system(f"{sys.executable} -m pip freeze > {freeze_path}")
        wdb_logger.experiment.save(f"{freeze_path}")

    # Names of arguments accepted by the pl.Trainer constructor
    trainer_kws = {
        'accelerator', 'strategy', 'devices', 'num_nodes', 'precision',
        'logger', 'callbacks', 'fast_dev_run', 'max_epochs', 'min_epochs',
        'max_steps', 'min_steps', 'max_time', 'limit_train_batches',
        'limit_val_batches', 'limit_test_batches', 'limit_predict_batches',
        'overfit_batches', 'val_check_interval', 'check_val_every_n_epoch',
        'num_sanity_val_steps', 'log_every_n_steps', 'enable_checkpointing',
        'enable_progress_bar', 'enable_model_summary',
        'accumulate_grad_batches', 'gradient_clip_val',
        'gradient_clip_algorithm', 'deterministic', 'benchmark',
        'inference_mode', 'use_distributed_sampler', 'profiler',
        'detect_anomaly', 'barebones', 'plugins', 'sync_batchnorm',
        'reload_dataloaders_every_n_epochs', 'default_root_dir',
    }
    trainer_args = {k: v for k, v in vars(args).items() if k in trainer_kws}
    trainer_args.update({
        'default_root_dir': args.output_dir,
        'strategy': strategy,
        'callbacks': callbacks,
        'logger': loggers,
    })
    trainer = pl.Trainer(**trainer_args)

    if (args.resume_model_weights_only):
        ckpt_path = None
    else:
        ckpt_path = args.resume_from_ckpt

    trainer.fit(
        model_module,
        datamodule=data_module,
        ckpt_path=ckpt_path,
    )


def bool_type(bool_str: str):
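    """Parse a truthy/falsy string such as "yes", "f", or "1" into a bool."""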
    bool_str_lower = bool_str.lower()
    if bool_str_lower in ('false', 'f', 'no', 'n', '0'):
        return False
    elif bool_str_lower in ('true', 't', 'yes', 'y', '1'):
        return True
    else:
        raise ValueError(f'Cannot interpret {bool_str} as bool')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "train_data_dir", type=str,
        help="Directory containing training mmCIF files"
    )
    parser.add_argument(
        "train_alignment_dir", type=str,
        help="Directory containing precomputed training alignments"
    )
    parser.add_argument(
        "template_mmcif_dir", type=str,
        help="Directory containing mmCIF files to search for templates"
    )
    parser.add_argument(
        "output_dir", type=str,
        help='''Directory in which to output checkpoints, logs, etc. Ignored
                if not on rank 0'''
    )
    parser.add_argument(
        "max_template_date", type=str,
        help='''Cutoff for all templates. In training mode, templates are also 
                filtered by the release date of the target'''
    )
    parser.add_argument(
        "--train_mmcif_data_cache_path", type=str, default=None,
        help="Path to the json file which records all the information of mmcif structures used during training"
    )
    parser.add_argument(
        "--use_single_seq_mode", type=str, default=False,
        help="Use single sequence embeddings instead of MSAs."
    )
    parser.add_argument(
        "--distillation_data_dir", type=str, default=None,
        help="Directory containing training PDB files"
    )
    parser.add_argument(
        "--distillation_alignment_dir", type=str, default=None,
        help="Directory containing precomputed distillation alignments"
    )
    parser.add_argument(
        "--val_data_dir", type=str, default=None,
        help="Directory containing validation mmCIF files"
    )
    parser.add_argument(
        "--val_alignment_dir", type=str, default=None,
        help="Directory containing precomputed validation alignments"
    )
    parser.add_argument(
        "--val_mmcif_data_cache_path", type=str, default=None,
        help="path to the json file which records all the information of mmcif structures used during validation"
    )
    parser.add_argument(
        "--kalign_binary_path", type=str, default='/usr/bin/kalign',
        help="Path to the kalign binary"
    )
    parser.add_argument(
        "--train_filter_path", type=str, default=None,
        help='''Optional path to a text file containing names of training
                examples to include, one per line. Used to filter the training 
                set'''
    )
    parser.add_argument(
        "--distillation_filter_path", type=str, default=None,
        help="""See --train_filter_path"""
    )
    parser.add_argument(
        "--obsolete_pdbs_file_path", type=str, default=None,
        help="""Path to obsolete.dat file containing list of obsolete PDBs and 
             their replacements."""
    )
    parser.add_argument(
        "--template_release_dates_cache_path", type=str, default=None,
        help="""Output of scripts/generate_mmcif_cache.py run on template mmCIF
                files."""
    )
    parser.add_argument(
        "--use_small_bfd", type=bool_type, default=False,
        help="Whether to use a reduced version of the BFD database"
    )
    parser.add_argument(
        "--seed", type=int, default=None,
        help="Random seed"
    )
    parser.add_argument(
        "--deepspeed_config_path", type=str, default=None,
        help="Path to DeepSpeed config. If not provided, DeepSpeed is disabled"
    )
    parser.add_argument(
        "--checkpoint_every_epoch", action="store_true", default=False,
        help="""Whether to checkpoint at the end of every training epoch"""
    )
    parser.add_argument(
        "--early_stopping", type=bool_type, default=False,
        help="Whether to stop training when validation loss fails to decrease"
    )
    parser.add_argument(
        "--min_delta", type=float, default=0,
        help="""The smallest decrease in validation loss that counts as an 
                improvement for the purposes of early stopping"""
    )
    parser.add_argument(
        "--patience", type=int, default=3,
        help="Early stopping patience"
    )
    parser.add_argument(
        "--resume_from_ckpt", type=str, default=None,
        help="Path to a model checkpoint from which to restore training state"
    )
    parser.add_argument(
        "--resume_model_weights_only", type=bool_type, default=False,
        help="Whether to load just model weights as opposed to training state"
    )
    parser.add_argument(
        "--resume_from_jax_params", type=str, default=None,
        help="""Path to an .npz JAX parameter file with which to initialize the model"""
    )
    parser.add_argument(
        "--log_performance", type=bool_type, default=False,
        help="Measure performance"
    )
    parser.add_argument(
        "--wandb", action="store_true", default=False,
        help="Whether to log metrics to Weights & Biases"
    )
    parser.add_argument(
        "--experiment_name", type=str, default=None,
        help="Name of the current experiment. Used for wandb logging"
    )
    parser.add_argument(
        "--wandb_id", type=str, default=None,
        help="ID of a previous run to be resumed"
    )
    parser.add_argument(
        "--wandb_project", type=str, default=None,
        help="Name of the wandb project to which this run will belong"
    )
    parser.add_argument(
        "--wandb_entity", type=str, default=None,
        help="wandb username or team name to which runs are attributed"
    )
    parser.add_argument(
        "--script_modules", type=bool_type, default=False,
        help="Whether to TorchScript eligible components of them model"
    )
    parser.add_argument(
        "--train_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--distillation_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--train_epoch_len", type=int, default=10000,
        help=(
            "The virtual length of each training epoch. Stochastic filtering "
            "of training data means that training datasets have no "
            "well-defined length. This virtual length affects frequency of "
            "validation & checkpointing (by default, one of each per epoch)."
        )
    )
    parser.add_argument(
        "--log_lr", action="store_true", default=False,
        help="Whether to log the actual learning rate"
    )
    parser.add_argument(
        "--config_preset", type=str, default="initial_training",
        help=(
            'Config setting. Choose e.g. "initial_training", "finetuning", '
            '"model_1", etc. By default, the actual values in the config are '
            'used.'
        )
    )
    parser.add_argument(
        "--_distillation_structure_index_path", type=str, default=None,
    )
    parser.add_argument(
        "--alignment_index_path", type=str, default=None,
        help="Training alignment index. See the README for instructions."
    )
    parser.add_argument(
        "--distillation_alignment_index_path", type=str, default=None,
        help="Distillation alignment index. See the README for instructions."
    )
    parser.add_argument(
        "--experiment_config_json", type=str, default="",
        help="Path to a JSON file with custom config values that override the config preset",
    )
    parser.add_argument(
        "--num_nodes", type=int, default=1,
    )
    parser.add_argument(
        "--gpus", type=int, default=1,
    )
    parser.add_argument(
        "--precision", type=str, default=None,
    )
    parser.add_argument(
        "--replace_sampler_ddp", type=bool_type, default=True,
    )
    parser.add_argument(
        "--max_epochs", type=int, default=1,
    )
    parser.add_argument(
        "--log_every_n_steps", type=int, default=25,
    )
    parser.add_argument(
        "--num_sanity_val_steps", type=int, default=0,
    )

    #  parser = pl.Trainer.add_argparse_args(parser)
    #
    #  # Disable the initial validation pass
    #  parser.set_defaults(
    #      num_sanity_val_steps=0,
    #  )

    #  # Remove some buggy/redundant arguments introduced by the Trainer
    #  remove_arguments(
    #      parser,
    #      [
    #          "--accelerator",
    #          "--resume_from_checkpoint",
    #          "--reload_dataloaders_every_epoch",
    #          "--reload_dataloaders_every_n_epochs",
    #      ]
    #  )

    args = parser.parse_args()

    if (args.seed is None and
        ((args.gpus is not None and args.gpus > 1) or
         (args.num_nodes is not None and args.num_nodes > 1))):
        raise ValueError("For distributed training, --seed must be specified")

    if (str(args.precision) == "16" and args.deepspeed_config_path is not None):
        raise ValueError("DeepSpeed and FP16 training are not compatible")

    if (args.resume_from_jax_params is not None and args.resume_from_ckpt is not None):
        raise ValueError(
            "Choose either pretrained JAX weights or a checkpoint path, not both")

    # This re-applies the training-time filters at the beginning of every epoch
    args.reload_dataloaders_every_n_epochs = 1

    main(args)