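"""Training entry point for OpenFold.

Wraps the AlphaFold model in a pytorch_lightning.LightningModule and drives
training, validation, checkpointing, and logging, optionally under DeepSpeed.

Example invocation (all paths and values below are placeholders):

    python train_openfold.py /data/mmcif_dir /data/alignment_dir \
        /data/template_mmcif_dir /data/output_dir 2021-10-10 \
        --config_preset initial_training --seed 42 --gpus 8
"""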
import argparse
import logging
import os
import sys
import json

import pytorch_lightning as pl
from pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.strategies import DeepSpeedStrategy, DDPStrategy
from pytorch_lightning import seed_everything

import torch

from openfold.config import model_config
from openfold.data.data_modules import OpenFoldDataModule, OpenFoldMultimerDataModule
from openfold.model.model import AlphaFold
from openfold.model.torchscript import script_preset_
from openfold.np import residue_constants
from openfold.utils.callbacks import (
    EarlyStoppingVerbose,
)
from openfold.utils.exponential_moving_average import ExponentialMovingAverage
from openfold.utils.loss import AlphaFoldLoss, lddt_ca
from openfold.utils.lr_schedulers import AlphaFoldLRScheduler
from openfold.utils.multi_chain_permutation import multi_chain_permutation_align
from openfold.utils.superimposition import superimpose
from openfold.utils.tensor_utils import tensor_tree_map
from openfold.utils.validation_metrics import (
    drmsd,
    gdt_ts,
    gdt_ha,
)
from openfold.utils.import_weights import (
    import_jax_weights_,
    import_openfold_weights_
)
from scripts.zero_to_fp32 import (
    get_fp32_state_dict_from_zero_checkpoint,
    get_global_step_from_zero_checkpoint
)
from openfold.utils.logger import PerformanceLoggingCallback


class OpenFoldWrapper(pl.LightningModule):
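    """pytorch_lightning.LightningModule wrapper around the AlphaFold model.

    Holds the training loss, an exponential moving average (EMA) of the model
    weights that is swapped in during validation, and the optimizer and LR
    scheduler configuration.
    """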
    def __init__(self, config):
        super(OpenFoldWrapper, self).__init__()
        self.config = config
        self.model = AlphaFold(config)
        self.is_multimer = self.config.globals.is_multimer

        self.loss = AlphaFoldLoss(config.loss)

        self.ema = ExponentialMovingAverage(
            model=self.model, decay=config.ema.decay
        )

        self.cached_weights = None
        self.last_lr_step = -1
        self.save_hyperparameters()

    def forward(self, batch):
        return self.model(batch)

    def _log(self, loss_breakdown, batch, outputs, train=True):
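        """Log each loss term and derived metric under a "train/" or "val/" prefix."""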
        phase = "train" if train else "val"
        for loss_name, indiv_loss in loss_breakdown.items():
            self.log(
                f"{phase}/{loss_name}",
                indiv_loss,
                prog_bar=(loss_name == 'loss'),
                on_step=train, on_epoch=(not train), logger=True,
            )

            if (train):
                self.log(
                    f"{phase}/{loss_name}_epoch",
                    indiv_loss,
                    on_step=False, on_epoch=True, logger=True,
                )

        with torch.no_grad():
            other_metrics = self._compute_validation_metrics(
                batch,
                outputs,
                superimposition_metrics=(not train)
            )

        for k, v in other_metrics.items():
            self.log(
                f"{phase}/{k}",
                torch.mean(v),
                prog_bar=(k == 'loss'),
                on_step=False, on_epoch=True, logger=True,
            )

    def training_step(self, batch, batch_idx):
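        """Run the model on one batch, align the multimer ground truth if
        applicable, then compute and log the training loss."""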
        if (self.ema.device != batch["aatype"].device):
            self.ema.to(batch["aatype"].device)

        ground_truth = batch.pop('gt_features', None)

        # Run the model
        outputs = self(batch)

        # Remove the recycling dimension
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        if self.is_multimer:
            batch = multi_chain_permutation_align(out=outputs,
                                                  features=batch,
                                                  ground_truth=ground_truth)

        # Compute loss
        loss, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        # Log it
        self._log(loss_breakdown, batch, outputs)

        return loss

    def on_before_zero_grad(self, *args, **kwargs):
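        # Lightning calls this hook after optimizer.step() and before
        # optimizer.zero_grad(), so the EMA sees the freshly updated weights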
        self.ema.update(self.model)

    def validation_step(self, batch, batch_idx):
        # At the start of validation, load the EMA weights
        if (self.cached_weights is None):
            # model.state_dict() contains references to model weights rather
            # than copies. Therefore, we need to clone them before calling
            # load_state_dict().
            def clone_param(t): return t.detach().clone()
            self.cached_weights = tensor_tree_map(
                clone_param, self.model.state_dict())
            self.model.load_state_dict(self.ema.state_dict()["params"])

        ground_truth = batch.pop('gt_features', None)

        # Run the model
        outputs = self(batch)
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        batch["use_clamped_fape"] = 0.

        if self.is_multimer:
            batch = multi_chain_permutation_align(out=outputs,
                                                  features=batch,
                                                  ground_truth=ground_truth)

        # Compute loss and other metrics
        _, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        self._log(loss_breakdown, batch, outputs, train=False)

    def on_validation_epoch_end(self):
        # Restore the model weights to normal
        self.model.load_state_dict(self.cached_weights)
        self.cached_weights = None

    def _compute_validation_metrics(self,
                                    batch,
                                    outputs,
                                    superimposition_metrics=False
                                    ):
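        """Compute lDDT-Ca and dRMSD; optionally also superimposition-based
        metrics (alignment RMSD, GDT-TS, GDT-HA)."""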
        metrics = {}

        gt_coords = batch["all_atom_positions"]
        pred_coords = outputs["final_atom_positions"]
        all_atom_mask = batch["all_atom_mask"]

        # This is super janky for superimposition. Fix later
        gt_coords_masked = gt_coords * all_atom_mask[..., None]
        pred_coords_masked = pred_coords * all_atom_mask[..., None]
        ca_pos = residue_constants.atom_order["CA"]
        gt_coords_masked_ca = gt_coords_masked[..., ca_pos, :]
        pred_coords_masked_ca = pred_coords_masked[..., ca_pos, :]
        all_atom_mask_ca = all_atom_mask[..., ca_pos]

        lddt_ca_score = lddt_ca(
            pred_coords,
            gt_coords,
            all_atom_mask,
            eps=self.config.globals.eps,
            per_residue=False,
        )

        metrics["lddt_ca"] = lddt_ca_score

        drmsd_ca_score = drmsd(
            pred_coords_masked_ca,
            gt_coords_masked_ca,
            mask=all_atom_mask_ca,  # still required here to compute n
        )

        metrics["drmsd_ca"] = drmsd_ca_score

        if (superimposition_metrics):
            superimposed_pred, alignment_rmsd = superimpose(
                gt_coords_masked_ca, pred_coords_masked_ca, all_atom_mask_ca,
            )
            gdt_ts_score = gdt_ts(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )
            gdt_ha_score = gdt_ha(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )

            metrics["alignment_rmsd"] = alignment_rmsd
            metrics["gdt_ts"] = gdt_ts_score
            metrics["gdt_ha"] = gdt_ha_score

        return metrics

    def configure_optimizers(self,
                             learning_rate: float = 1e-3,
                             eps: float = 1e-5,
                             ) -> torch.optim.Adam:
        # Ignored as long as a DeepSpeed optimizer is configured
        optimizer = torch.optim.Adam(
            self.model.parameters(),
            lr=learning_rate,
            eps=eps
        )

        if self.last_lr_step != -1:
            # When resuming mid-run, the scheduler below requires an
            # 'initial_lr' entry in each param group
            for group in optimizer.param_groups:
                if 'initial_lr' not in group:
                    group['initial_lr'] = learning_rate

        lr_scheduler = AlphaFoldLRScheduler(
            optimizer,
            last_epoch=self.last_lr_step
        )

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": lr_scheduler,
                "interval": "step",
                "name": "AlphaFoldLRScheduler",
            }
        }

    def on_load_checkpoint(self, checkpoint):
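        """Restore the EMA state, dropping template parameters when the
        template stack is disabled."""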
        ema = checkpoint["ema"]
        if (not self.model.template_config.enabled):
            ema["params"] = {
                k: v for k, v in ema["params"].items() if "template" not in k
            }
        self.ema.load_state_dict(ema)

    def on_save_checkpoint(self, checkpoint):
        checkpoint["ema"] = self.ema.state_dict()

    def resume_last_lr_step(self, lr_step):
        self.last_lr_step = lr_step

    def load_from_jax(self, jax_path):
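        """Import DeepMind AlphaFold parameters from an .npz file; the model
        version (e.g. "model_1" from "params_model_1.npz") is parsed from the
        filename."""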
        model_basename = os.path.splitext(
            os.path.basename(
                os.path.normpath(jax_path)
            )
        )[0]
        model_version = "_".join(model_basename.split("_")[1:])
        import_jax_weights_(
            self.model, jax_path, version=model_version
        )


def main(args):
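    """Build the model, data module, callbacks, loggers, and trainer, then fit."""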
    if (args.seed is not None):
        seed_everything(args.seed, workers=True)

    config = model_config(
        args.config_preset,
        train=True,
        low_prec=(str(args.precision) == "16")
    )
    if args.experiment_config_json: 
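        # The JSON maps flattened, dot-separated config keys to values, e.g.
        # {"globals.blocks_per_ckpt": 1} (a hypothetical override)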
        with open(args.experiment_config_json, 'r') as f:
            custom_config_dict = json.load(f)
        config.update_from_flattened_dict(custom_config_dict)

    model_module = OpenFoldWrapper(config)

    if args.resume_from_ckpt:
        if args.resume_model_weights_only:
            # Load the checkpoint
            if os.path.isdir(args.resume_from_ckpt):
                sd = get_fp32_state_dict_from_zero_checkpoint(
                    args.resume_from_ckpt)
            else:
                sd = torch.load(args.resume_from_ckpt)
            # Process the state dict depending on the checkpoint layout
            if 'module' in sd:
                # Weights nested under a 'module' key (e.g. a DeepSpeed
                # checkpoint): strip the 'module.' prefix from each key
                sd = {k[len('module.'):]: v for k, v in sd['module'].items()}
                import_openfold_weights_(model=model_module, state_dict=sd)
            elif 'state_dict' in sd:
                # A Lightning checkpoint: weights live under 'state_dict'
                import_openfold_weights_(
                    model=model_module, state_dict=sd['state_dict'])
            else:
                # Raw weights from a pre-trained model: add the 'model.'
                # prefix that the wrapper expects
                sd = {'model.'+k: v for k, v in sd.items()}
                import_openfold_weights_(model=model_module, state_dict=sd)
            logging.info("Successfully loaded model weights...")

        else:  # Loads a checkpoint to start from a specific time step
            if os.path.isdir(args.resume_from_ckpt):
                last_global_step = get_global_step_from_zero_checkpoint(
                    args.resume_from_ckpt)
            else:
                sd = torch.load(args.resume_from_ckpt)
                last_global_step = int(sd['global_step'])
            model_module.resume_last_lr_step(last_global_step)
            logging.info("Successfully loaded last lr step...")

    if args.resume_from_jax_params:
        model_module.load_from_jax(args.resume_from_jax_params)
        logging.info(
            f"Successfully loaded JAX parameters at {args.resume_from_jax_params}...")

    # TorchScript components of the model
    if (args.script_modules):
        script_preset_(model_module)

    if "multimer" in args.config_preset:
        data_module = OpenFoldMultimerDataModule(
            config=config.data,
            batch_seed=args.seed,
            **vars(args)
        )
    else:
        data_module = OpenFoldDataModule(
            config=config.data,
            batch_seed=args.seed,
            **vars(args)
        )

    data_module.prepare_data()
    data_module.setup()

    callbacks = []
    if (args.checkpoint_every_epoch):
        mc = ModelCheckpoint(
            every_n_epochs=1,
            auto_insert_metric_name=False,
            save_top_k=-1,
        )
        callbacks.append(mc)

    if (args.early_stopping):
        es = EarlyStoppingVerbose(
            monitor="val/lddt_ca",
            min_delta=args.min_delta,
            patience=args.patience,
            verbose=False,
            mode="max",
            check_finite=True,
            strict=True,
        )
        callbacks.append(es)

    if (args.log_performance):
        global_batch_size = args.num_nodes * args.gpus
        perf = PerformanceLoggingCallback(
            log_file=os.path.join(args.output_dir, "performance_log.json"),
            global_batch_size=global_batch_size,
        )
        callbacks.append(perf)

    if (args.log_lr):
        lr_monitor = LearningRateMonitor(logging_interval="step")
        callbacks.append(lr_monitor)

    loggers = []
    if (args.wandb):
        wdb_logger = WandbLogger(
            name=args.experiment_name,
            save_dir=args.output_dir,
            id=args.wandb_id,
            project=args.wandb_project,
            config=config.to_dict(),
            **{"entity": args.wandb_entity}
        )
        loggers.append(wdb_logger)

    if (args.deepspeed_config_path is not None):
        strategy = DeepSpeedStrategy(
            config=args.deepspeed_config_path,
        )
        if (args.wandb):
            wdb_logger.experiment.save(args.deepspeed_config_path)
            wdb_logger.experiment.save("openfold/config.py")
    elif (args.gpus is not None and args.gpus > 1) or args.num_nodes > 1:
        strategy = DDPStrategy(find_unused_parameters=False)
    else:
        strategy = None

    if (args.wandb):
        freeze_path = f"{wdb_logger.experiment.dir}/package_versions.txt"
        os.system(f"{sys.executable} -m pip freeze > {freeze_path}")
        wdb_logger.experiment.save(f"{freeze_path}")

    # Arguments accepted by the pl.Trainer constructor; everything else in
    # args is filtered out below
    trainer_kws = {
        'accelerator', 'strategy', 'devices', 'num_nodes', 'precision',
        'logger', 'callbacks', 'fast_dev_run', 'max_epochs', 'min_epochs',
        'max_steps', 'min_steps', 'max_time', 'limit_train_batches',
        'limit_val_batches', 'limit_test_batches', 'limit_predict_batches',
        'overfit_batches', 'val_check_interval', 'check_val_every_n_epoch',
        'num_sanity_val_steps', 'log_every_n_steps', 'enable_checkpointing',
        'enable_progress_bar', 'enable_model_summary',
        'accumulate_grad_batches', 'gradient_clip_val',
        'gradient_clip_algorithm', 'deterministic', 'benchmark',
        'inference_mode', 'use_distributed_sampler', 'profiler',
        'detect_anomaly', 'barebones', 'plugins', 'sync_batchnorm',
        'reload_dataloaders_every_n_epochs', 'default_root_dir',
    }
    trainer_args = {k: v for k, v in vars(args).items() if k in trainer_kws}
    trainer_args.update({
        'default_root_dir': args.output_dir,
        'strategy': strategy,
        'callbacks': callbacks,
        'logger': loggers,
    })
    trainer = pl.Trainer(**trainer_args)

    if (args.resume_model_weights_only):
        ckpt_path = None
    else:
        ckpt_path = args.resume_from_ckpt

    trainer.fit(
        model_module,
        datamodule=data_module,
        ckpt_path=ckpt_path,
    )


def bool_type(bool_str: str):
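    """Parse a boolean CLI argument, accepting several true/false spellings."""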
    bool_str_lower = bool_str.lower()
    if bool_str_lower in ('false', 'f', 'no', 'n', '0'):
        return False
    elif bool_str_lower in ('true', 't', 'yes', 'y', '1'):
        return True
    else:
        raise ValueError(f'Cannot interpret {bool_str} as bool')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "train_data_dir", type=str,
        help="Directory containing training mmCIF files"
    )
    parser.add_argument(
        "train_alignment_dir", type=str,
        help="Directory containing precomputed training alignments"
    )
    parser.add_argument(
        "template_mmcif_dir", type=str,
        help="Directory containing mmCIF files to search for templates"
    )
    parser.add_argument(
        "output_dir", type=str,
        help='''Directory in which to output checkpoints, logs, etc. Ignored
                if not on rank 0'''
    )
    parser.add_argument(
        "max_template_date", type=str,
        help='''Cutoff for all templates. In training mode, templates are also 
                filtered by the release date of the target'''
    )
    parser.add_argument(
        "--train_mmcif_data_cache_path", type=str, default=None,
        help="Path to the json file which records all the information of mmcif structures used during training"
    )
    parser.add_argument(
        "--use_single_seq_mode", type=bool_type, default=False,
        help="Use single sequence embeddings instead of MSAs."
    )
    parser.add_argument(
        "--distillation_data_dir", type=str, default=None,
        help="Directory containing training PDB files"
    )
    parser.add_argument(
        "--distillation_alignment_dir", type=str, default=None,
        help="Directory containing precomputed distillation alignments"
    )
    parser.add_argument(
        "--val_data_dir", type=str, default=None,
        help="Directory containing validation mmCIF files"
    )
    parser.add_argument(
        "--val_alignment_dir", type=str, default=None,
        help="Directory containing precomputed validation alignments"
    )
    parser.add_argument(
        "--val_mmcif_data_cache_path", type=str, default=None,
        help="Path to a JSON file recording metadata for the mmCIF structures used during validation"
    )
    parser.add_argument(
        "--kalign_binary_path", type=str, default='/usr/bin/kalign',
        help="Path to the kalign binary"
    )
    parser.add_argument(
        "--train_filter_path", type=str, default=None,
        help='''Optional path to a text file containing names of training
                examples to include, one per line. Used to filter the training 
                set'''
    )
    parser.add_argument(
        "--distillation_filter_path", type=str, default=None,
        help="""See --train_filter_path"""
    )
    parser.add_argument(
        "--obsolete_pdbs_file_path", type=str, default=None,
        help="""Path to obsolete.dat file containing list of obsolete PDBs and 
             their replacements."""
    )
    parser.add_argument(
        "--template_release_dates_cache_path", type=str, default=None,
        help="""Output of scripts/generate_mmcif_cache.py run on template mmCIF
                files."""
    )
    parser.add_argument(
        "--use_small_bfd", type=bool_type, default=False,
        help="Whether to use a reduced version of the BFD database"
    )
    parser.add_argument(
        "--seed", type=int, default=None,
        help="Random seed"
    )
    parser.add_argument(
        "--deepspeed_config_path", type=str, default=None,
        help="Path to DeepSpeed config. If not provided, DeepSpeed is disabled"
    )
    parser.add_argument(
        "--checkpoint_every_epoch", action="store_true", default=False,
        help="""Whether to checkpoint at the end of every training epoch"""
    )
    parser.add_argument(
        "--early_stopping", type=bool_type, default=False,
        help="Whether to stop training when validation loss fails to decrease"
    )
    parser.add_argument(
        "--min_delta", type=float, default=0,
        help="""The smallest decrease in validation loss that counts as an 
                improvement for the purposes of early stopping"""
    )
    parser.add_argument(
        "--patience", type=int, default=3,
        help="Early stopping patience"
    )
    parser.add_argument(
        "--resume_from_ckpt", type=str, default=None,
        help="Path to a model checkpoint from which to restore training state"
    )
    parser.add_argument(
        "--resume_model_weights_only", type=bool_type, default=False,
        help="Whether to load just model weights as opposed to training state"
    )
    parser.add_argument(
        "--resume_from_jax_params", type=str, default=None,
        help="""Path to an .npz JAX parameter file with which to initialize the model"""
    )
    parser.add_argument(
        "--log_performance", type=bool_type, default=False,
        help="Measure performance"
    )
    parser.add_argument(
        "--wandb", action="store_true", default=False,
        help="Whether to log metrics to Weights & Biases"
    )
    parser.add_argument(
        "--experiment_name", type=str, default=None,
        help="Name of the current experiment. Used for wandb logging"
    )
    parser.add_argument(
        "--wandb_id", type=str, default=None,
        help="ID of a previous run to be resumed"
    )
    parser.add_argument(
        "--wandb_project", type=str, default=None,
        help="Name of the wandb project to which this run will belong"
    )
    parser.add_argument(
        "--wandb_entity", type=str, default=None,
        help="wandb username or team name to which runs are attributed"
    )
    parser.add_argument(
        "--script_modules", type=bool_type, default=False,
        help="Whether to TorchScript eligible components of them model"
    )
    parser.add_argument(
        "--train_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--distillation_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--train_epoch_len", type=int, default=10000,
        help=(
            "The virtual length of each training epoch. Stochastic filtering "
            "of training data means that training datasets have no "
            "well-defined length. This virtual length affects frequency of "
            "validation & checkpointing (by default, one of each per epoch)."
        )
    )
    parser.add_argument(
        "--log_lr", action="store_true", default=False,
        help="Whether to log the actual learning rate"
    )
    parser.add_argument(
        "--config_preset", type=str, default="initial_training",
        help=(
            'Config setting. Choose e.g. "initial_training", "finetuning", '
            '"model_1", etc. By default, the actual values in the config are '
            'used.'
        )
    )
    parser.add_argument(
        "--_distillation_structure_index_path", type=str, default=None,
    )
    parser.add_argument(
        "--alignment_index_path", type=str, default=None,
        help="Training alignment index. See the README for instructions."
    )
    parser.add_argument(
        "--distillation_alignment_index_path", type=str, default=None,
        help="Distillation alignment index. See the README for instructions."
    )
    parser.add_argument(
        "--experiment_config_json", default="",
        help="Path to a JSON file with custom config values that override the selected config preset"
    )
    # Trainer additional arguments
    # Ideally we'd want something like config.add_trainer_args()
    parser.add_argument(
        "--num_nodes", type=int, default=1,
    )
    parser.add_argument(
        "--gpus", type=int, default=1,
    )
    parser.add_argument(
        "--num_workers", type=int, default=4, # interaction with num_data_workers? 
    )
    parser.add_argument(
        "--precision", type=str, default=None,
    )
    parser.add_argument(
        "--replace_sampler_ddp", type=bool_type, default=True,
    )
    parser.add_argument(
        "--max_epochs", type=int, default=1,
    )
    parser.add_argument(
        "--log_every_n_steps", type=int, default=25,
    )
    parser.add_argument(
        "--flush_logs_every_n_steps", type=int, default=5,
    )
    parser.add_argument(
        "--num_sanity_val_steps", type=int, default=0,
    )

    args = parser.parse_args()

    if (args.seed is None and
        ((args.gpus is not None and args.gpus > 1) or
         (args.num_nodes is not None and args.num_nodes > 1))):
        raise ValueError("For distributed training, --seed must be specified")

    if (str(args.precision) == "16" and args.deepspeed_config_path is not None):
        raise ValueError("DeepSpeed and FP16 training are not compatible")

    if (args.resume_from_jax_params is not None and args.resume_from_ckpt is not None):
        raise ValueError(
            "Choose between loading pretrained JAX weights and resuming from a checkpoint")

    # This re-applies the training-time filters at the beginning of every epoch
    args.reload_dataloaders_every_n_epochs = 1

    main(args)