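"""Train OpenFold (monomer or multimer presets) with PyTorch Lightning.

Example invocation (paths and most flag values are illustrative):

    python3 train_openfold.py mmcif_dir/ alignment_dir/ template_mmcif_dir/ \
        output_dir/ 2021-10-10 \
        --config_preset initial_training \
        --seed 42 \
        --gpus 8 \
        --deepspeed_config_path deepspeed_config.json \
        --checkpoint_every_epoch
"""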
import argparse
import logging
import os
import sys

import pytorch_lightning as pl
from pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.plugins.training_type import DeepSpeedPlugin, DDPPlugin
import torch

from openfold.config import model_config
from openfold.data.data_modules import OpenFoldDataModule, OpenFoldMultimerDataModule
from openfold.model.model import AlphaFold
from openfold.model.torchscript import script_preset_
from openfold.np import residue_constants
from openfold.utils.argparse_utils import remove_arguments
from openfold.utils.callbacks import (
    EarlyStoppingVerbose,
)
from openfold.utils.exponential_moving_average import ExponentialMovingAverage
from openfold.utils.loss import AlphaFoldLoss, lddt_ca
from openfold.utils.lr_schedulers import AlphaFoldLRScheduler
from openfold.utils.multi_chain_permutation import multi_chain_permutation_align
from openfold.utils.seed import seed_everything
from openfold.utils.superimposition import superimpose
from openfold.utils.tensor_utils import tensor_tree_map
from openfold.utils.validation_metrics import (
    drmsd,
    gdt_ts,
    gdt_ha,
)
from openfold.utils.import_weights import (
    import_jax_weights_,
    import_openfold_weights_
)
from scripts.zero_to_fp32 import (
    get_fp32_state_dict_from_zero_checkpoint,
    get_global_step_from_zero_checkpoint
)
from scripts.zero_to_fp32 import get_optim_files, parse_optim_states, get_model_state_file

from openfold.utils.logger import PerformanceLoggingCallback


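# LightningModule wrapper that couples the AlphaFold model with its loss,
# EMA weight tracking, validation metrics, and LR scheduling so that
# pl.Trainer can drive the whole training loop.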
class OpenFoldWrapper(pl.LightningModule):
    def __init__(self, config):
        super(OpenFoldWrapper, self).__init__()
        self.config = config
        self.model = AlphaFold(config)
        self.is_multimer = self.config.globals.is_multimer

        self.loss = AlphaFoldLoss(config.loss)

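        # Maintain an exponential moving average (EMA) of the model weights;
        # the EMA copy is swapped in for validation (see validation_step).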
        self.ema = ExponentialMovingAverage(
            model=self.model, decay=config.ema.decay
        )
        
        self.cached_weights = None
        self.last_lr_step = -1

    def forward(self, batch):
        return self.model(batch)

    def _log(self, loss_breakdown, batch, outputs, train=True):
        phase = "train" if train else "val"
        for loss_name, indiv_loss in loss_breakdown.items():
            self.log(
                f"{phase}/{loss_name}", 
                indiv_loss, 
                on_step=train, on_epoch=(not train), logger=True,
            )

            if(train):
                self.log(
                    f"{phase}/{loss_name}_epoch",
                    indiv_loss,
                    on_step=False, on_epoch=True, logger=True,
                )

        with torch.no_grad():
            other_metrics = self._compute_validation_metrics(
                batch, 
                outputs,
                superimposition_metrics=(not train)
            )

        for k,v in other_metrics.items():
            self.log(
                f"{phase}/{k}",
                torch.mean(v),
                on_step=False, on_epoch=True, logger=True
            )

    def training_step(self, batch, batch_idx):
        if(self.ema.device != batch["aatype"].device):
            self.ema.to(batch["aatype"].device)

        ground_truth = batch.pop('gt_features', None)

        # Run the model
        outputs = self(batch)

        # Remove the recycling dimension
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

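        # Multimer ground truth is ambiguous up to permutations of identical
        # chains; align the ground-truth chains to the prediction before
        # computing the loss.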
        if self.is_multimer:
            batch = multi_chain_permutation_align(out=outputs,
                                                  features=batch,
                                                  ground_truth=ground_truth)

        # Compute loss
        loss, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        # Log it
        self._log(loss_breakdown, batch, outputs)

        return loss

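    # Refresh the EMA weights after every optimizer step, just before
    # gradients are zeroed.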
    def on_before_zero_grad(self, *args, **kwargs):
        self.ema.update(self.model)

    def validation_step(self, batch, batch_idx):
        # At the start of validation, load the EMA weights
        if(self.cached_weights is None):
            # model.state_dict() contains references to model weights rather
            # than copies. Therefore, we need to clone them before calling 
            # load_state_dict().
            clone_param = lambda t: t.detach().clone()
            self.cached_weights = tensor_tree_map(clone_param, self.model.state_dict())
            self.model.load_state_dict(self.ema.state_dict()["params"])

        ground_truth = batch.pop('gt_features', None)

        # Run the model
        outputs = self(batch)
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

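        # Report the unclamped loss at validation time by disabling FAPE
        # clamping for this batch.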
        batch["use_clamped_fape"] = 0.

        if self.is_multimer:
            batch = multi_chain_permutation_align(out=outputs,
                                                  features=batch,
                                                  ground_truth=ground_truth)

        # Compute loss and other metrics
        _, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        self._log(loss_breakdown, batch, outputs, train=False)
        
    def validation_epoch_end(self, _):
        # Restore the model weights to normal
        self.model.load_state_dict(self.cached_weights)
        self.cached_weights = None

    def _compute_validation_metrics(self, 
        batch, 
        outputs, 
        superimposition_metrics=False
    ):
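        # Cα lDDT and Cα dRMSD are always computed; GDT_TS, GDT_HA, and
        # alignment RMSD require superimposing the predicted Cα trace onto
        # the ground truth and are gated behind superimposition_metrics.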
        metrics = {}
        
        gt_coords = batch["all_atom_positions"]
        pred_coords = outputs["final_atom_positions"]
        all_atom_mask = batch["all_atom_mask"]
    
        # This is super janky for superimposition. Fix later
        gt_coords_masked = gt_coords * all_atom_mask[..., None]
        pred_coords_masked = pred_coords * all_atom_mask[..., None]
        ca_pos = residue_constants.atom_order["CA"]
        gt_coords_masked_ca = gt_coords_masked[..., ca_pos, :]
        pred_coords_masked_ca = pred_coords_masked[..., ca_pos, :]
        all_atom_mask_ca = all_atom_mask[..., ca_pos]
    
        lddt_ca_score = lddt_ca(
            pred_coords,
            gt_coords,
            all_atom_mask,
            eps=self.config.globals.eps,
            per_residue=False,
        )
   
        metrics["lddt_ca"] = lddt_ca_score
   
        drmsd_ca_score = drmsd(
            pred_coords_masked_ca,
            gt_coords_masked_ca,
            mask=all_atom_mask_ca, # still required here to compute n
        )
   
        metrics["drmsd_ca"] = drmsd_ca_score
    
        if(superimposition_metrics):
            superimposed_pred, alignment_rmsd = superimpose(
                gt_coords_masked_ca, pred_coords_masked_ca, all_atom_mask_ca,
            )
            gdt_ts_score = gdt_ts(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )
            gdt_ha_score = gdt_ha(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )

            metrics["alignment_rmsd"] = alignment_rmsd
            metrics["gdt_ts"] = gdt_ts_score
            metrics["gdt_ha"] = gdt_ha_score
    
        return metrics

    def configure_optimizers(self, 
        learning_rate: float = 1e-3,
        eps: float = 1e-5,
    ) -> torch.optim.Adam:
        # Ignored as long as a DeepSpeed optimizer is configured
        optimizer = torch.optim.Adam(
            self.model.parameters(), 
            lr=learning_rate, 
            eps=eps
        )

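        # When resuming mid-run, torch LR schedulers expect every param group
        # to carry an 'initial_lr' entry; patch it in if it is missing.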
        if self.last_lr_step != -1:
            for group in optimizer.param_groups:
                if 'initial_lr' not in group:
                    group['initial_lr'] = learning_rate

        lr_scheduler = AlphaFoldLRScheduler(
            optimizer,
            last_epoch=self.last_lr_step
        )

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": lr_scheduler,
                "interval": "step",
                "name": "AlphaFoldLRScheduler",
            }
        }

    def on_load_checkpoint(self, checkpoint):
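        # If templates are disabled in this config, drop template-related
        # parameters from the checkpointed EMA state before loading it.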
        ema = checkpoint["ema"]
        if(not self.model.template_config.enabled):
            ema["params"] = {k:v for k,v in ema["params"].items() if not "template" in k}
        self.ema.load_state_dict(ema)

    def on_save_checkpoint(self, checkpoint):
        checkpoint["ema"] = self.ema.state_dict()

    def resume_last_lr_step(self, lr_step):
        self.last_lr_step = lr_step

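    # Initialize the model from original AlphaFold JAX parameters; the model
    # version is inferred from the file's basename (e.g. "model_1" from
    # params_model_1.npz).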
    def load_from_jax(self, jax_path):
        model_basename = os.path.splitext(
                os.path.basename(
                    os.path.normpath(jax_path)
                )
        )[0]
        model_version = "_".join(model_basename.split("_")[1:])
        import_jax_weights_(
                self.model, jax_path, version=model_version
        )


def main(args):
    if(args.seed is not None):
        seed_everything(args.seed) 

    config = model_config(
        args.config_preset, 
        train=True, 
        low_prec=(str(args.precision) == "16")
    ) 
    model_module = OpenFoldWrapper(config)

    if(args.resume_from_ckpt):
        if(os.path.isdir(args.resume_from_ckpt)):  
            last_global_step = get_global_step_from_zero_checkpoint(args.resume_from_ckpt)
        else:
            sd = torch.load(args.resume_from_ckpt)
            last_global_step = int(sd['global_step'])
        model_module.resume_last_lr_step(last_global_step)
        logging.info("Successfully loaded last lr step...")
    if(args.resume_from_ckpt and args.resume_model_weights_only):
        if(os.path.isdir(args.resume_from_ckpt)):
            sd = get_fp32_state_dict_from_zero_checkpoint(args.resume_from_ckpt)
        else:
            sd = torch.load(args.resume_from_ckpt)
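        # Checkpoints are written from a wrapped (e.g. DeepSpeed/DDP) module;
        # strip the "module." prefix so the keys match this LightningModule.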
        sd = {k[len("module."):]:v for k,v in sd.items()}
        import_openfold_weights_(model=model_module, state_dict=sd)
        logging.info("Successfully loaded model weights...")
    if(args.resume_from_jax_params):
        model_module.load_from_jax(args.resume_from_jax_params)
        logging.info(f"Successfully loaded JAX parameters at {args.resume_from_jax_params}...")
 
    # TorchScript components of the model
    if(args.script_modules):
        script_preset_(model_module)

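    # Multimer presets require the multimer-aware data pipeline; everything
    # else uses the standard monomer data module.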
    if "multimer" in args.config_preset:
        data_module = OpenFoldMultimerDataModule(
            config=config.data,
            batch_seed=args.seed,
            **vars(args)
        )
    else:
        data_module = OpenFoldDataModule(
            config=config.data, 
            batch_seed=args.seed,
            **vars(args)
        )

    data_module.prepare_data()
    data_module.setup()
    
    callbacks = []
    if(args.checkpoint_every_epoch):
        mc = ModelCheckpoint(
            every_n_epochs=1,
            auto_insert_metric_name=False,
            save_top_k=-1,
        )
        callbacks.append(mc)

    if(args.early_stopping):
        es = EarlyStoppingVerbose(
            monitor="val/lddt_ca",
            min_delta=args.min_delta,
            patience=args.patience,
            verbose=False,
            mode="max",
            check_finite=True,
            strict=True,
        )
        callbacks.append(es)

    if(args.log_performance):
        global_batch_size = args.num_nodes * args.gpus
        perf = PerformanceLoggingCallback(
            log_file=os.path.join(args.output_dir, "performance_log.json"),
            global_batch_size=global_batch_size,
        )
        callbacks.append(perf)

    if(args.log_lr):
        lr_monitor = LearningRateMonitor(logging_interval="step")
        callbacks.append(lr_monitor)

    loggers = []
    if(args.wandb):
        wdb_logger = WandbLogger(
            name=args.experiment_name,
            save_dir=args.output_dir,
            id=args.wandb_id,
            project=args.wandb_project,
            **{"entity": args.wandb_entity}
        )
        loggers.append(wdb_logger)

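    # Select the training strategy: DeepSpeed when a config is provided,
    # plain DDP for multi-GPU or multi-node runs, single-process otherwise.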
    if(args.deepspeed_config_path is not None):
        strategy = DeepSpeedPlugin(
            config=args.deepspeed_config_path,
        )
        if(args.wandb):
            wdb_logger.experiment.save(args.deepspeed_config_path)
            wdb_logger.experiment.save("openfold/config.py")
    elif (args.gpus is not None and args.gpus > 1) or args.num_nodes > 1:
        strategy = DDPPlugin(find_unused_parameters=False)
    else:
        strategy = None
 
    if(args.wandb):
        freeze_path = f"{wdb_logger.experiment.dir}/package_versions.txt"
        os.system(f"{sys.executable} -m pip freeze > {freeze_path}")
        wdb_logger.experiment.save(f"{freeze_path}")

    trainer = pl.Trainer.from_argparse_args(
        args,
        default_root_dir=args.output_dir,
        strategy=strategy,
        callbacks=callbacks,
        logger=loggers,
    )

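    # If only the weights were loaded above, start from a fresh Trainer
    # state; otherwise let Lightning restore the full training state from
    # the checkpoint.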
    if(args.resume_model_weights_only):
        ckpt_path = None
    else:
        ckpt_path = args.resume_from_ckpt

    trainer.fit(
        model_module, 
        datamodule=data_module,
        ckpt_path=ckpt_path,
    )


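# argparse helper that accepts common spellings of booleans
# ("true"/"false", "t"/"f", "yes"/"no", "y"/"n", "1"/"0").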
def bool_type(bool_str: str):
    bool_str_lower = bool_str.lower()
    if bool_str_lower in ('false', 'f', 'no', 'n', '0'):
        return False
    elif bool_str_lower in ('true', 't', 'yes', 'y', '1'):
        return True
    else:
        raise ValueError(f'Cannot interpret {bool_str} as bool')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "train_data_dir", type=str,
        help="Directory containing training mmCIF files"
    )
    parser.add_argument(
        "train_alignment_dir", type=str,
        help="Directory containing precomputed training alignments"
    )
    parser.add_argument(
        "template_mmcif_dir", type=str,
        help="Directory containing mmCIF files to search for templates"
    )
    parser.add_argument(
        "output_dir", type=str,
        help='''Directory in which to output checkpoints, logs, etc. Ignored
                if not on rank 0'''
    )
    parser.add_argument(
        "max_template_date", type=str,
        help='''Cutoff for all templates. In training mode, templates are also 
                filtered by the release date of the target'''
    )
    parser.add_argument(
        "--train_mmcif_data_cache_path", type=str, default=None,
        help="Path to the json file which records all the information of mmcif structures used during training"
    )
    parser.add_argument(
        "--use_single_seq_mode", type=str, default=False,
        help="Use single sequence embeddings instead of MSAs."
    )
    parser.add_argument(
        "--distillation_data_dir", type=str, default=None,
        help="Directory containing training PDB files"
    )
    parser.add_argument(
        "--distillation_alignment_dir", type=str, default=None,
        help="Directory containing precomputed distillation alignments"
    )
    parser.add_argument(
        "--val_data_dir", type=str, default=None,
        help="Directory containing validation mmCIF files"
    )
    parser.add_argument(
        "--val_alignment_dir", type=str, default=None,
        help="Directory containing precomputed validation alignments"
    )
    parser.add_argument(
        "--val_mmcif_data_cache_path", type=str, default=None,
        help="path to the json file which records all the information of mmcif structures used during validation"
    )
    parser.add_argument(
        "--kalign_binary_path", type=str, default='/usr/bin/kalign',
        help="Path to the kalign binary"
    )
    parser.add_argument(
        "--train_filter_path", type=str, default=None,
        help='''Optional path to a text file containing names of training
                examples to include, one per line. Used to filter the training 
                set'''
    )
    parser.add_argument(
        "--distillation_filter_path", type=str, default=None,
        help="""See --train_filter_path"""
    )
    parser.add_argument(
        "--obsolete_pdbs_file_path", type=str, default=None,
        help="""Path to obsolete.dat file containing list of obsolete PDBs and 
             their replacements."""
    )
    parser.add_argument(
        "--template_release_dates_cache_path", type=str, default=None,
        help="""Output of scripts/generate_mmcif_cache.py run on template mmCIF
                files."""
    )
    parser.add_argument(
        "--use_small_bfd", type=bool_type, default=False,
        help="Whether to use a reduced version of the BFD database"
    )
    parser.add_argument(
        "--seed", type=int, default=None,
        help="Random seed"
    )
    parser.add_argument(
        "--deepspeed_config_path", type=str, default=None,
        help="Path to DeepSpeed config. If not provided, DeepSpeed is disabled"
    )
    parser.add_argument(
        "--checkpoint_every_epoch", action="store_true", default=False,
        help="""Whether to checkpoint at the end of every training epoch"""
    )
    parser.add_argument(
        "--early_stopping", type=bool_type, default=False,
        help="Whether to stop training when validation loss fails to decrease"
    )
    parser.add_argument(
        "--min_delta", type=float, default=0,
        help="""The smallest decrease in validation loss that counts as an 
                improvement for the purposes of early stopping"""
    )
    parser.add_argument(
        "--patience", type=int, default=3,
        help="Early stopping patience"
    )
    parser.add_argument(
        "--resume_from_ckpt", type=str, default=None,
        help="Path to a model checkpoint from which to restore training state"
    )
    parser.add_argument(
        "--resume_model_weights_only", type=bool_type, default=False,
        help="Whether to load just model weights as opposed to training state"
    )
    parser.add_argument(
        "--resume_from_jax_params", type=str, default=None,
        help="""Path to an .npz JAX parameter file with which to initialize the model"""
    )
    parser.add_argument(
        "--log_performance", type=bool_type, default=False,
        help="Measure performance"
    )
    parser.add_argument(
        "--wandb", action="store_true", default=False,
        help="Whether to log metrics to Weights & Biases"
    )
    parser.add_argument(
        "--experiment_name", type=str, default=None,
        help="Name of the current experiment. Used for wandb logging"
    )
    parser.add_argument(
        "--wandb_id", type=str, default=None,
        help="ID of a previous run to be resumed"
    )
    parser.add_argument(
        "--wandb_project", type=str, default=None,
        help="Name of the wandb project to which this run will belong"
    )
    parser.add_argument(
        "--wandb_entity", type=str, default=None,
        help="wandb username or team name to which runs are attributed"
    )
    parser.add_argument(
        "--script_modules", type=bool_type, default=False,
        help="Whether to TorchScript eligible components of them model"
    )
    parser.add_argument(
        "--train_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--distillation_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--train_epoch_len", type=int, default=10000,
        help=(
            "The virtual length of each training epoch. Stochastic filtering "
            "of training data means that training datasets have no "
            "well-defined length. This virtual length affects frequency of "
            "validation & checkpointing (by default, one of each per epoch)."
        )
    )
    parser.add_argument(
        "--log_lr", action="store_true", default=False,
        help="Whether to log the actual learning rate"
    )
    parser.add_argument(
        "--config_preset", type=str, default="initial_training",
        help=(
            'Config setting. Choose e.g. "initial_training", "finetuning", '
            '"model_1", etc. By default, the actual values in the config are '
            'used.'
        )
    )
    parser.add_argument(
        "--_distillation_structure_index_path", type=str, default=None,
    )
    parser.add_argument(
        "--alignment_index_path", type=str, default=None,
        help="Training alignment index. See the README for instructions."
    )
    parser.add_argument(
        "--distillation_alignment_index_path", type=str, default=None,
        help="Distillation alignment index. See the README for instructions."
    )
    parser = pl.Trainer.add_argparse_args(parser)
   
    # Disable the initial validation pass
    parser.set_defaults(
        num_sanity_val_steps=0,
    )

    # Remove some buggy/redundant arguments introduced by the Trainer
    remove_arguments(
        parser, 
        [
            "--accelerator", 
            "--resume_from_checkpoint",
            "--reload_dataloaders_every_epoch",
            "--reload_dataloaders_every_n_epochs",
        ]
    ) 

    args = parser.parse_args()

    if(args.seed is None and 
        ((args.gpus is not None and args.gpus > 1) or 
         (args.num_nodes is not None and args.num_nodes > 1))):
        raise ValueError("For distributed training, --seed must be specified")

    if(str(args.precision) == "16" and args.deepspeed_config_path is not None):
        raise ValueError("DeepSpeed and FP16 training are not compatible")

    if(args.resume_from_jax_params is not None and args.resume_from_ckpt is not None):
        raise ValueError("Choose between loading pretrained Jax-weights and a checkpoint-path")

    # This re-applies the training-time filters at the beginning of every epoch
    args.reload_dataloaders_every_n_epochs = 1

    main(args)