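"""Training entry point for OpenFold.

A typical invocation (paths and flag values are illustrative only):

    python3 train_openfold.py mmcif_dir/ alignment_dir/ template_mmcif_dir/ \
        output_dir/ 2021-10-10 --config_preset initial_training \
        --seed 42 --gpus 1 --num_nodes 1

All positional arguments and flags are defined in the argparse block at the
bottom of this file.
"""
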
import argparse
import logging
import os
import sys
import json

import pytorch_lightning as pl
from pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor
from pytorch_lightning.callbacks import DeviceStatsMonitor
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.plugins.training_type import DeepSpeedPlugin, DDPPlugin
from pytorch_lightning.utilities.seed import seed_everything
import torch

from openfold.config import model_config
from openfold.data.data_modules import OpenFoldDataModule, OpenFoldMultimerDataModule
from openfold.model.model import AlphaFold
from openfold.model.torchscript import script_preset_
from openfold.np import residue_constants
from openfold.utils.argparse_utils import remove_arguments
from openfold.utils.callbacks import (
    EarlyStoppingVerbose,
)
from openfold.utils.exponential_moving_average import ExponentialMovingAverage
from openfold.utils.loss import AlphaFoldLoss, lddt_ca
from openfold.utils.lr_schedulers import AlphaFoldLRScheduler
from openfold.utils.multi_chain_permutation import multi_chain_permutation_align
from openfold.utils.superimposition import superimpose
from openfold.utils.tensor_utils import tensor_tree_map
from openfold.utils.validation_metrics import (
    drmsd,
    gdt_ts,
    gdt_ha,
)
from openfold.utils.import_weights import (
    import_jax_weights_,
    import_openfold_weights_
)
from scripts.zero_to_fp32 import (
    get_fp32_state_dict_from_zero_checkpoint,
    get_global_step_from_zero_checkpoint
)

from openfold.utils.logger import PerformanceLoggingCallback


class OpenFoldWrapper(pl.LightningModule):
    def __init__(self, config):
        super(OpenFoldWrapper, self).__init__()
        self.config = config
        self.model = AlphaFold(config)
        self.is_multimer = self.config.globals.is_multimer

        self.loss = AlphaFoldLoss(config.loss)

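        # Maintain an exponential moving average of the model weights; the
        # EMA copy is swapped in for validation (see validation_step) and
        # stored with every checkpoint (see on_save_checkpoint).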
        self.ema = ExponentialMovingAverage(
            model=self.model, decay=config.ema.decay
        )
        
        self.cached_weights = None
        self.last_lr_step = -1
        self.save_hyperparameters()

    def forward(self, batch):
        return self.model(batch)

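    # Logs every component of the loss breakdown plus structure metrics
    # (lDDT-Ca, dRMSD, and superposition-based scores during validation).
    # Training logs per step; validation aggregates per epoch.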
    def _log(self, loss_breakdown, batch, outputs, train=True):
        phase = "train" if train else "val"
        for loss_name, indiv_loss in loss_breakdown.items():
            self.log(
                f"{phase}/{loss_name}", 
                indiv_loss, 
                on_step=train, on_epoch=(not train), logger=True,
            )

            if(train):
                self.log(
                    f"{phase}/{loss_name}_epoch",
                    indiv_loss,
                    on_step=False, on_epoch=True, logger=True,
                )

        with torch.no_grad():
            other_metrics = self._compute_validation_metrics(
                batch, 
                outputs,
                superimposition_metrics=(not train)
            )

        for k,v in other_metrics.items():
            self.log(
                f"{phase}/{k}",
                torch.mean(v),
                on_step=False, on_epoch=True, logger=True
            )

    def training_step(self, batch, batch_idx):
        if(self.ema.device != batch["aatype"].device):
            self.ema.to(batch["aatype"].device)

        ground_truth = batch.pop('gt_features', None)

        # Run the model
        outputs = self(batch)

        # Remove the recycling dimension
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

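        # For multimer inputs, the ground-truth chains are permuted to best
        # match the prediction before the loss is computed, since chain
        # order within a complex is ambiguous.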
        if self.is_multimer:
            batch = multi_chain_permutation_align(out=outputs,
                                                  features=batch,
                                                  ground_truth=ground_truth)

        # Compute loss
        loss, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        # Log it
        self._log(loss_breakdown, batch, outputs)

        return loss

    def on_before_zero_grad(self, *args, **kwargs):
        self.ema.update(self.model)

    def validation_step(self, batch, batch_idx):
        # At the start of validation, load the EMA weights
        if(self.cached_weights is None):
            # model.state_dict() contains references to model weights rather
            # than copies. Therefore, we need to clone them before calling 
            # load_state_dict().
            clone_param = lambda t: t.detach().clone()
            self.cached_weights = tensor_tree_map(clone_param, self.model.state_dict())
            self.model.load_state_dict(self.ema.state_dict()["params"])

        ground_truth = batch.pop('gt_features', None)

        # Run the model
        outputs = self(batch)
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        batch["use_clamped_fape"] = 0.

        if self.is_multimer:
            batch = multi_chain_permutation_align(out=outputs,
                                                  features=batch,
                                                  ground_truth=ground_truth)

        # Compute loss and other metrics
        _, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        self._log(loss_breakdown, batch, outputs, train=False)
        
    def validation_epoch_end(self, _):
        # Restore the model weights to normal
        self.model.load_state_dict(self.cached_weights)
        self.cached_weights = None

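    # Structure-quality metrics over Ca atoms: lDDT-Ca and dRMSD always;
    # alignment RMSD, GDT-TS, and GDT-HA only when superposition metrics
    # are requested (i.e. during validation).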
    def _compute_validation_metrics(self, 
        batch, 
        outputs, 
        superimposition_metrics=False
    ):
        metrics = {}
        
        gt_coords = batch["all_atom_positions"]
        pred_coords = outputs["final_atom_positions"]
        all_atom_mask = batch["all_atom_mask"]
    
        # This is super janky for superimposition. Fix later
        gt_coords_masked = gt_coords * all_atom_mask[..., None]
        pred_coords_masked = pred_coords * all_atom_mask[..., None]
        ca_pos = residue_constants.atom_order["CA"]
        gt_coords_masked_ca = gt_coords_masked[..., ca_pos, :]
        pred_coords_masked_ca = pred_coords_masked[..., ca_pos, :]
        all_atom_mask_ca = all_atom_mask[..., ca_pos]
    
        lddt_ca_score = lddt_ca(
            pred_coords,
            gt_coords,
            all_atom_mask,
            eps=self.config.globals.eps,
            per_residue=False,
        )
   
        metrics["lddt_ca"] = lddt_ca_score
   
        drmsd_ca_score = drmsd(
            pred_coords_masked_ca,
            gt_coords_masked_ca,
            mask=all_atom_mask_ca, # still required here to compute n
        )
   
        metrics["drmsd_ca"] = drmsd_ca_score
    
        if(superimposition_metrics):
            superimposed_pred, alignment_rmsd = superimpose(
                gt_coords_masked_ca, pred_coords_masked_ca, all_atom_mask_ca,
            )
            gdt_ts_score = gdt_ts(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )
            gdt_ha_score = gdt_ha(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )

            metrics["alignment_rmsd"] = alignment_rmsd
            metrics["gdt_ts"] = gdt_ts_score
            metrics["gdt_ha"] = gdt_ha_score
    
        return metrics

    def configure_optimizers(self, 
        learning_rate: float = 1e-3,
        eps: float = 1e-5,
    ) -> dict:
#        return torch.optim.Adam(
#            self.model.parameters(),
#            lr=learning_rate,
#            eps=eps
#        )
        # Ignored as long as a DeepSpeed optimizer is configured
        optimizer = torch.optim.Adam(
            self.model.parameters(), 
            lr=learning_rate, 
            eps=eps
        )

        # Param groups restored from a checkpoint may lack the "initial_lr"
        # key that the scheduler expects; backfill it when resuming.
        if self.last_lr_step != -1:
            for group in optimizer.param_groups:
                if 'initial_lr' not in group:
                    group['initial_lr'] = learning_rate

        lr_scheduler = AlphaFoldLRScheduler(
            optimizer,
            last_epoch=self.last_lr_step
        )

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": lr_scheduler,
                "interval": "step",
                "name": "AlphaFoldLRScheduler",
            }
        }

    def on_load_checkpoint(self, checkpoint):
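        # If templates are disabled in this config, drop template-related
        # parameters from the stored EMA state so it matches the current
        # model.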
        ema = checkpoint["ema"]
        if(not self.model.template_config.enabled):
            ema["params"] = {k:v for k,v in ema["params"].items() if not "template" in k}
        self.ema.load_state_dict(ema)

    def on_save_checkpoint(self, checkpoint):
        checkpoint["ema"] = self.ema.state_dict()

    def resume_last_lr_step(self, lr_step):
        self.last_lr_step = lr_step

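    # Infers the AlphaFold model version from the .npz filename
    # (e.g. "params_model_1.npz" -> "model_1") and maps the JAX parameters
    # onto this module's PyTorch weights.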
    def load_from_jax(self, jax_path):
        model_basename = os.path.splitext(
                os.path.basename(
                    os.path.normpath(jax_path)
                )
        )[0]
        model_version = "_".join(model_basename.split("_")[1:])
        import_jax_weights_(
                self.model, jax_path, version=model_version
        )


def main(args):
    if(args.seed is not None):
        seed_everything(args.seed, workers=True) 

    config = model_config(
        args.config_preset, 
        train=True, 
        low_prec=(str(args.precision) == "16")
    ) 
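
    # Optionally override config values with a flat JSON mapping of dotted
    # keys to values, e.g. (hypothetical key):
    #   {"model.evoformer_stack.no_blocks": 24}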
    if args.experiment_config_json: 
        with open(args.experiment_config_json, 'r') as f:
            custom_config_dict = json.load(f)
        config.update_from_flattened_dict(custom_config_dict)

    model_module = OpenFoldWrapper(config)

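    # Two resume modes: with --resume_model_weights_only, only parameters
    # are loaded (e.g. for fine-tuning); otherwise just the last LR step is
    # extracted here and the Trainer restores the full training state via
    # ckpt_path below.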
    if args.resume_from_ckpt:
        if args.resume_model_weights_only:
            # Load the checkpoint
            if os.path.isdir(args.resume_from_ckpt):
                sd = get_fp32_state_dict_from_zero_checkpoint(
                    args.resume_from_ckpt)
            else:
                sd = torch.load(args.resume_from_ckpt)
            # Process the state dict
            if 'module' in sd:
                sd = {k[len('module.'):]: v for k, v in sd['module'].items()}
                import_openfold_weights_(model=model_module, state_dict=sd)
            elif 'state_dict' in sd:
                import_openfold_weights_(
                    model=model_module, state_dict=sd['state_dict'])
            else:
                # Loading from pre-trained model
                sd = {'model.'+k: v for k, v in sd.items()}
                import_openfold_weights_(model=model_module, state_dict=sd)
            logging.info("Successfully loaded model weights...")

        else:  # Loads a checkpoint to start from a specific time step
            if os.path.isdir(args.resume_from_ckpt):
                last_global_step = get_global_step_from_zero_checkpoint(
                    args.resume_from_ckpt)
            else:
                sd = torch.load(args.resume_from_ckpt)
                last_global_step = int(sd['global_step'])
            model_module.resume_last_lr_step(last_global_step)
            logging.info("Successfully loaded last lr step...")

    if args.resume_from_jax_params:
        model_module.load_from_jax(args.resume_from_jax_params)
        logging.info(f"Successfully loaded JAX parameters at {args.resume_from_jax_params}...")
 
    # TorchScript components of the model
    if(args.script_modules):
        script_preset_(model_module)

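    # Multimer presets need the multimer-aware data pipeline; all other
    # presets use the standard monomer data module.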
    if "multimer" in args.config_preset:
        data_module = OpenFoldMultimerDataModule(
            config=config.data, 
            batch_seed=args.seed,
            **vars(args)
        )
    else:
        data_module = OpenFoldDataModule(
            config=config.data, 
            batch_seed=args.seed,
            **vars(args)
        )

    data_module.prepare_data()
    data_module.setup()
    
    callbacks = []
    if(args.checkpoint_every_epoch):
        mc = ModelCheckpoint(
            every_n_epochs=1,
            auto_insert_metric_name=False,
            save_top_k=-1,
        )
        callbacks.append(mc)

    if(args.early_stopping):
        es = EarlyStoppingVerbose(
            monitor="val/lddt_ca",
            min_delta=args.min_delta,
            patience=args.patience,
            verbose=False,
            mode="max",
            check_finite=True,
            strict=True,
        )
        callbacks.append(es)

    if(args.log_performance):
        global_batch_size = args.num_nodes * args.gpus
        perf = PerformanceLoggingCallback(
            log_file=os.path.join(args.output_dir, "performance_log.json"),
            global_batch_size=global_batch_size,
        )
        callbacks.append(perf)

    if(args.log_lr):
        lr_monitor = LearningRateMonitor(logging_interval="step")
        callbacks.append(lr_monitor)

    loggers = []
    if(args.wandb):
        wdb_logger = WandbLogger(
            name=args.experiment_name,
            save_dir=args.output_dir,
            id=args.wandb_id,
            project=args.wandb_project,
            config=config.to_dict(),
            entity=args.wandb_entity
        )
        loggers.append(wdb_logger)

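    # Choose a distributed strategy: DeepSpeed when a config is supplied,
    # DDP for multi-GPU or multi-node runs, and none for a single process.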
    if(args.deepspeed_config_path is not None):
        strategy = DeepSpeedPlugin(
            config=args.deepspeed_config_path,
        )
        if(args.wandb):
            wdb_logger.experiment.save(args.deepspeed_config_path)
            wdb_logger.experiment.save("openfold/config.py")
    elif (args.gpus is not None and args.gpus > 1) or args.num_nodes > 1:
        strategy = DDPPlugin(find_unused_parameters=False)
    else:
        strategy = None
 
    if(args.wandb):
        freeze_path = f"{wdb_logger.experiment.dir}/package_versions.txt"
        os.system(f"{sys.executable} -m pip freeze > {freeze_path}")
        wdb_logger.experiment.save(f"{freeze_path}")

    trainer = pl.Trainer.from_argparse_args(
        args,
        default_root_dir=args.output_dir,
        strategy=strategy,
        callbacks=callbacks,
        logger=loggers,
        profiler='simple',
    )

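    # If only weights were loaded above, start a fresh run; otherwise pass
    # the checkpoint to the Trainer so optimizer/scheduler state is restored.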
    if (args.resume_model_weights_only):
        ckpt_path = None
    else:
        ckpt_path = args.resume_from_ckpt

    trainer.fit(
        model_module, 
        datamodule=data_module,
        ckpt_path=ckpt_path,
    )


def bool_type(bool_str: str):
    bool_str_lower = bool_str.lower()
    if bool_str_lower in ('false', 'f', 'no', 'n', '0'):
        return False
    elif bool_str_lower in ('true', 't', 'yes', 'y', '1'):
        return True
    else:
        raise ValueError(f'Cannot interpret {bool_str} as bool')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "train_data_dir", type=str,
        help="Directory containing training mmCIF files"
    )
    parser.add_argument(
        "train_alignment_dir", type=str,
        help="Directory containing precomputed training alignments"
    )
    parser.add_argument(
        "template_mmcif_dir", type=str,
        help="Directory containing mmCIF files to search for templates"
    )
    parser.add_argument(
        "output_dir", type=str,
        help='''Directory in which to output checkpoints, logs, etc. Ignored
                if not on rank 0'''
    )
    parser.add_argument(
        "max_template_date", type=str,
        help='''Cutoff for all templates. In training mode, templates are also 
                filtered by the release date of the target'''
    )
    parser.add_argument(
        "--train_mmcif_data_cache_path", type=str, default=None,
        help="Path to the json file which records all the information of mmcif structures used during training"
    )
    parser.add_argument(
        "--use_single_seq_mode", type=str, default=False,
        help="Use single sequence embeddings instead of MSAs."
    )
    parser.add_argument(
        "--distillation_data_dir", type=str, default=None,
        help="Directory containing training PDB files"
    )
    parser.add_argument(
        "--distillation_alignment_dir", type=str, default=None,
        help="Directory containing precomputed distillation alignments"
    )
    parser.add_argument(
        "--val_data_dir", type=str, default=None,
        help="Directory containing validation mmCIF files"
    )
    parser.add_argument(
        "--val_alignment_dir", type=str, default=None,
        help="Directory containing precomputed validation alignments"
    )
    parser.add_argument(
        "--val_mmcif_data_cache_path", type=str, default=None,
        help="path to the json file which records all the information of mmcif structures used during validation"
    )
    parser.add_argument(
        "--kalign_binary_path", type=str, default='/usr/bin/kalign',
        help="Path to the kalign binary"
    )
    parser.add_argument(
        "--train_filter_path", type=str, default=None,
        help='''Optional path to a text file containing names of training
                examples to include, one per line. Used to filter the training 
                set'''
    )
    parser.add_argument(
        "--distillation_filter_path", type=str, default=None,
        help="""See --train_filter_path"""
    )
    parser.add_argument(
        "--obsolete_pdbs_file_path", type=str, default=None,
        help="""Path to obsolete.dat file containing list of obsolete PDBs and 
             their replacements."""
    )
    parser.add_argument(
        "--template_release_dates_cache_path", type=str, default=None,
        help="""Output of scripts/generate_mmcif_cache.py run on template mmCIF
                files."""
    )
    parser.add_argument(
        "--use_small_bfd", type=bool_type, default=False,
        help="Whether to use a reduced version of the BFD database"
    )
    parser.add_argument(
        "--seed", type=int, default=None,
        help="Random seed"
    )
    parser.add_argument(
        "--deepspeed_config_path", type=str, default=None,
        help="Path to DeepSpeed config. If not provided, DeepSpeed is disabled"
    )
    parser.add_argument(
        "--checkpoint_every_epoch", action="store_true", default=False,
        help="""Whether to checkpoint at the end of every training epoch"""
    )
    parser.add_argument(
        "--early_stopping", type=bool_type, default=False,
        help="Whether to stop training when validation loss fails to decrease"
    )
    parser.add_argument(
        "--min_delta", type=float, default=0,
        help="""The smallest decrease in validation loss that counts as an 
                improvement for the purposes of early stopping"""
    )
    parser.add_argument(
        "--patience", type=int, default=3,
        help="Early stopping patience"
    )
    parser.add_argument(
        "--resume_from_ckpt", type=str, default=None,
        help="Path to a model checkpoint from which to restore training state"
    )
    parser.add_argument(
        "--resume_model_weights_only", type=bool_type, default=False,
        help="Whether to load just model weights as opposed to training state"
    )
    parser.add_argument(
        "--resume_from_jax_params", type=str, default=None,
        help="""Path to an .npz JAX parameter file with which to initialize the model"""
    )
    parser.add_argument(
        "--log_performance", type=bool_type, default=False,
        help="Measure performance"
    )
    parser.add_argument(
        "--wandb", action="store_true", default=False,
        help="Whether to log metrics to Weights & Biases"
    )
    parser.add_argument(
        "--experiment_name", type=str, default=None,
        help="Name of the current experiment. Used for wandb logging"
    )
    parser.add_argument(
        "--wandb_id", type=str, default=None,
        help="ID of a previous run to be resumed"
    )
    parser.add_argument(
        "--wandb_project", type=str, default=None,
        help="Name of the wandb project to which this run will belong"
    )
    parser.add_argument(
        "--wandb_entity", type=str, default=None,
        help="wandb username or team name to which runs are attributed"
    )
    parser.add_argument(
        "--script_modules", type=bool_type, default=False,
        help="Whether to TorchScript eligible components of them model"
    )
    parser.add_argument(
        "--train_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--distillation_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--train_epoch_len", type=int, default=10000,
        help=(
            "The virtual length of each training epoch. Stochastic filtering "
            "of training data means that training datasets have no "
            "well-defined length. This virtual length affects frequency of "
            "validation & checkpointing (by default, one of each per epoch)."
        )
    )
    parser.add_argument(
        "--log_lr", action="store_true", default=False,
        help="Whether to log the actual learning rate"
    )
    parser.add_argument(
        "--config_preset", type=str, default="initial_training",
        help=(
            'Config setting. Choose e.g. "initial_training", "finetuning", '
            '"model_1", etc. By default, the actual values in the config are '
            'used.'
        )
    )
    parser.add_argument(
        "--_distillation_structure_index_path", type=str, default=None,
    )
    parser.add_argument(
        "--alignment_index_path", type=str, default=None,
        help="Training alignment index. See the README for instructions."
    )
    parser.add_argument(
        "--distillation_alignment_index_path", type=str, default=None,
        help="Distillation alignment index. See the README for instructions."
    )
    parser.add_argument(
        "--experiment_config_json", default="", help="Path to a json file with custom config values to overwrite config setting",
    )
    parser = pl.Trainer.add_argparse_args(parser)

    # Disable the initial validation pass
    parser.set_defaults(
        num_sanity_val_steps=0,
    )

    # Remove some buggy/redundant arguments introduced by the Trainer
    remove_arguments(
        parser, 
        [
            "--accelerator", 
            "--resume_from_checkpoint",
            "--reload_dataloaders_every_epoch",
            "--reload_dataloaders_every_n_epochs",
        ]
    ) 

    args = parser.parse_args()

    if(args.seed is None and 
        ((args.gpus is not None and args.gpus > 1) or 
         (args.num_nodes is not None and args.num_nodes > 1))):
        raise ValueError("For distributed training, --seed must be specified")

    if(str(args.precision) == "16" and args.deepspeed_config_path is not None):
        raise ValueError("DeepSpeed and FP16 training are not compatible")

    if(args.resume_from_jax_params is not None and args.resume_from_ckpt is not None):
        raise ValueError("Choose between loading pretrained Jax-weights and a checkpoint-path")

    # This re-applies the training-time filters at the beginning of every epoch
    args.reload_dataloaders_every_n_epochs = 1

    main(args)