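"""Training entry point for OpenFold.

Trains the AlphaFold/AlphaFold-Multimer model with PyTorch Lightning,
supporting DeepSpeed and DDP strategies, EMA weight averaging, resumption
from Lightning or DeepSpeed ZeRO checkpoints, and initialization from
pretrained JAX parameters.

Example invocation (all paths and the date are illustrative placeholders):

    python3 train_openfold.py mmcif_dir/ alignment_dir/ template_mmcif_dir/ \
        output_dir/ 2021-10-10 --config_preset initial_training --gpus 1 \
        --seed 42
"""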
import argparse
import logging
import os
import sys
import json

import pytorch_lightning as pl
from pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.plugins.training_type import DeepSpeedPlugin, DDPPlugin

import torch

from openfold.config import model_config
from openfold.data.data_modules import OpenFoldDataModule, OpenFoldMultimerDataModule
from openfold.model.model import AlphaFold
from openfold.model.torchscript import script_preset_
from openfold.np import residue_constants
from openfold.utils.argparse_utils import remove_arguments
from openfold.utils.callbacks import (
    EarlyStoppingVerbose,
)
from openfold.utils.exponential_moving_average import ExponentialMovingAverage
from openfold.utils.loss import AlphaFoldLoss, lddt_ca
from openfold.utils.lr_schedulers import AlphaFoldLRScheduler
from openfold.utils.multi_chain_permutation import multi_chain_permutation_align
from openfold.utils.seed import seed_everything
from openfold.utils.superimposition import superimpose
from openfold.utils.tensor_utils import tensor_tree_map
from openfold.utils.validation_metrics import (
    drmsd,
    gdt_ts,
    gdt_ha,
)
from openfold.utils.import_weights import (
    import_jax_weights_,
    import_openfold_weights_
)
from scripts.zero_to_fp32 import (
    get_fp32_state_dict_from_zero_checkpoint,
    get_global_step_from_zero_checkpoint
)
from openfold.utils.logger import PerformanceLoggingCallback


class OpenFoldWrapper(pl.LightningModule):
    def __init__(self, config):
        super(OpenFoldWrapper, self).__init__()
        self.config = config
        self.model = AlphaFold(config)
        self.is_multimer = self.config.globals.is_multimer

        self.loss = AlphaFoldLoss(config.loss)

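        # Maintain an exponential moving average of the model weights; the EMA
        # weights are swapped in during validation and saved with checkpoints.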
        self.ema = ExponentialMovingAverage(
            model=self.model, decay=config.ema.decay
        )

        self.cached_weights = None
        self.last_lr_step = -1
        self.save_hyperparameters()

    def forward(self, batch):
        return self.model(batch)

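    # Log the per-term loss breakdown plus structure metrics. Training values
    # are logged per step (with an additional epoch-level aggregate);
    # validation values are aggregated per epoch.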
    def _log(self, loss_breakdown, batch, outputs, train=True):
        phase = "train" if train else "val"
        for loss_name, indiv_loss in loss_breakdown.items():
            self.log(
                f"{phase}/{loss_name}", 
                indiv_loss, 
                on_step=train, on_epoch=(not train), logger=True,
            )

            if(train):
                self.log(
                    f"{phase}/{loss_name}_epoch",
                    indiv_loss,
                    on_step=False, on_epoch=True, logger=True,
                )

        with torch.no_grad():
            other_metrics = self._compute_validation_metrics(
                batch, 
                outputs,
                superimposition_metrics=(not train)
            )

        for k,v in other_metrics.items():
            self.log(
                f"{phase}/{k}",
                torch.mean(v),
                on_step=False, on_epoch=True, logger=True
            )

    def training_step(self, batch, batch_idx):
        # Keep the EMA parameters on the same device as the inputs
        if(self.ema.device != batch["aatype"].device):
            self.ema.to(batch["aatype"].device)

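        # Multimer batches carry the unpermuted ground truth under
        # 'gt_features'; it is consumed by the chain permutation alignment
        # below (absent for monomers).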
        ground_truth = batch.pop('gt_features', None)

        # Run the model
        outputs = self(batch)

        # Remove the recycling dimension: input features are stacked along a
        # trailing recycling axis, and only the slice belonging to the final
        # recycling iteration is needed for the loss.
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

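        # For multimer targets, the assignment of predicted chains to ground
        # truth chains is ambiguous up to a permutation of identical chains;
        # find the optimal assignment before computing the loss.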
        if self.is_multimer:
            batch = multi_chain_permutation_align(out=outputs,
                                                  features=batch,
                                                  ground_truth=ground_truth)

        # Compute loss
        loss, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        # Log it
        self._log(loss_breakdown, batch, outputs)

        return loss

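    # Lightning calls this hook after optimizer.step() and just before the
    # gradients are zeroed, so the EMA tracks the freshly updated weights.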
    def on_before_zero_grad(self, *args, **kwargs):
        self.ema.update(self.model)
    def validation_step(self, batch, batch_idx):
        # At the start of validation, load the EMA weights
        if(self.cached_weights is None):
            # model.state_dict() contains references to model weights rather
            # than copies. Therefore, we need to clone them before calling 
            # load_state_dict().
            clone_param = lambda t: t.detach().clone()
            self.cached_weights = tensor_tree_map(clone_param, self.model.state_dict())
            self.model.load_state_dict(self.ema.state_dict()["params"])

        ground_truth = batch.pop('gt_features', None)

        # Run the model
        outputs = self(batch)
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        # Validation always uses the unclamped FAPE loss
        batch["use_clamped_fape"] = 0.

        if self.is_multimer:
            batch = multi_chain_permutation_align(out=outputs,
                                                  features=batch,
                                                  ground_truth=ground_truth)

        # Compute loss and other metrics
        _, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        self._log(loss_breakdown, batch, outputs, train=False)
        
    def validation_epoch_end(self, _):
        # Restore the model weights to normal
        self.model.load_state_dict(self.cached_weights)
        self.cached_weights = None
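    # Structure-quality metrics from predicted vs. ground truth coordinates:
    # lDDT-CA and dRMSD are always computed; alignment RMSD, GDT_TS and GDT_HA
    # only when superimposition_metrics is set (i.e. during validation).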
    def _compute_validation_metrics(self, 
        batch, 
        outputs, 
        superimposition_metrics=False
    ):
        metrics = {}
        
        gt_coords = batch["all_atom_positions"]
        pred_coords = outputs["final_atom_positions"]
        all_atom_mask = batch["all_atom_mask"]
    
        # This is super janky for superimposition. Fix later
        gt_coords_masked = gt_coords * all_atom_mask[..., None]
        pred_coords_masked = pred_coords * all_atom_mask[..., None]
        ca_pos = residue_constants.atom_order["CA"]
        gt_coords_masked_ca = gt_coords_masked[..., ca_pos, :]
        pred_coords_masked_ca = pred_coords_masked[..., ca_pos, :]
        all_atom_mask_ca = all_atom_mask[..., ca_pos]
    
        lddt_ca_score = lddt_ca(
            pred_coords,
            gt_coords,
            all_atom_mask,
            eps=self.config.globals.eps,
            per_residue=False,
        )
   
        metrics["lddt_ca"] = lddt_ca_score
   
        drmsd_ca_score = drmsd(
            pred_coords_masked_ca,
            gt_coords_masked_ca,
            mask=all_atom_mask_ca, # still required here to compute n
        )
   
        metrics["drmsd_ca"] = drmsd_ca_score
    
        if(superimposition_metrics):
            # GDT-type metrics are only meaningful after a rigid-body
            # superimposition of the masked CA traces
            superimposed_pred, alignment_rmsd = superimpose(
                gt_coords_masked_ca, pred_coords_masked_ca, all_atom_mask_ca,
            )
            gdt_ts_score = gdt_ts(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )
            gdt_ha_score = gdt_ha(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )

            metrics["alignment_rmsd"] = alignment_rmsd
            metrics["gdt_ts"] = gdt_ts_score
            metrics["gdt_ha"] = gdt_ha_score
    
        return metrics

    def configure_optimizers(self,
        learning_rate: float = 1e-3,
        eps: float = 1e-5,
    ) -> dict:
        # Ignored as long as a DeepSpeed optimizer is configured
        optimizer = torch.optim.Adam(
            self.model.parameters(), 
            lr=learning_rate, 
            eps=eps
        )

        if self.last_lr_step != -1:
            # When resuming mid-run, the scheduler requires each param group
            # to carry an 'initial_lr' entry, which a freshly constructed
            # optimizer does not have.
            for group in optimizer.param_groups:
                if 'initial_lr' not in group:
                    group['initial_lr'] = learning_rate

        lr_scheduler = AlphaFoldLRScheduler(
            optimizer,
            last_epoch=self.last_lr_step
        )

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": lr_scheduler,
                "interval": "step",
                "name": "AlphaFoldLRScheduler",
            }
        }
    def on_load_checkpoint(self, checkpoint):
        ema = checkpoint["ema"]
        if(not self.model.template_config.enabled):
            # The template stack is disabled, so the model has no template
            # parameters; drop their EMA entries before loading.
            ema["params"] = {k: v for k, v in ema["params"].items() if "template" not in k}
        self.ema.load_state_dict(ema)
    def on_save_checkpoint(self, checkpoint):
        checkpoint["ema"] = self.ema.state_dict()

    def resume_last_lr_step(self, lr_step):
        self.last_lr_step = lr_step

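    # Infer the AlphaFold model version (e.g. "model_1" from
    # "params_model_1.npz") from the parameter filename, then import the
    # corresponding JAX weights.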
    def load_from_jax(self, jax_path):
        model_basename = os.path.splitext(
                os.path.basename(
                    os.path.normpath(jax_path)
                )
        )[0]
        model_version = "_".join(model_basename.split("_")[1:])
        import_jax_weights_(
                self.model, jax_path, version=model_version
        )

def main(args):
    if(args.seed is not None):
        seed_everything(args.seed) 

    config = model_config(
        args.config_preset,
        train=True,
        low_prec=(str(args.precision) == "16")
    )

    if args.experiment_config_json: 
        with open(args.experiment_config_json, 'r') as f:
            custom_config_dict = json.load(f)
        config.update_from_flattened_dict(custom_config_dict)

    model_module = OpenFoldWrapper(config)

    if args.resume_from_ckpt:
        if args.resume_model_weights_only:
            # Load the checkpoint. DeepSpeed ZeRO checkpoints are directories
            # of sharded states that must be consolidated into a single fp32
            # state dict before they can be loaded.
            if os.path.isdir(args.resume_from_ckpt):
                sd = get_fp32_state_dict_from_zero_checkpoint(
                    args.resume_from_ckpt)
            else:
                sd = torch.load(args.resume_from_ckpt)
            # Process the state dict
            if 'module' in sd:
                sd = {k[len('module.'):]: v for k, v in sd['module'].items()}
                import_openfold_weights_(model=model_module, state_dict=sd)
            elif 'state_dict' in sd:
                import_openfold_weights_(
                    model=model_module, state_dict=sd['state_dict'])
            else:
                # Loading from pre-trained model
                sd = {'model.'+k: v for k, v in sd.items()}
                import_openfold_weights_(model=model_module, state_dict=sd)
            logging.info("Successfully loaded model weights...")

        else:  # Loads a checkpoint to start from a specific time step
            if os.path.isdir(args.resume_from_ckpt):
                last_global_step = get_global_step_from_zero_checkpoint(
                    args.resume_from_ckpt)
            else:
                sd = torch.load(args.resume_from_ckpt)
                last_global_step = int(sd['global_step'])
            model_module.resume_last_lr_step(last_global_step)
            logging.info("Successfully loaded last lr step...")

    if args.resume_from_jax_params:
        model_module.load_from_jax(args.resume_from_jax_params)
        logging.info(f"Successfully loaded JAX parameters at {args.resume_from_jax_params}...")

    # TorchScript components of the model
    if(args.script_modules):
        script_preset_(model_module)

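    # Select the data module matching the config preset; multimer presets use
    # the dedicated multimer data pipeline.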
    if "multimer" in args.config_preset:
        data_module = OpenFoldMultimerDataModule(
            config=config.data,
            batch_seed=args.seed,
            **vars(args)
        )
    else:
        data_module = OpenFoldDataModule(
            config=config.data, 
            batch_seed=args.seed,
            **vars(args)
        )

    data_module.prepare_data()
    data_module.setup()
    callbacks = []
    if(args.checkpoint_every_epoch):
        mc = ModelCheckpoint(
            every_n_epochs=1,
            auto_insert_metric_name=False,
            save_top_k=-1,
        )
        callbacks.append(mc)

    if(args.early_stopping):
        es = EarlyStoppingVerbose(
            monitor="val/lddt_ca",
            min_delta=args.min_delta,
            patience=args.patience,
            verbose=False,
            mode="max",
            check_finite=True,
            strict=True,
        )
        callbacks.append(es)

    if(args.log_performance):
        global_batch_size = args.num_nodes * args.gpus
        perf = PerformanceLoggingCallback(
            log_file=os.path.join(args.output_dir, "performance_log.json"),
            global_batch_size=global_batch_size,
        )
        callbacks.append(perf)

    if(args.log_lr):
        lr_monitor = LearningRateMonitor(logging_interval="step")
        callbacks.append(lr_monitor)

    loggers = []
    if(args.wandb):
        wdb_logger = WandbLogger(
            name=args.experiment_name,
            save_dir=args.output_dir,
            id=args.wandb_id,
            project=args.wandb_project,
            entity=args.wandb_entity,
        )
        loggers.append(wdb_logger)

    # Choose the parallel strategy: DeepSpeed when a config is supplied, DDP
    # for multi-GPU or multi-node runs, and single-process otherwise.
    if(args.deepspeed_config_path is not None):
        strategy = DeepSpeedPlugin(
            config=args.deepspeed_config_path,
        )
        if(args.wandb):
            wdb_logger.experiment.save(args.deepspeed_config_path)
            wdb_logger.experiment.save("openfold/config.py")
    elif (args.gpus is not None and args.gpus > 1) or args.num_nodes > 1:
        strategy = DDPPlugin(find_unused_parameters=False)
    else:
        strategy = None

    if(args.wandb):
        # Record the environment's package versions alongside the run
        freeze_path = f"{wdb_logger.experiment.dir}/package_versions.txt"
        os.system(f"{sys.executable} -m pip freeze > {freeze_path}")
        wdb_logger.experiment.save(f"{freeze_path}")

    trainer = pl.Trainer.from_argparse_args(
        args,
        default_root_dir=args.output_dir,
        strategy=strategy,
        callbacks=callbacks,
        logger=loggers,
    )

    if(args.resume_model_weights_only):
        # The weights were already imported above; withhold the checkpoint
        # path so the Trainer does not also restore optimizer/scheduler state.
        ckpt_path = None
    else:
        ckpt_path = args.resume_from_ckpt

    trainer.fit(
        model_module, 
        datamodule=data_module,
        ckpt_path=ckpt_path,
    )


def bool_type(bool_str: str):
    bool_str_lower = bool_str.lower()
    if bool_str_lower in ('false', 'f', 'no', 'n', '0'):
        return False
    elif bool_str_lower in ('true', 't', 'yes', 'y', '1'):
        return True
    else:
        raise ValueError(f'Cannot interpret {bool_str} as bool')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "train_data_dir", type=str,
        help="Directory containing training mmCIF files"
    )
    parser.add_argument(
        "train_alignment_dir", type=str,
        help="Directory containing precomputed training alignments"
    )
    parser.add_argument(
        "template_mmcif_dir", type=str,
        help="Directory containing mmCIF files to search for templates"
    )
    parser.add_argument(
        "output_dir", type=str,
        help='''Directory in which to output checkpoints, logs, etc. Ignored
                if not on rank 0'''
    )
    parser.add_argument(
        "max_template_date", type=str,
        help='''Cutoff for all templates. In training mode, templates are also
                filtered by the release date of the target'''
    )
    parser.add_argument(
        "--train_mmcif_data_cache_path", type=str, default=None,
        help="Path to a JSON file with metadata for the mmCIF structures used during training"
    )
    parser.add_argument(
        "--use_single_seq_mode", type=bool_type, default=False,
        help="Use single sequence embeddings instead of MSAs."
    )
    parser.add_argument(
        "--distillation_data_dir", type=str, default=None,
        help="Directory containing training PDB files"
    )
    parser.add_argument(
        "--distillation_alignment_dir", type=str, default=None,
        help="Directory containing precomputed distillation alignments"
    )
    parser.add_argument(
        "--val_data_dir", type=str, default=None,
        help="Directory containing validation mmCIF files"
    )
    parser.add_argument(
        "--val_alignment_dir", type=str, default=None,
        help="Directory containing precomputed validation alignments"
    )
    parser.add_argument(
        "--val_mmcif_data_cache_path", type=str, default=None,
        help="Path to a JSON file with metadata for the mmCIF structures used during validation"
    )
    parser.add_argument(
        "--kalign_binary_path", type=str, default='/usr/bin/kalign',
        help="Path to the kalign binary"
    )
    parser.add_argument(
        "--train_filter_path", type=str, default=None,
        help='''Optional path to a text file containing names of training
                examples to include, one per line. Used to filter the training
                set'''
    )
    parser.add_argument(
        "--distillation_filter_path", type=str, default=None,
        help="""See --train_filter_path"""
    )
    parser.add_argument(
        "--obsolete_pdbs_file_path", type=str, default=None,
        help="""Path to obsolete.dat file containing list of obsolete PDBs and
             their replacements."""
    )
    parser.add_argument(
        "--template_release_dates_cache_path", type=str, default=None,
        help="""Output of scripts/generate_mmcif_cache.py run on template mmCIF
                files."""
    )
    parser.add_argument(
        "--use_small_bfd", type=bool_type, default=False,
        help="Whether to use a reduced version of the BFD database"
    )
    parser.add_argument(
        "--seed", type=int, default=None,
        help="Random seed"
    )
    parser.add_argument(
        "--deepspeed_config_path", type=str, default=None,
        help="Path to DeepSpeed config. If not provided, DeepSpeed is disabled"
    )
    parser.add_argument(
        "--checkpoint_every_epoch", action="store_true", default=False,
        help="""Whether to checkpoint at the end of every training epoch"""
    )
    parser.add_argument(
        "--early_stopping", type=bool_type, default=False,
        help="Whether to stop training when validation loss fails to decrease"
    )
    parser.add_argument(
        "--min_delta", type=float, default=0,
        help="""The smallest decrease in validation loss that counts as an
                improvement for the purposes of early stopping"""
    )
    parser.add_argument(
        "--patience", type=int, default=3,
        help="Early stopping patience"
    )
    parser.add_argument(
        "--resume_from_ckpt", type=str, default=None,
        help="Path to a model checkpoint from which to restore training state"
    )
    parser.add_argument(
        "--resume_model_weights_only", type=bool_type, default=False,
        help="Whether to load just model weights as opposed to training state"
    )
    parser.add_argument(
        "--resume_from_jax_params", type=str, default=None,
        help="""Path to an .npz JAX parameter file with which to initialize the model"""
    )
    parser.add_argument(
        "--log_performance", type=bool_type, default=False,
        help="Measure performance"
    )
    parser.add_argument(
        "--wandb", action="store_true", default=False,
        help="Whether to log metrics to Weights & Biases"
    )
    parser.add_argument(
        "--experiment_name", type=str, default=None,
        help="Name of the current experiment. Used for wandb logging"
    )
    parser.add_argument(
        "--wandb_id", type=str, default=None,
        help="ID of a previous run to be resumed"
    )
    parser.add_argument(
        "--wandb_project", type=str, default=None,
        help="Name of the wandb project to which this run will belong"
    )
    parser.add_argument(
        "--wandb_entity", type=str, default=None,
        help="wandb username or team name to which runs are attributed"
    )
    parser.add_argument(
        "--script_modules", type=bool_type, default=False,
        help="Whether to TorchScript eligible components of the model"
    )
    parser.add_argument(
        "--train_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--distillation_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--train_epoch_len", type=int, default=10000,
        help=(
            "The virtual length of each training epoch. Stochastic filtering "
            "of training data means that training datasets have no "
            "well-defined length. This virtual length affects frequency of "
            "validation & checkpointing (by default, one of each per epoch)."
        )
    )
    parser.add_argument(
        "--log_lr", action="store_true", default=False,
        help="Whether to log the actual learning rate"
    )
    parser.add_argument(
        "--config_preset", type=str, default="initial_training",
        help=(
            'Config setting. Choose e.g. "initial_training", "finetuning", '
            '"model_1", etc. By default, the actual values in the config are '
            'used.'
        )
    )
    parser.add_argument(
        "--_distillation_structure_index_path", type=str, default=None,
    )
    parser.add_argument(
        "--alignment_index_path", type=str, default=None,
        help="Training alignment index. See the README for instructions."
    )
    parser.add_argument(
        "--distillation_alignment_index_path", type=str, default=None,
        help="Distillation alignment index. See the README for instructions."
    )
    parser.add_argument(
        "--experiment_config_json", default="",
        help="Path to a JSON file with custom config values that override the selected config preset"
    )
    parser = pl.Trainer.add_argparse_args(parser)
    # Disable the initial validation pass
    parser.set_defaults(
        num_sanity_val_steps=0,
    )

    # Remove some buggy/redundant arguments introduced by the Trainer
    remove_arguments(
        parser, 
        [
            "--accelerator", 
            "--resume_from_checkpoint",
            "--reload_dataloaders_every_epoch",
            "--reload_dataloaders_every_n_epochs",
        ]
    ) 

    args = parser.parse_args()

    if(args.seed is None and 
        ((args.gpus is not None and args.gpus > 1) or 
         (args.num_nodes is not None and args.num_nodes > 1))):
        raise ValueError("For distributed training, --seed must be specified")

    if(str(args.precision) == "16" and args.deepspeed_config_path is not None):
        raise ValueError("DeepSpeed and FP16 training are not compatible")

    if(args.resume_from_jax_params is not None and args.resume_from_ckpt is not None):
        raise ValueError("--resume_from_jax_params and --resume_from_ckpt are mutually exclusive")

    # This re-applies the training-time filters at the beginning of every epoch
    args.reload_dataloaders_every_n_epochs = 1

    main(args)