import argparse
import logging
import os
import sys

import pytorch_lightning as pl
from pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.plugins.training_type import DeepSpeedPlugin, DDPPlugin
import torch

from openfold.config import model_config
from openfold.data.data_modules import OpenFoldDataModule, OpenFoldMultimerDataModule
from openfold.model.model import AlphaFold
from openfold.model.torchscript import script_preset_
from openfold.np import residue_constants
from openfold.utils.argparse_utils import remove_arguments
from openfold.utils.callbacks import (
    EarlyStoppingVerbose,
)
from openfold.utils.exponential_moving_average import ExponentialMovingAverage
from openfold.utils.loss import AlphaFoldLoss, AlphaFoldMultimerLoss, lddt_ca
from openfold.utils.lr_schedulers import AlphaFoldLRScheduler
from openfold.utils.seed import seed_everything
from openfold.utils.superimposition import superimpose
from openfold.utils.tensor_utils import tensor_tree_map
from openfold.utils.validation_metrics import (
    drmsd,
    gdt_ts,
    gdt_ha,
)
from openfold.utils.import_weights import (
    import_jax_weights_,
)
from scripts.zero_to_fp32 import (
    get_fp32_state_dict_from_zero_checkpoint,
    get_global_step_from_zero_checkpoint
)

from openfold.utils.logger import PerformanceLoggingCallback


class OpenFoldWrapper(pl.LightningModule):
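    """A thin LightningModule wrapper around the AlphaFold model.

    Bundles the model with its loss, an exponential moving average (EMA)
    of the weights (used for validation and inference), and the optimizer
    and learning-rate schedule.
    """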
    def __init__(self, config):
        super(OpenFoldWrapper, self).__init__()
        self.config = config
        self.model = AlphaFold(config)

        if self.config.globals.is_multimer:
            self.loss = AlphaFoldMultimerLoss(config.loss)
        else:
            self.loss = AlphaFoldLoss(config.loss)

        self.ema = ExponentialMovingAverage(
            model=self.model, decay=config.ema.decay
        )
        
        self.cached_weights = None
        self.last_lr_step = -1

    def forward(self, batch):
        return self.model(batch)

    def _log(self, loss_breakdown, batch, outputs, train=True):
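        # Log every component of the loss breakdown: per-step during
        # training, epoch-aggregated during validation, plus the structural
        # metrics computed by _compute_validation_metrics below.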
        phase = "train" if train else "val"
        for loss_name, indiv_loss in loss_breakdown.items():
            self.log(
                f"{phase}/{loss_name}", 
                indiv_loss, 
                on_step=train, on_epoch=(not train), logger=True,
            )

            if(train):
                self.log(
                    f"{phase}/{loss_name}_epoch",
                    indiv_loss,
                    on_step=False, on_epoch=True, logger=True,
                )

        with torch.no_grad():
            other_metrics = self._compute_validation_metrics(
                batch, 
                outputs,
                superimposition_metrics=(not train)
            )

        for k,v in other_metrics.items():
            self.log(
                f"{phase}/{k}",
                torch.mean(v),
                on_step=False, on_epoch=True, logger=True
            )

    def training_step(self, batch, batch_idx):
        if(self.ema.device != batch["aatype"].device):
            self.ema.to(batch["aatype"].device)

        # Run the model
        outputs = self(batch)

        # Remove the recycling dimension
        batch = tensor_tree_map(lambda t: t[..., -1], batch)
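        # (Input features carry a trailing recycling dimension; the loss is
        # computed against the features fed to the final recycling pass.)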

        # Compute loss
        loss, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        # Log it
        self._log(loss_breakdown, batch, outputs)

        return loss

    def on_before_zero_grad(self, *args, **kwargs):
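        # This hook fires after each optimizer step, just before gradients
        # are zeroed, so the EMA tracks one update per effective step.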
        self.ema.update(self.model)

    def validation_step(self, batch, batch_idx):
        # At the start of validation, load the EMA weights
        if(self.cached_weights is None):
            # model.state_dict() contains references to model weights rather
            # than copies. Therefore, we need to clone them before calling 
            # load_state_dict().
            clone_param = lambda t: t.detach().clone()
            self.cached_weights = tensor_tree_map(clone_param, self.model.state_dict())
            self.model.load_state_dict(self.ema.state_dict()["params"])
       
        # Run the model
        outputs = self(batch)
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        # Compute loss and other metrics
        batch["use_clamped_fape"] = 0.
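        # Setting use_clamped_fape to 0 scores validation with the unclamped
        # FAPE loss only.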
        _, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        self._log(loss_breakdown, batch, outputs, train=False)
        
    def validation_epoch_end(self, _):
        # Restore the model weights to normal
        self.model.load_state_dict(self.cached_weights)
        self.cached_weights = None

    def _compute_validation_metrics(self, 
        batch, 
        outputs, 
        superimposition_metrics=False
    ):
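        # Compute structure-quality metrics: lDDT-Ca and dRMSD always;
        # superimposition-based metrics (alignment RMSD, GDT-TS, GDT-HA)
        # only when requested (validation).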
        metrics = {}
        
        gt_coords = batch["all_atom_positions"]
        pred_coords = outputs["final_atom_positions"]
        all_atom_mask = batch["all_atom_mask"]
    
        # This is super janky for superimposition. Fix later
        gt_coords_masked = gt_coords * all_atom_mask[..., None]
        pred_coords_masked = pred_coords * all_atom_mask[..., None]
        ca_pos = residue_constants.atom_order["CA"]
        gt_coords_masked_ca = gt_coords_masked[..., ca_pos, :]
        pred_coords_masked_ca = pred_coords_masked[..., ca_pos, :]
        all_atom_mask_ca = all_atom_mask[..., ca_pos]
    
        lddt_ca_score = lddt_ca(
            pred_coords,
            gt_coords,
            all_atom_mask,
            eps=self.config.globals.eps,
            per_residue=False,
        )
   
        metrics["lddt_ca"] = lddt_ca_score
   
        drmsd_ca_score = drmsd(
            pred_coords_masked_ca,
            gt_coords_masked_ca,
            mask=all_atom_mask_ca, # still required here to compute n
        )
   
        metrics["drmsd_ca"] = drmsd_ca_score
    
        if(superimposition_metrics):
            superimposed_pred, alignment_rmsd = superimpose(
                gt_coords_masked_ca, pred_coords_masked_ca, all_atom_mask_ca,
            )
            gdt_ts_score = gdt_ts(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )
            gdt_ha_score = gdt_ha(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )

            metrics["alignment_rmsd"] = alignment_rmsd
            metrics["gdt_ts"] = gdt_ts_score
            metrics["gdt_ha"] = gdt_ha_score
    
        return metrics

    def configure_optimizers(self, 
        learning_rate: float = 1e-3,
        eps: float = 1e-5,
    ) -> dict:
        # Ignored as long as a DeepSpeed optimizer is configured
        optimizer = torch.optim.Adam(
            self.model.parameters(), 
            lr=learning_rate, 
            eps=eps
        )

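        # When resuming mid-run, param groups restored from a checkpoint may
        # lack the 'initial_lr' key the scheduler expects, so backfill it.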
        if self.last_lr_step != -1:
            for group in optimizer.param_groups:
                if 'initial_lr' not in group:
                    group['initial_lr'] = learning_rate

        lr_scheduler = AlphaFoldLRScheduler(
            optimizer,
            # Resume the schedule from the checkpointed step (-1 = fresh run)
            last_epoch=self.last_lr_step,
        )

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": lr_scheduler,
                "interval": "step",
                "name": "AlphaFoldLRScheduler",
            }
        }

    def on_load_checkpoint(self, checkpoint):
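        # Restore the EMA state saved in on_save_checkpoint. If templates
        # are disabled in this config, drop template-related EMA parameters
        # so they don't clash with the template-free model.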
        ema = checkpoint["ema"]
        if(not self.model.template_config.enabled):
            ema["params"] = {k:v for k,v in ema["params"].items() if "template" not in k}
        self.ema.load_state_dict(ema)

    def on_save_checkpoint(self, checkpoint):
        checkpoint["ema"] = self.ema.state_dict()

    def resume_last_lr_step(self, lr_step):
        self.last_lr_step = lr_step

    def load_from_jax(self, jax_path):
        model_basename = os.path.splitext(
                os.path.basename(
                    os.path.normpath(jax_path)
                )
        )[0]
        model_version = "_".join(model_basename.split("_")[1:])
        import_jax_weights_(
                self.model, jax_path, version=model_version
        )


def main(args):
    if(args.seed is not None):
        seed_everything(args.seed) 

    config = model_config(
        args.config_preset, 
        train=True, 
        low_prec=(str(args.precision) == "16")
    ) 
    model_module = OpenFoldWrapper(config)

    if(args.resume_from_ckpt):
        if(os.path.isdir(args.resume_from_ckpt)):  
            last_global_step = get_global_step_from_zero_checkpoint(args.resume_from_ckpt)
        else:
            sd = torch.load(args.resume_from_ckpt)
            last_global_step = int(sd['global_step'])
        model_module.resume_last_lr_step(last_global_step)
        logging.info("Successfully loaded last lr step...")
    if(args.resume_from_ckpt and args.resume_model_weights_only):
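        # Weights-only resumption: DeepSpeed ZeRO checkpoints are sharded
        # directories and must first be consolidated into a single fp32
        # state dict.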
        if(os.path.isdir(args.resume_from_ckpt)):
            sd = get_fp32_state_dict_from_zero_checkpoint(args.resume_from_ckpt)
        else:
            sd = torch.load(args.resume_from_ckpt)
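        # Strip the "module." prefix prepended by the distributed wrapper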
        sd = {k[len("module."):]:v for k,v in sd.items()}
        model_module.load_state_dict(sd)
        logging.info("Successfully loaded model weights...")
    if(args.resume_from_jax_params):
        model_module.load_from_jax(args.resume_from_jax_params)
        logging.info(f"Successfully loaded JAX parameters at {args.resume_from_jax_params}...")
 
    # TorchScript components of the model
    if(args.script_modules):
        script_preset_(model_module)

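    # Multimer presets use a dedicated data module that assembles
    # multi-chain features.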
    if "multimer" in args.config_preset:
        data_module = OpenFoldMultimerDataModule(
294
295
296
297
        config=config.data, 
        batch_seed=args.seed,
        **vars(args)
    )
    else:
        data_module = OpenFoldDataModule(
            config=config.data, 
            batch_seed=args.seed,
            **vars(args)
        )

    data_module.prepare_data()
    data_module.setup()
    
    callbacks = []
    if(args.checkpoint_every_epoch):
        mc = ModelCheckpoint(
            every_n_epochs=1,
            auto_insert_metric_name=False,
            save_top_k=-1,
        )
        callbacks.append(mc)

    if(args.early_stopping):
        es = EarlyStoppingVerbose(
            monitor="val/lddt_ca",
            min_delta=args.min_delta,
            patience=args.patience,
            verbose=False,
            mode="max",
            check_finite=True,
            strict=True,
        )
        callbacks.append(es)

    if(args.log_performance):
        global_batch_size = args.num_nodes * args.gpus
        perf = PerformanceLoggingCallback(
            log_file=os.path.join(args.output_dir, "performance_log.json"),
            global_batch_size=global_batch_size,
        )
        callbacks.append(perf)

    if(args.log_lr):
        lr_monitor = LearningRateMonitor(logging_interval="step")
        callbacks.append(lr_monitor)

    loggers = []
    if(args.wandb):
        wdb_logger = WandbLogger(
            name=args.experiment_name,
            save_dir=args.output_dir,
            id=args.wandb_id,
            project=args.wandb_project,
            **{"entity": args.wandb_entity}
        )
        loggers.append(wdb_logger)

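    # Choose the training strategy: DeepSpeed if a config was provided,
    # vanilla DDP for multi-GPU/multi-node runs, single-process otherwise.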
    if(args.deepspeed_config_path is not None):
        strategy = DeepSpeedPlugin(
            config=args.deepspeed_config_path,
        )
        if(args.wandb):
            wdb_logger.experiment.save(args.deepspeed_config_path)
            wdb_logger.experiment.save("openfold/config.py")
    elif (args.gpus is not None and args.gpus > 1) or args.num_nodes > 1:
        strategy = DDPPlugin(find_unused_parameters=False)
    else:
        strategy = None
 
    if(args.wandb):
        freeze_path = f"{wdb_logger.experiment.dir}/package_versions.txt"
        os.system(f"{sys.executable} -m pip freeze > {freeze_path}")
        wdb_logger.experiment.save(f"{freeze_path}")

    trainer = pl.Trainer.from_argparse_args(
        args,
        default_root_dir=args.output_dir,
        strategy=strategy,
        callbacks=callbacks,
        logger=loggers,
    )

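    # When only the weights were loaded above, start fresh rather than
    # restoring optimizer/scheduler state from the checkpoint.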
    if(args.resume_model_weights_only):
        ckpt_path = None
    else:
        ckpt_path = args.resume_from_ckpt

    trainer.fit(
        model_module, 
        datamodule=data_module,
        ckpt_path=ckpt_path,
    )


def bool_type(bool_str: str):
    bool_str_lower = bool_str.lower()
    if bool_str_lower in ('false', 'f', 'no', 'n', '0'):
        return False
    elif bool_str_lower in ('true', 't', 'yes', 'y', '1'):
        return True
    else:
        raise ValueError(f'Cannot interpret {bool_str} as bool')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "train_data_dir", type=str,
        help="Directory containing training mmCIF files"
    )
    parser.add_argument(
        "train_alignment_dir", type=str,
        help="Directory containing precomputed training alignments"
    )
    parser.add_argument(
        "template_mmcif_dir", type=str,
        help="Directory containing mmCIF files to search for templates"
    )
    parser.add_argument(
        "output_dir", type=str,
        help='''Directory in which to output checkpoints, logs, etc. Ignored
                if not on rank 0'''
    )
    parser.add_argument(
        "max_template_date", type=str,
        help='''Cutoff for all templates. In training mode, templates are also 
                filtered by the release date of the target'''
    )
    parser.add_argument(
        "--train_mmcif_data_cache_path", type=str, default=None,
        help="Path to a JSON file recording metadata for the mmCIF structures used during training"
    )
    parser.add_argument(
        "--distillation_data_dir", type=str, default=None,
        help="Directory containing training PDB files"
    )
    parser.add_argument(
        "--distillation_alignment_dir", type=str, default=None,
        help="Directory containing precomputed distillation alignments"
    )
    parser.add_argument(
        "--val_data_dir", type=str, default=None,
        help="Directory containing validation mmCIF files"
    )
    parser.add_argument(
        "--val_alignment_dir", type=str, default=None,
        help="Directory containing precomputed validation alignments"
    )
    parser.add_argument(
        "--val_mmcif_data_cache_path", type=str, default=None,
        help="Path to a JSON file recording metadata for the mmCIF structures used during validation"
    )
    parser.add_argument(
        "--kalign_binary_path", type=str, default='/usr/bin/kalign',
        help="Path to the kalign binary"
    )
    parser.add_argument(
        "--train_filter_path", type=str, default=None,
        help='''Optional path to a text file containing names of training
                examples to include, one per line. Used to filter the training 
                set'''
    )
    parser.add_argument(
        "--distillation_filter_path", type=str, default=None,
        help="""See --train_filter_path"""
    )
    parser.add_argument(
        "--obsolete_pdbs_file_path", type=str, default=None,
        help="""Path to obsolete.dat file containing list of obsolete PDBs and 
             their replacements."""
    )
    parser.add_argument(
        "--template_release_dates_cache_path", type=str, default=None,
        help="""Output of scripts/generate_mmcif_cache.py run on template mmCIF
                files."""
    )
    parser.add_argument(
        "--use_small_bfd", type=bool_type, default=False,
        help="Whether to use a reduced version of the BFD database"
    )
    parser.add_argument(
        "--seed", type=int, default=None,
        help="Random seed"
    )
    parser.add_argument(
        "--deepspeed_config_path", type=str, default=None,
        help="Path to DeepSpeed config. If not provided, DeepSpeed is disabled"
    )
    parser.add_argument(
        "--checkpoint_every_epoch", action="store_true", default=False,
        help="""Whether to checkpoint at the end of every training epoch"""
    )
    parser.add_argument(
        "--early_stopping", type=bool_type, default=False,
        help="Whether to stop training when validation loss fails to decrease"
    )
    parser.add_argument(
        "--min_delta", type=float, default=0,
        help="""The smallest decrease in validation loss that counts as an 
                improvement for the purposes of early stopping"""
    )
    parser.add_argument(
        "--patience", type=int, default=3,
        help="Early stopping patience"
    )
    parser.add_argument(
        "--resume_from_ckpt", type=str, default=None,
        help="Path to a model checkpoint from which to restore training state"
    )
    parser.add_argument(
        "--resume_model_weights_only", type=bool_type, default=False,
        help="Whether to load just model weights as opposed to training state"
    )
    parser.add_argument(
        "--resume_from_jax_params", type=str, default=None,
        help="""Path to an .npz JAX parameter file with which to initialize the model"""
    )
    parser.add_argument(
        "--log_performance", type=bool_type, default=False,
        help="Measure performance"
    )
    parser.add_argument(
        "--wandb", action="store_true", default=False,
        help="Whether to log metrics to Weights & Biases"
    )
    parser.add_argument(
        "--experiment_name", type=str, default=None,
        help="Name of the current experiment. Used for wandb logging"
    )
    parser.add_argument(
        "--wandb_id", type=str, default=None,
        help="ID of a previous run to be resumed"
    )
    parser.add_argument(
        "--wandb_project", type=str, default=None,
        help="Name of the wandb project to which this run will belong"
    )
    parser.add_argument(
        "--wandb_entity", type=str, default=None,
        help="wandb username or team name to which runs are attributed"
    )
    parser.add_argument(
        "--script_modules", type=bool_type, default=False,
        help="Whether to TorchScript eligible components of the model"
    )
    parser.add_argument(
        "--train_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--distillation_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--train_epoch_len", type=int, default=10000,
        help=(
            "The virtual length of each training epoch. Stochastic filtering "
            "of training data means that training datasets have no "
            "well-defined length. This virtual length affects frequency of "
            "validation & checkpointing (by default, one of each per epoch)."
        )
    )
    parser.add_argument(
        "--log_lr", action="store_true", default=False,
        help="Whether to log the actual learning rate"
    )
    parser.add_argument(
        "--config_preset", type=str, default="initial_training",
        help=(
            'Config setting. Choose e.g. "initial_training", "finetuning", '
            '"model_1", etc. By default, the actual values in the config are '
            'used.'
        )
    )
    parser.add_argument(
        "--_distillation_structure_index_path", type=str, default=None,
    )
    parser.add_argument(
        "--alignment_index_path", type=str, default=None,
        help="Training alignment index. See the README for instructions."
    )
    parser.add_argument(
        "--distillation_alignment_index_path", type=str, default=None,
        help="Distillation alignment index. See the README for instructions."
    )
    parser = pl.Trainer.add_argparse_args(parser)
   
    # Disable the initial validation pass
    parser.set_defaults(
        num_sanity_val_steps=0,
    )

    # Remove some buggy/redundant arguments introduced by the Trainer
    remove_arguments(
        parser, 
        [
            "--accelerator", 
            "--resume_from_checkpoint",
            "--reload_dataloaders_every_epoch",
            "--reload_dataloaders_every_n_epochs",
        ]
    ) 

    args = parser.parse_args()

    if(args.seed is None and 
        ((args.gpus is not None and args.gpus > 1) or 
         (args.num_nodes is not None and args.num_nodes > 1))):
        raise ValueError("For distributed training, --seed must be specified")

    if(str(args.precision) == "16" and args.deepspeed_config_path is not None):
        raise ValueError("DeepSpeed and FP16 training are not compatible")

    if(args.resume_from_jax_params is not None and args.resume_from_ckpt is not None):
        raise ValueError("Choose between loading pretrained JAX weights and resuming from a checkpoint")

    # This re-applies the training-time filters at the beginning of every epoch
    args.reload_dataloaders_every_n_epochs = 1

    main(args)