# train_openfold.py

import argparse
import logging
import os
import random
import sys
import time

import numpy as np
import pytorch_lightning as pl
from pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.plugins.training_type import DeepSpeedPlugin, DDPPlugin
from pytorch_lightning.plugins.environments import SLURMEnvironment

import torch

from openfold.config import model_config
from openfold.data.data_modules import (
    OpenFoldDataModule,
    DummyDataLoader,
)
from openfold.model.model import AlphaFold
from openfold.model.torchscript import script_preset_
from openfold.np import residue_constants
from openfold.utils.argparse import remove_arguments
from openfold.utils.callbacks import (
    EarlyStoppingVerbose,
)
from openfold.utils.exponential_moving_average import ExponentialMovingAverage
from openfold.utils.loss import AlphaFoldLoss, lddt_ca
from openfold.utils.lr_schedulers import AlphaFoldLRScheduler
from openfold.utils.seed import seed_everything
from openfold.utils.superimposition import superimpose
from openfold.utils.tensor_utils import tensor_tree_map
from openfold.utils.validation_metrics import (
    drmsd,
    gdt_ts,
    gdt_ha,
)
from openfold.utils.import_weights import (
    import_jax_weights_,
)
from scripts.zero_to_fp32 import (
    get_fp32_state_dict_from_zero_checkpoint,
    get_global_step_from_zero_checkpoint,
)

from openfold.utils.logger import PerformanceLoggingCallback

class OpenFoldWrapper(pl.LightningModule):
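    """LightningModule wrapper around the AlphaFold model.

    Owns the training loss, an exponential moving average (EMA) of the
    model weights that is swapped in during validation, and the
    bookkeeping needed to resume the LR schedule from a checkpoint.
    """
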
    def __init__(self, config):
        super(OpenFoldWrapper, self).__init__()
        self.config = config
        self.model = AlphaFold(config)
        self.loss = AlphaFoldLoss(config.loss)
        self.ema = ExponentialMovingAverage(
            model=self.model, decay=config.ema.decay
        )

        self.cached_weights = None
        self.last_lr_step = -1

    def forward(self, batch):
        return self.model(batch)

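    # Logs each component of the loss breakdown (per-step during training,
    # epoch-aggregated during validation) along with the structure metrics
    # computed in _compute_validation_metrics().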
    def _log(self, loss_breakdown, batch, outputs, train=True):
        phase = "train" if train else "val"
        for loss_name, indiv_loss in loss_breakdown.items():
            self.log(
                f"{phase}/{loss_name}", 
                indiv_loss, 
                on_step=train, on_epoch=(not train), logger=True,
            )

            if(train):
                self.log(
                    f"{phase}/{loss_name}_epoch",
                    indiv_loss,
                    on_step=False, on_epoch=True, logger=True,
                )

        with torch.no_grad():
            other_metrics = self._compute_validation_metrics(
                batch, 
                outputs,
                superimposition_metrics=(not train)
            )

        for k,v in other_metrics.items():
            assert(len(v.shape) == 1)
            self.log(
                f"{phase}/{k}",
                torch.mean(v),
                on_step=False, on_epoch=True, logger=True
            )

    def training_step(self, batch, batch_idx):
        if(self.ema.device != batch["aatype"].device):
            self.ema.to(batch["aatype"].device)

        # Run the model
        outputs = self(batch)

        # Remove the recycling dimension
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        # Compute loss
        loss, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        # Log it
        self._log(loss_breakdown, batch, outputs)

        return loss

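    # Lightning calls this hook after optimizer.step() and before
    # optimizer.zero_grad(), so the EMA always tracks the freshly updated
    # weights.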
    def on_before_zero_grad(self, *args, **kwargs):
        self.ema.update(self.model)

    def validation_step(self, batch, batch_idx):
        # At the start of validation, load the EMA weights
        if(self.cached_weights is None):
            # model.state_dict() contains references to model weights rather
            # than copies. Therefore, we need to clone them before calling 
            # load_state_dict().
            clone_param = lambda t: t.detach().clone()
            self.cached_weights = tensor_tree_map(
                clone_param, self.model.state_dict()
            )
            self.model.load_state_dict(self.ema.state_dict()["params"])

        # Run the model
        outputs = self(batch)
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        # Compute loss and other metrics
        batch["use_clamped_fape"] = 0.
        _, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        self._log(loss_breakdown, batch, outputs, train=False)

    def validation_epoch_end(self, _):
        # Restore the model weights to normal
        self.model.load_state_dict(self.cached_weights)
        self.cached_weights = None
    def _compute_validation_metrics(self, 
        batch, 
        outputs, 
        superimposition_metrics=False
    ):
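        # All metrics below are computed over Cα atoms: lDDT-Cα and dRMSD
        # are superposition-free; alignment RMSD, GDT-TS, and GDT-HA are
        # computed after superimposing the predicted Cα trace onto the
        # ground truth.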
        metrics = {}
        
        gt_coords = batch["all_atom_positions"]
        pred_coords = outputs["final_atom_positions"]
        all_atom_mask = batch["all_atom_mask"]
    
        # This is super janky for superimposition. Fix later
        gt_coords_masked = gt_coords * all_atom_mask[..., None]
        pred_coords_masked = pred_coords * all_atom_mask[..., None]
        ca_pos = residue_constants.atom_order["CA"]
        gt_coords_masked_ca = gt_coords_masked[..., ca_pos, :]
        pred_coords_masked_ca = pred_coords_masked[..., ca_pos, :]
        all_atom_mask_ca = all_atom_mask[..., ca_pos]
    
        lddt_ca_score = lddt_ca(
            pred_coords,
            gt_coords,
            all_atom_mask,
            eps=self.config.globals.eps,
            per_residue=False,
        )
   
        metrics["lddt_ca"] = lddt_ca_score
   
        drmsd_ca_score = drmsd(
            pred_coords_masked_ca,
            gt_coords_masked_ca,
            mask=all_atom_mask_ca, # still required here to compute n
        )
   
        metrics["drmsd_ca"] = drmsd_ca_score
    
        if(superimposition_metrics):
            superimposed_pred, alignment_rmsd = superimpose(
                gt_coords_masked_ca, pred_coords_masked_ca, all_atom_mask_ca,
            )
            gdt_ts_score = gdt_ts(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )
            gdt_ha_score = gdt_ha(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )

            metrics["alignment_rmsd"] = alignment_rmsd
            metrics["gdt_ts"] = gdt_ts_score
            metrics["gdt_ha"] = gdt_ha_score
    
        return metrics

    def configure_optimizers(self, 
        learning_rate: float = 1e-3,
        eps: float = 1e-5,
    ):
        # Ignored as long as a DeepSpeed optimizer is configured
        optimizer = torch.optim.Adam(
            self.model.parameters(), 
            lr=learning_rate, 
            eps=eps
        )

        # When resuming mid-schedule, PyTorch schedulers constructed with
        # last_epoch != -1 expect each param group to carry "initial_lr"
        if self.last_lr_step != -1:
            for group in optimizer.param_groups:
                if 'initial_lr' not in group:
                    group['initial_lr'] = learning_rate

        lr_scheduler = AlphaFoldLRScheduler(
            optimizer,
            last_epoch=self.last_lr_step,
        )

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": lr_scheduler,
                # Step the schedule once per optimizer step, not per epoch
                "interval": "step",
                "name": "AlphaFoldLRScheduler",
            }
        }

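    # The EMA parameters ride along in the Lightning checkpoint. If templates
    # are disabled in this run, template weights from an older checkpoint are
    # dropped from the EMA state before loading.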
    def on_load_checkpoint(self, checkpoint):
        ema = checkpoint["ema"]
        if(not self.model.template_config.enabled):
            ema["params"] = {
                k: v for k, v in ema["params"].items() if "template" not in k
            }
        self.ema.load_state_dict(ema)

    def on_save_checkpoint(self, checkpoint):
        checkpoint["ema"] = self.ema.state_dict()

    def resume_last_lr_step(self, lr_step):
        self.last_lr_step = lr_step

    def load_from_jax(self, jax_path):
        model_basename = os.path.splitext(
                os.path.basename(
                    os.path.normpath(jax_path)
                )
        )[0]
        model_version = "_".join(model_basename.split("_")[1:])
        import_jax_weights_(
                self.model, jax_path, version=model_version
        )


def main(args):
    if(args.seed is not None):
        seed_everything(args.seed) 

    config = model_config(
        args.config_preset, 
        train=True, 
        low_prec=(str(args.precision) == "16")
    )

    model_module = OpenFoldWrapper(config)
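
    # Recover the global step so the LR schedule can resume where it left
    # off. A directory is assumed to be a DeepSpeed ZeRO checkpoint; a
    # single file, a standard Lightning checkpoint.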
    if(args.resume_from_ckpt):
        if(os.path.isdir(args.resume_from_ckpt)):
            last_global_step = get_global_step_from_zero_checkpoint(args.resume_from_ckpt)
        else:
            sd = torch.load(args.resume_from_ckpt)
            last_global_step = int(sd['global_step'])
        model_module.resume_last_lr_step(last_global_step)
        logging.info("Successfully loaded last lr step...")
    if(args.resume_from_ckpt and args.resume_model_weights_only):
        if(os.path.isdir(args.resume_from_ckpt)):
            sd = get_fp32_state_dict_from_zero_checkpoint(args.resume_from_ckpt)
        else:
            sd = torch.load(args.resume_from_ckpt)
        # Strip the "module." prefix that DeepSpeed adds to parameter names
        sd = {k[len("module."):]:v for k,v in sd.items()}
        model_module.load_state_dict(sd)
        logging.info("Successfully loaded model weights...")
    if(args.resume_from_jax_params):
        model_module.load_from_jax(args.resume_from_jax_params)
        logging.info(f"Successfully loaded JAX parameters at {args.resume_from_jax_params}...")

    # TorchScript components of the model
    if(args.script_modules):
        script_preset_(model_module)

    #data_module = DummyDataLoader("new_batch.pickle")
    data_module = OpenFoldDataModule(
        config=config.data, 
        batch_seed=args.seed,
        **vars(args)
    )

    data_module.prepare_data()
    data_module.setup()

    callbacks = []
    if(args.checkpoint_every_epoch):
        mc = ModelCheckpoint(
            every_n_epochs=1,
            auto_insert_metric_name=False,
            save_top_k=-1,
        )
        callbacks.append(mc)

    if(args.early_stopping):
        es = EarlyStoppingVerbose(
            monitor="val/lddt_ca",
            min_delta=args.min_delta,
            patience=args.patience,
            verbose=False,
            mode="max",
            check_finite=True,
            strict=True,
        )
        callbacks.append(es)
    if(args.log_performance):
        global_batch_size = args.num_nodes * args.gpus
        perf = PerformanceLoggingCallback(
            log_file=os.path.join(args.output_dir, "performance_log.json"),
            global_batch_size=global_batch_size,
        )
        callbacks.append(perf)
    if(args.log_lr):
        lr_monitor = LearningRateMonitor(logging_interval="step")
        callbacks.append(lr_monitor)

    loggers = []
    if(args.wandb):
        wdb_logger = WandbLogger(
            name=args.experiment_name,
            save_dir=args.output_dir,
            id=args.wandb_id,
            project=args.wandb_project,
            **{"entity": args.wandb_entity}
        )
        loggers.append(wdb_logger)

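    # Pick the parallelism strategy: DeepSpeed when a config is supplied,
    # plain DDP for multi-GPU/multi-node runs, single-process otherwise.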
    if(args.deepspeed_config_path is not None):
        strategy = DeepSpeedPlugin(
            config=args.deepspeed_config_path,
        )
        if(args.wandb):
            wdb_logger.experiment.save(args.deepspeed_config_path)
            wdb_logger.experiment.save("openfold/config.py")
    elif (args.gpus is not None and args.gpus > 1) or args.num_nodes > 1:
        strategy = DDPPlugin(find_unused_parameters=False)
    else:
        strategy = None

    if(args.wandb):
        freeze_path = f"{wdb_logger.experiment.dir}/package_versions.txt"
        os.system(f"{sys.executable} -m pip freeze > {freeze_path}")
        wdb_logger.experiment.save(f"{freeze_path}")

    trainer = pl.Trainer.from_argparse_args(
        args,
        default_root_dir=args.output_dir,
        strategy=strategy,
        callbacks=callbacks,
        logger=loggers,
    )

    if(args.resume_model_weights_only):
        ckpt_path = None
    else:
        ckpt_path = args.resume_from_ckpt

    trainer.fit(
        model_module, 
        datamodule=data_module,
        ckpt_path=ckpt_path,
    )


def bool_type(bool_str: str):
    bool_str_lower = bool_str.lower()
    if bool_str_lower in ('false', 'f', 'no', 'n', '0'):
        return False
    elif bool_str_lower in ('true', 't', 'yes', 'y', '1'):
        return True
    else:
        raise ValueError(f'Cannot interpret {bool_str} as bool')


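# Example invocation (paths and settings are illustrative only):
#
#   python3 train_openfold.py mmcif_dir/ alignment_dir/ template_mmcif_dir/ \
#       output_dir/ 2021-10-10 \
#       --config_preset initial_training \
#       --gpus 8 --num_nodes 1 --seed 42 \
#       --deepspeed_config_path deepspeed_config.json \
#       --checkpoint_every_epoch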
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "train_data_dir", type=str,
        help="Directory containing training mmCIF files"
    )
    parser.add_argument(
        "train_alignment_dir", type=str,
        help="Directory containing precomputed training alignments"
    )
    parser.add_argument(
        "template_mmcif_dir", type=str,
        help="Directory containing mmCIF files to search for templates"
    )
    parser.add_argument(
        "output_dir", type=str,
        help='''Directory in which to output checkpoints, logs, etc. Ignored
                if not on rank 0'''
    )
    parser.add_argument(
        "max_template_date", type=str,
        help='''Cutoff for all templates. In training mode, templates are also 
                filtered by the release date of the target'''
    )
    parser.add_argument(
        "--distillation_data_dir", type=str, default=None,
        help="Directory containing training PDB files"
    )
    parser.add_argument(
        "--distillation_alignment_dir", type=str, default=None,
        help="Directory containing precomputed distillation alignments"
    )
    parser.add_argument(
        "--val_data_dir", type=str, default=None,
        help="Directory containing validation mmCIF files"
    )
    parser.add_argument(
        "--val_alignment_dir", type=str, default=None,
        help="Directory containing precomputed validation alignments"
    )
    parser.add_argument(
        "--kalign_binary_path", type=str, default='/usr/bin/kalign',
        help="Path to the kalign binary"
    )
    parser.add_argument(
        "--train_filter_path", type=str, default=None,
        help='''Optional path to a text file containing names of training
                examples to include, one per line. Used to filter the training 
                set'''
    )
    parser.add_argument(
        "--distillation_filter_path", type=str, default=None,
        help="""See --train_filter_path"""
    )
    parser.add_argument(
        "--obsolete_pdbs_file_path", type=str, default=None,
        help="""Path to obsolete.dat file containing list of obsolete PDBs and 
             their replacements."""
    )
    parser.add_argument(
        "--template_release_dates_cache_path", type=str, default=None,
        help="""Output of scripts/generate_mmcif_cache.py run on template mmCIF
                files."""
    )
    parser.add_argument(
        "--use_small_bfd", type=bool_type, default=False,
        help="Whether to use a reduced version of the BFD database"
    )
    parser.add_argument(
        "--seed", type=int, default=None,
        help="Random seed"
    )
    parser.add_argument(
        "--deepspeed_config_path", type=str, default=None,
        help="Path to DeepSpeed config. If not provided, DeepSpeed is disabled"
    )
    parser.add_argument(
        "--checkpoint_every_epoch", action="store_true", default=False,
        help="""Whether to checkpoint at the end of every training epoch"""
    )
    parser.add_argument(
        "--early_stopping", type=bool_type, default=False,
        help="Whether to stop training when validation loss fails to decrease"
    )
    parser.add_argument(
        "--min_delta", type=float, default=0,
        help="""The smallest decrease in validation loss that counts as an 
                improvement for the purposes of early stopping"""
    )
    parser.add_argument(
        "--patience", type=int, default=3,
        help="Early stopping patience"
    )
    parser.add_argument(
        "--resume_from_ckpt", type=str, default=None,
        help="Path to a model checkpoint from which to restore training state"
    )
    parser.add_argument(
        "--resume_model_weights_only", type=bool_type, default=False,
        help="Whether to load just model weights as opposed to training state"
    )
    parser.add_argument(
        "--resume_from_jax_params", type=str, default=None,
        help="""Path to an .npz JAX parameter file with which to initialize the model"""
    )
    parser.add_argument(
        "--log_performance", type=bool_type, default=False,
        help="Measure performance"
    )
    parser.add_argument(
        "--wandb", action="store_true", default=False,
        help="Whether to log metrics to Weights & Biases"
    )
    parser.add_argument(
        "--experiment_name", type=str, default=None,
        help="Name of the current experiment. Used for wandb logging"
    )
    parser.add_argument(
        "--wandb_id", type=str, default=None,
        help="ID of a previous run to be resumed"
    )
    parser.add_argument(
        "--wandb_project", type=str, default=None,
        help="Name of the wandb project to which this run will belong"
    )
    parser.add_argument(
        "--wandb_entity", type=str, default=None,
        help="wandb username or team name to which runs are attributed"
    )
    parser.add_argument(
        "--script_modules", type=bool_type, default=False,
        help="Whether to TorchScript eligible components of them model"
    )
    parser.add_argument(
        "--train_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--distillation_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--train_epoch_len", type=int, default=10000,
        help=(
            "The virtual length of each training epoch. Stochastic filtering "
            "of training data means that training datasets have no "
            "well-defined length. This virtual length affects frequency of "
            "validation & checkpointing (by default, one of each per epoch)."
        )
    )
    parser.add_argument(
        "--log_lr", action="store_true", default=False,
        help="Whether to log the actual learning rate"
    )
    parser.add_argument(
        "--config_preset", type=str, default="initial_training",
        help=(
            'Config setting. Choose e.g. "initial_training", "finetuning", '
            '"model_1", etc. By default, the actual values in the config are '
            'used.'
        )
    )
    parser.add_argument(
        "--_distillation_structure_index_path", type=str, default=None,
    )
    parser.add_argument(
        "--alignment_index_path", type=str, default=None,
        help="Training alignment index. See the README for instructions."
    )
    parser.add_argument(
        "--distillation_alignment_index_path", type=str, default=None,
        help="Distillation alignment index. See the README for instructions."
    )
    parser = pl.Trainer.add_argparse_args(parser)

    # Disable the initial validation pass
    parser.set_defaults(
        num_sanity_val_steps=0,
    )

    # Remove some buggy/redundant arguments introduced by the Trainer
    remove_arguments(
        parser, 
        [
            "--accelerator", 
            "--resume_from_checkpoint",
            "--reload_dataloaders_every_epoch",
            "--reload_dataloaders_every_n_epochs",
        ]
    )

    args = parser.parse_args()

    if(args.seed is None and 
        ((args.gpus is not None and args.gpus > 1) or 
         (args.num_nodes is not None and args.num_nodes > 1))):
        raise ValueError("For distributed training, --seed must be specified")

    if(str(args.precision) == "16" and args.deepspeed_config_path is not None):
        raise ValueError("DeepSpeed and FP16 training are not compatible")

    if(args.resume_from_jax_params is not None and args.resume_from_ckpt is not None):
        raise ValueError(
            "Choose between loading pretrained JAX weights and resuming from "
            "a checkpoint"
        )

    # This re-applies the training-time filters at the beginning of every epoch
    args.reload_dataloaders_every_n_epochs = 1

    main(args)