"llm/rust/triton-llm/vscode:/vscode.git/clone" did not exist on "8588e33a464d9f82d6ad93a433590a3bc3ff92de"
train_openfold.py 23 KB
Newer Older
Gustaf Ahdritz's avatar
Gustaf Ahdritz committed
1
2
3
import argparse
import logging
import os
4
import random
5
import sys
Gustaf Ahdritz's avatar
Gustaf Ahdritz committed
6
7
import time

8
import numpy as np
Gustaf Ahdritz's avatar
Gustaf Ahdritz committed
9
import pytorch_lightning as pl
Gustaf Ahdritz's avatar
Gustaf Ahdritz committed
10
from pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor
Gustaf Ahdritz's avatar
Gustaf Ahdritz committed
11
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
Gustaf Ahdritz's avatar
Gustaf Ahdritz committed
12
from pytorch_lightning.loggers import WandbLogger
13
from pytorch_lightning.plugins.training_type import DeepSpeedPlugin, DDPPlugin
14
from pytorch_lightning.plugins.environments import SLURMEnvironment
Gustaf Ahdritz's avatar
Gustaf Ahdritz committed
15
16
17
import torch

from openfold.config import model_config
18
from openfold.data.data_modules import (
19
    OpenFoldDataModule,OpenFoldMultimerDataModule,
20
    DummyDataLoader,
Gustaf Ahdritz's avatar
Gustaf Ahdritz committed
21
)
22
from openfold.model.model import AlphaFold
23
from openfold.model.torchscript import script_preset_
24
from openfold.np import residue_constants
25
from openfold.utils.argparse_utils import remove_arguments
Gustaf Ahdritz's avatar
Gustaf Ahdritz committed
26
27
28
from openfold.utils.callbacks import (
    EarlyStoppingVerbose,
)
Gustaf Ahdritz's avatar
Gustaf Ahdritz committed
29
from openfold.utils.exponential_moving_average import ExponentialMovingAverage
30
from openfold.utils.loss import AlphaFoldLoss, AlphaFoldMultimerLoss,lddt_ca
31
from openfold.utils.lr_schedulers import AlphaFoldLRScheduler
32
from openfold.utils.seed import seed_everything
33
from openfold.utils.superimposition import superimpose
34
from openfold.utils.tensor_utils import tensor_tree_map
35
36
37
38
39
from openfold.utils.validation_metrics import (
    drmsd,
    gdt_ts,
    gdt_ha,
)
40
41
42
from openfold.utils.import_weights import (
    import_jax_weights_,
)
Gustaf Ahdritz's avatar
Gustaf Ahdritz committed
43
from scripts.zero_to_fp32 import (
44
45
    get_fp32_state_dict_from_zero_checkpoint,
    get_global_step_from_zero_checkpoint
Gustaf Ahdritz's avatar
Gustaf Ahdritz committed
46
)
Gustaf Ahdritz's avatar
Gustaf Ahdritz committed
47

Marta's avatar
Marta committed
48
49
from openfold.utils.logger import PerformanceLoggingCallback

Gustaf Ahdritz's avatar
Gustaf Ahdritz committed
50
51
52
53
54
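

# OpenFoldWrapper bundles the AlphaFold model, its training loss, and an
# exponential moving average (EMA) of the weights into a single
# pytorch_lightning.LightningModule, so PyTorch Lightning can drive training,
# validation, and checkpointing.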

class OpenFoldWrapper(pl.LightningModule):
    def __init__(self, config):
        super(OpenFoldWrapper, self).__init__()
        self.config = config
        self.model = AlphaFold(config)
        self.loss = AlphaFoldLoss(config.loss)
        self.ema = ExponentialMovingAverage(
            model=self.model, decay=config.ema.decay
        )

        self.cached_weights = None
        self.last_lr_step = -1

    def forward(self, batch):
        return self.model(batch)

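    # Logging helper shared by training and validation: individual loss terms
    # are logged per step during training and per epoch during validation,
    # while structural metrics (lDDT-Ca, dRMSD, optionally GDT/RMSD) are
    # computed under torch.no_grad() and logged per epoch.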
    def _log(self, loss_breakdown, batch, outputs, train=True):
        phase = "train" if train else "val"
        for loss_name, indiv_loss in loss_breakdown.items():
            self.log(
                f"{phase}/{loss_name}", 
                indiv_loss, 
                on_step=train, on_epoch=(not train), logger=True,
            )

            if(train):
                self.log(
                    f"{phase}/{loss_name}_epoch",
                    indiv_loss,
                    on_step=False, on_epoch=True, logger=True,
                )

        with torch.no_grad():
            other_metrics = self._compute_validation_metrics(
                batch, 
                outputs,
                superimposition_metrics=(not train)
            )

        for k, v in other_metrics.items():
            assert(len(v.shape) == 1)
            self.log(
                f"{phase}/{k}",
                torch.mean(v),
                on_step=False, on_epoch=True, logger=True
            )

    def training_step(self, batch, batch_idx):
        if(self.ema.device != batch["aatype"].device):
            self.ema.to(batch["aatype"].device)

        # Run the model
        outputs = self(batch)

        # Remove the recycling dimension
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        # Compute loss
        loss, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        # Log it
        self._log(loss_breakdown, batch, outputs)

        return loss

    def on_before_zero_grad(self, *args, **kwargs):
        self.ema.update(self.model)

    def validation_step(self, batch, batch_idx):
        # At the start of validation, load the EMA weights
        if(self.cached_weights is None):
            # model.state_dict() contains references to model weights rather
            # than copies. Therefore, we need to clone them before calling
            # load_state_dict().
            clone_param = lambda t: t.detach().clone()
            self.cached_weights = tensor_tree_map(clone_param, self.model.state_dict())
            self.model.load_state_dict(self.ema.state_dict()["params"])

        # Run the model
        outputs = self(batch)
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        # Compute loss and other metrics
        batch["use_clamped_fape"] = 0.
        _, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        self._log(loss_breakdown, batch, outputs, train=False)

    def validation_epoch_end(self, _):
        # Restore the model weights to normal
        self.model.load_state_dict(self.cached_weights)
        self.cached_weights = None

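    # Structural validation metrics computed from predicted vs. ground-truth
    # atom positions: lDDT-Ca and Ca dRMSD are always computed; GDT-TS, GDT-HA,
    # and the alignment RMSD additionally require a superposition step and are
    # only computed when superimposition_metrics is set (i.e. during validation).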
    def _compute_validation_metrics(self, 
        batch, 
        outputs, 
        superimposition_metrics=False
    ):
        metrics = {}
        
        gt_coords = batch["all_atom_positions"]
        pred_coords = outputs["final_atom_positions"]
        all_atom_mask = batch["all_atom_mask"]
    
        # This is super janky for superimposition. Fix later
        gt_coords_masked = gt_coords * all_atom_mask[..., None]
        pred_coords_masked = pred_coords * all_atom_mask[..., None]
        ca_pos = residue_constants.atom_order["CA"]
        gt_coords_masked_ca = gt_coords_masked[..., ca_pos, :]
        pred_coords_masked_ca = pred_coords_masked[..., ca_pos, :]
        all_atom_mask_ca = all_atom_mask[..., ca_pos]
    
        lddt_ca_score = lddt_ca(
            pred_coords,
            gt_coords,
            all_atom_mask,
            eps=self.config.globals.eps,
            per_residue=False,
        )
   
        metrics["lddt_ca"] = lddt_ca_score
   
        drmsd_ca_score = drmsd(
            pred_coords_masked_ca,
            gt_coords_masked_ca,
            mask=all_atom_mask_ca, # still required here to compute n
        )
   
        metrics["drmsd_ca"] = drmsd_ca_score
    
        if(superimposition_metrics):
            superimposed_pred, alignment_rmsd = superimpose(
                gt_coords_masked_ca, pred_coords_masked_ca, all_atom_mask_ca,
            )
            gdt_ts_score = gdt_ts(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )
            gdt_ha_score = gdt_ha(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )

            metrics["alignment_rmsd"] = alignment_rmsd
            metrics["gdt_ts"] = gdt_ts_score
            metrics["gdt_ha"] = gdt_ha_score
    
        return metrics

    def configure_optimizers(self, 
        learning_rate: float = 1e-3,
        eps: float = 1e-5,
    ) -> torch.optim.Adam:
#        return torch.optim.Adam(
#            self.model.parameters(),
#            lr=learning_rate,
#            eps=eps
#        )
        # Ignored as long as a DeepSpeed optimizer is configured
        optimizer = torch.optim.Adam(
            self.model.parameters(), 
            lr=learning_rate, 
            eps=eps
        )

        if self.last_lr_step != -1:
            for group in optimizer.param_groups:
                if 'initial_lr' not in group:
                    group['initial_lr'] = learning_rate

        # Resume the LR schedule from the recorded step; the 'initial_lr'
        # backfill above exists because constructing a scheduler with
        # last_epoch != -1 requires that key in every param group.
        lr_scheduler = AlphaFoldLRScheduler(
            optimizer,
            last_epoch=self.last_lr_step,
        )

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": lr_scheduler,
                "interval": "step",
                "name": "AlphaFoldLRScheduler",
            }
        }

    def on_load_checkpoint(self, checkpoint):
        ema = checkpoint["ema"]
        if(not self.model.template_config.enabled):
            # Drop any template parameters stored in the checkpoint's EMA state
            # so it matches a model built without the template stack.
            ema["params"] = {k: v for k, v in ema["params"].items() if "template" not in k}
        self.ema.load_state_dict(ema)

    def on_save_checkpoint(self, checkpoint):
        checkpoint["ema"] = self.ema.state_dict()

    def resume_last_lr_step(self, lr_step):
        self.last_lr_step = lr_step

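    # Initialize the model from original AlphaFold JAX parameters. The model
    # version (e.g. "model_1") is derived from the parameter file name
    # (e.g. "params_model_1.npz") so import_jax_weights_ can pick the right
    # weight mapping.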
    def load_from_jax(self, jax_path):
        model_basename = os.path.splitext(
                os.path.basename(
                    os.path.normpath(jax_path)
                )
        )[0]
        model_version = "_".join(model_basename.split("_")[1:])
        import_jax_weights_(
                self.model, jax_path, version=model_version
        )

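
# OpenFoldMultimerWrapper reuses the monomer training logic but rebuilds the
# model and loss for multimer inputs: batches arrive as
# (all_chain_features, ground_truth) pairs and the loss is AlphaFoldMultimerLoss.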
class OpenFoldMultimerWrapper(OpenFoldWrapper):
    def __init__(self, config):
        super(OpenFoldMultimerWrapper, self).__init__(config)
        self.config = config
        # Config overrides for multimer training: the masked-MSA loss needs 22
        # classes here, and the Evoformer stack is deliberately kept small
        # (4 blocks, no per-block activation checkpointing).
        self.config.loss.masked_msa.num_classes = 22
        self.config.model.evoformer_stack.no_blocks = 4
        self.config.model.evoformer_stack.blocks_per_ckpt = None
        self.model = AlphaFold(config)
        self.loss = AlphaFoldMultimerLoss(config.loss)
        self.ema = ExponentialMovingAverage(
            model=self.model, decay=config.ema.decay
        )
        
        self.cached_weights = None
        self.last_lr_step = -1

    def forward(self, batch):
        return self.model(batch)

    def training_step(self, batch, batch_idx):
        all_chain_features, ground_truth = batch
        if(self.ema.device != all_chain_features["aatype"].device):
            self.ema.to(all_chain_features["aatype"].device)

        # Run the model
        outputs = self(all_chain_features)

        # Compute loss
        loss, loss_breakdown = self.loss(
            outputs, (all_chain_features, ground_truth), _return_breakdown=True
        )

        # Log it (the breakdown is needed because _log iterates over the
        # individual loss terms)
        self._log(loss_breakdown, all_chain_features, outputs)

        return loss
    
    def validation_step(self, batch, batch_idx):
        all_chain_features, ground_truth = batch
        # At the start of validation, load the EMA weights
        if(self.cached_weights is None):
            # model.state_dict() contains references to model weights rather
            # than copies. Therefore, we need to clone them before calling 
            # load_state_dict().
            clone_param = lambda t: t.detach().clone()
            self.cached_weights = tensor_tree_map(clone_param, self.model.state_dict())
            self.model.load_state_dict(self.ema.state_dict()["params"])
       
        # Run the model
        outputs = self(all_chain_features)

        # Compute loss and other metrics
        all_chain_features["use_clamped_fape"] = 0.
        _, loss_breakdown = self.loss(
            outputs, all_chain_features, _return_breakdown=True
        )

        self._log(loss_breakdown, all_chain_features, outputs, train=False)
        
    def validation_epoch_end(self, _):
        # Restore the model weights to normal
        self.model.load_state_dict(self.cached_weights)
        self.cached_weights = None

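
# Training entry point: builds the config and Lightning wrapper, optionally
# restores weights / LR state, sets up the data module, callbacks, loggers,
# and parallelism strategy, and finally hands everything to pl.Trainer.fit().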
def main(args):
    if(args.seed is not None):
        seed_everything(args.seed)

    config = model_config(
        args.config_preset, 
        train=True, 
        low_prec=(str(args.precision) == "16")
    )

    if "multimer" in args.config_preset:
        print("training multimer models now")
        model_module = OpenFoldMultimerWrapper(config)
    else: 
        model_module = OpenFoldWrapper(config)

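    # Recover the last global step from the checkpoint (either a DeepSpeed
    # ZeRO checkpoint directory or a single-file checkpoint) so that the LR
    # schedule resumes where it left off.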
    if(args.resume_from_ckpt):
        if(os.path.isdir(args.resume_from_ckpt)):
            last_global_step = get_global_step_from_zero_checkpoint(args.resume_from_ckpt)
        else:
            sd = torch.load(args.resume_from_ckpt)
            last_global_step = int(sd['global_step'])

        model_module.resume_last_lr_step(last_global_step)
        logging.info("Successfully loaded last lr step...")

    if(args.resume_from_ckpt and args.resume_model_weights_only):
        if(os.path.isdir(args.resume_from_ckpt)):
            sd = get_fp32_state_dict_from_zero_checkpoint(args.resume_from_ckpt)
        else:
            sd = torch.load(args.resume_from_ckpt)
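        # The saved state dict keys carry a leading "module." prefix (from the
        # DeepSpeed-wrapped module); strip it so they match this
        # LightningModule's parameter names.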
        sd = {k[len("module."):]:v for k,v in sd.items()}
        model_module.load_state_dict(sd)
        logging.info("Successfully loaded model weights...")
    if(args.resume_from_jax_params):
        model_module.load_from_jax(args.resume_from_jax_params)
        logging.info(f"Successfully loaded JAX parameters at {args.resume_from_jax_params}...")

    # TorchScript components of the model
    if(args.script_modules):
        script_preset_(model_module)

    #data_module = DummyDataLoader("new_batch.pickle")
    if "multimer" in args.config_preset:
        print("use multimer datamodule now")
        data_module = OpenFoldMultimerDataModule(
            config=config.data, 
            batch_seed=args.seed,
            **vars(args)
        )
    else:
        data_module = OpenFoldDataModule(
            config=config.data, 
            batch_seed=args.seed,
            **vars(args)
        )

    data_module.prepare_data()
    data_module.setup()

    callbacks = []
    if(args.checkpoint_every_epoch):
        mc = ModelCheckpoint(
            every_n_epochs=1,
            auto_insert_metric_name=False,
            save_top_k=-1,
        )
        callbacks.append(mc)

    if(args.early_stopping):
        es = EarlyStoppingVerbose(
            monitor="val/lddt_ca",
            min_delta=args.min_delta,
            patience=args.patience,
            verbose=False,
            mode="max",
            check_finite=True,
            strict=True,
        )
        callbacks.append(es)

    if(args.log_performance):
        global_batch_size = args.num_nodes * args.gpus
        perf = PerformanceLoggingCallback(
            log_file=os.path.join(args.output_dir, "performance_log.json"),
            global_batch_size=global_batch_size,
        )
        callbacks.append(perf)

    if(args.log_lr):
        lr_monitor = LearningRateMonitor(logging_interval="step")
        callbacks.append(lr_monitor)

    loggers = []
    if(args.wandb):
        wdb_logger = WandbLogger(
            name=args.experiment_name,
            save_dir=args.output_dir,
            id=args.wandb_id,
            project=args.wandb_project,
            **{"entity": args.wandb_entity}
        )
        loggers.append(wdb_logger)

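    # Choose the parallelism strategy: DeepSpeed when a config is supplied,
    # plain DDP for multi-GPU or multi-node runs, and no special strategy
    # otherwise.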
    if(args.deepspeed_config_path is not None):
        strategy = DeepSpeedPlugin(
            config=args.deepspeed_config_path,
        )
        if(args.wandb):
            wdb_logger.experiment.save(args.deepspeed_config_path)
            wdb_logger.experiment.save("openfold/config.py")
    elif (args.gpus is not None and args.gpus > 1) or args.num_nodes > 1:
        strategy = DDPPlugin(find_unused_parameters=False)
    else:
        strategy = None

    if(args.wandb):
        freeze_path = f"{wdb_logger.experiment.dir}/package_versions.txt"
        os.system(f"{sys.executable} -m pip freeze > {freeze_path}")
        wdb_logger.experiment.save(f"{freeze_path}")

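    # Build the Trainer from the CLI arguments, augmented with our callbacks,
    # loggers, and the strategy selected above.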
    trainer = pl.Trainer.from_argparse_args(
        args,
        default_root_dir=args.output_dir,
        strategy=strategy,
        callbacks=callbacks,
        logger=loggers,
    )

    if(args.resume_model_weights_only):
        ckpt_path = None
    else:
        ckpt_path = args.resume_from_ckpt

    trainer.fit(
        model_module, 
        datamodule=data_module,
        ckpt_path=ckpt_path,
    )


def bool_type(bool_str: str):
    bool_str_lower = bool_str.lower()
    if bool_str_lower in ('false', 'f', 'no', 'n', '0'):
        return False
    elif bool_str_lower in ('true', 't', 'yes', 'y', '1'):
        return True
    else:
        raise ValueError(f'Cannot interpret {bool_str} as bool')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "train_data_dir", type=str,
        help="Directory containing training mmCIF files"
    )
    parser.add_argument(
        "train_alignment_dir", type=str,
        help="Directory containing precomputed training alignments"
    )
    parser.add_argument(
        "template_mmcif_dir", type=str,
        help="Directory containing mmCIF files to search for templates"
    )
    parser.add_argument(
        "output_dir", type=str,
        help='''Directory in which to output checkpoints, logs, etc. Ignored
                if not on rank 0'''
    )
    parser.add_argument(
        "max_template_date", type=str,
        help='''Cutoff for all templates. In training mode, templates are also 
                filtered by the release date of the target'''
    )
    parser.add_argument(
        "--train_mmcif_data_cache_path", type=str, default=None,
        help="""Path to a JSON file that caches metadata for the mmCIF 
                structures used during training"""
    )
    parser.add_argument(
        "--distillation_data_dir", type=str, default=None,
        help="Directory containing training PDB files"
    )
    parser.add_argument(
        "--distillation_alignment_dir", type=str, default=None,
        help="Directory containing precomputed distillation alignments"
    )
    parser.add_argument(
        "--val_data_dir", type=str, default=None,
        help="Directory containing validation mmCIF files"
    )
    parser.add_argument(
        "--val_alignment_dir", type=str, default=None,
        help="Directory containing precomputed validation alignments"
    )
    parser.add_argument(
        "--kalign_binary_path", type=str, default='/usr/bin/kalign',
        help="Path to the kalign binary"
    )
    parser.add_argument(
        "--train_filter_path", type=str, default=None,
        help='''Optional path to a text file containing names of training
                examples to include, one per line. Used to filter the training 
                set'''
    )
    parser.add_argument(
        "--distillation_filter_path", type=str, default=None,
        help="""See --train_filter_path"""
    )
    parser.add_argument(
        "--obsolete_pdbs_file_path", type=str, default=None,
        help="""Path to obsolete.dat file containing list of obsolete PDBs and 
             their replacements."""
    )
    parser.add_argument(
        "--template_release_dates_cache_path", type=str, default=None,
        help="""Output of scripts/generate_mmcif_cache.py run on template mmCIF
                files."""
    )
    parser.add_argument(
        "--use_small_bfd", type=bool_type, default=False,
        help="Whether to use a reduced version of the BFD database"
    )
    parser.add_argument(
        "--seed", type=int, default=None,
        help="Random seed"
    )
    parser.add_argument(
        "--deepspeed_config_path", type=str, default=None,
        help="Path to DeepSpeed config. If not provided, DeepSpeed is disabled"
    )
    parser.add_argument(
        "--checkpoint_every_epoch", action="store_true", default=False,
        help="""Whether to checkpoint at the end of every training epoch"""
    )
    parser.add_argument(
        "--early_stopping", type=bool_type, default=False,
        help="""Whether to stop training when the monitored validation metric 
                (val/lddt_ca) stops improving"""
    )
    parser.add_argument(
        "--min_delta", type=float, default=0,
        help="""The smallest improvement in the monitored validation metric 
                that counts for the purposes of early stopping"""
    )
    parser.add_argument(
        "--patience", type=int, default=3,
        help="Early stopping patience"
    )
    parser.add_argument(
        "--resume_from_ckpt", type=str, default=None,
        help="Path to a model checkpoint from which to restore training state"
    )
    parser.add_argument(
        "--resume_model_weights_only", type=bool_type, default=False,
        help="Whether to load just model weights as opposed to training state"
    )
    parser.add_argument(
        "--resume_from_jax_params", type=str, default=None,
        help="""Path to an .npz JAX parameter file with which to initialize the model"""
    )
    parser.add_argument(
        "--log_performance", type=bool_type, default=False,
        help="Measure performance"
    )
    parser.add_argument(
        "--wandb", action="store_true", default=False,
        help="Whether to log metrics to Weights & Biases"
    )
    parser.add_argument(
        "--experiment_name", type=str, default=None,
        help="Name of the current experiment. Used for wandb logging"
    )
    parser.add_argument(
        "--wandb_id", type=str, default=None,
        help="ID of a previous run to be resumed"
    )
    parser.add_argument(
        "--wandb_project", type=str, default=None,
        help="Name of the wandb project to which this run will belong"
    )
    parser.add_argument(
        "--wandb_entity", type=str, default=None,
        help="wandb username or team name to which runs are attributed"
    )
    parser.add_argument(
        "--script_modules", type=bool_type, default=False,
        help="Whether to TorchScript eligible components of the model"
    )
    parser.add_argument(
        "--train_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--distillation_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--train_epoch_len", type=int, default=10000,
        help=(
            "The virtual length of each training epoch. Stochastic filtering "
            "of training data means that training datasets have no "
            "well-defined length. This virtual length affects frequency of "
            "validation & checkpointing (by default, one of each per epoch)."
        )
    )
    parser.add_argument(
        "--log_lr", action="store_true", default=False,
        help="Whether to log the actual learning rate"
    )
    parser.add_argument(
        "--config_preset", type=str, default="initial_training",
        help=(
            'Config setting. Choose e.g. "initial_training", "finetuning", '
            '"model_1", etc. By default, the actual values in the config are '
            'used.'
        )
    )
    parser.add_argument(
        "--_distillation_structure_index_path", type=str, default=None,
    )
    parser.add_argument(
        "--alignment_index_path", type=str, default=None,
        help="Training alignment index. See the README for instructions."
    )
    parser.add_argument(
        "--distillation_alignment_index_path", type=str, default=None,
        help="Distillation alignment index. See the README for instructions."
    )
    parser = pl.Trainer.add_argparse_args(parser)

    # Disable the initial validation pass
    parser.set_defaults(
        num_sanity_val_steps=0,
    )

    # Remove some buggy/redundant arguments introduced by the Trainer
    remove_arguments(
        parser, 
        [
            "--accelerator", 
            "--resume_from_checkpoint",
            "--reload_dataloaders_every_epoch",
            "--reload_dataloaders_every_n_epochs",
        ]
    )

    args = parser.parse_args()

    if(args.seed is None and 
        ((args.gpus is not None and args.gpus > 1) or 
         (args.num_nodes is not None and args.num_nodes > 1))):
        raise ValueError("For distributed training, --seed must be specified")

    if(str(args.precision) == "16" and args.deepspeed_config_path is not None):
        raise ValueError("DeepSpeed and FP16 training are not compatible")

    if(args.resume_from_jax_params is not None and args.resume_from_ckpt is not None):
        raise ValueError(
            "Choose between loading pretrained JAX parameters and resuming "
            "from a checkpoint; the two options are mutually exclusive"
        )

    # This re-applies the training-time filters at the beginning of every epoch
    args.reload_dataloaders_every_n_epochs = 1

    main(args)