import argparse
import logging
import os

#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
#os.environ["MASTER_ADDR"]="10.119.81.14"
#os.environ["MASTER_PORT"]="42069"
#os.environ["NODE_RANK"]="0"

import random
import sys
import time

import numpy as np
import pytorch_lightning as pl
from pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.plugins.training_type import DeepSpeedPlugin, DDPPlugin
from pytorch_lightning.plugins.environments import SLURMEnvironment
import torch

from openfold.config import model_config
from openfold.data.data_modules import (
    OpenFoldDataModule,
    DummyDataLoader,
)
from openfold.model.model import AlphaFold
from openfold.model.torchscript import script_preset_
from openfold.np import residue_constants
from openfold.utils.argparse import remove_arguments
from openfold.utils.callbacks import (
    EarlyStoppingVerbose,
)
from openfold.utils.exponential_moving_average import ExponentialMovingAverage
from openfold.utils.loss import AlphaFoldLoss, lddt_ca
from openfold.utils.lr_schedulers import AlphaFoldLRScheduler
from openfold.utils.seed import seed_everything
from openfold.utils.superimposition import superimpose
from openfold.utils.tensor_utils import tensor_tree_map
from openfold.utils.validation_metrics import (
    drmsd,
    gdt_ts,
    gdt_ha,
)
from scripts.zero_to_fp32 import (
    get_fp32_state_dict_from_zero_checkpoint
)

from openfold.utils.logger import PerformanceLoggingCallback


class OpenFoldWrapper(pl.LightningModule):
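    """Thin LightningModule wrapper around the AlphaFold model.

    Owns the model, its loss, and an exponential moving average (EMA) of the
    model weights; validation is always run against the EMA weights.
    """
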
    def __init__(self, config):
        super(OpenFoldWrapper, self).__init__()
        self.config = config
        self.model = AlphaFold(config)
        self.loss = AlphaFoldLoss(config.loss)
        self.ema = ExponentialMovingAverage(
            model=self.model, decay=config.ema.decay
        )
        
        self.cached_weights = None
        self.last_lr_step = 0

    def forward(self, batch):
        return self.model(batch)

    def _log(self, loss_breakdown, batch, outputs, train=True):
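        # Training losses are logged per step (with an additional epoch-level
        # average below); validation losses are logged per epoch only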
        phase = "train" if train else "val"
        for loss_name, indiv_loss in loss_breakdown.items():
            self.log(
                f"{phase}/{loss_name}", 
                indiv_loss, 
                on_step=train, on_epoch=(not train), logger=True,
            )

            if(train):
                self.log(
                    f"{phase}/{loss_name}_epoch",
                    indiv_loss,
                    on_step=False, on_epoch=True, logger=True,
                )

        with torch.no_grad():
            other_metrics = self._compute_validation_metrics(
                batch, 
                outputs,
                superimposition_metrics=(not train)
            )

        for k,v in other_metrics.items():
            self.log(
                f"{phase}/{k}", 
                v, 
                on_step=False, on_epoch=True, logger=True
            )

    def training_step(self, batch, batch_idx):
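        # Lazily move the EMA shadow weights onto the batch's device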
        if(self.ema.device != batch["aatype"].device):
            self.ema.to(batch["aatype"].device)

        # Run the model
        outputs = self(batch)
        
        # Remove the recycling dimension
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        # Compute loss
        loss, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        # Log it
        self._log(loss_breakdown, batch, outputs)

        return loss

    def on_before_zero_grad(self, *args, **kwargs):
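        # Called after each optimizer step, just before gradients are zeroed:
        # fold the freshly updated weights into the running average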
        self.ema.update(self.model)

    def validation_step(self, batch, batch_idx):
        # At the start of validation, load the EMA weights
        if(self.cached_weights is None):
            # model.state_dict() contains references to model weights rather
            # than copies. Therefore, we need to clone them before calling 
            # load_state_dict().
            clone_param = lambda t: t.detach().clone()
            self.cached_weights = tensor_tree_map(clone_param, self.model.state_dict())
            self.model.load_state_dict(self.ema.state_dict()["params"])
       
        # Run the model
        outputs = self(batch)
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        # Compute loss and other metrics
        batch["use_clamped_fape"] = 0.
        _, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        self._log(loss_breakdown, batch, outputs, train=False)
        
    def validation_epoch_end(self, _):
        # Restore the model weights to normal
        self.model.load_state_dict(self.cached_weights)
        self.cached_weights = None

    def _compute_validation_metrics(self, 
        batch, 
        outputs, 
        superimposition_metrics=False
    ):
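        """Compute lDDT(CA) and dRMSD(CA) and, optionally, the
        superimposition-based metrics (alignment RMSD, GDT_TS, GDT_HA).
        """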
        metrics = {}
        
        gt_coords = batch["all_atom_positions"]
        pred_coords = outputs["final_atom_positions"]
        all_atom_mask = batch["all_atom_mask"]
    
        # This is super janky for superimposition. Fix later
        gt_coords_masked = gt_coords * all_atom_mask[..., None]
        pred_coords_masked = pred_coords * all_atom_mask[..., None]
        ca_pos = residue_constants.atom_order["CA"]
        gt_coords_masked_ca = gt_coords_masked[..., ca_pos, :]
        pred_coords_masked_ca = pred_coords_masked[..., ca_pos, :]
        all_atom_mask_ca = all_atom_mask[..., ca_pos]
    
        lddt_ca_score = lddt_ca(
            pred_coords,
            gt_coords,
            all_atom_mask,
            eps=self.config.globals.eps,
            per_residue=False,
        )

        metrics["lddt_ca"] = lddt_ca_score

        drmsd_ca_score = drmsd(
            pred_coords_masked_ca,
            gt_coords_masked_ca,
            mask=all_atom_mask_ca, # still required here to compute n
        )

        metrics["drmsd_ca"] = drmsd_ca_score
    
        if(superimposition_metrics):
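            # RMSD and GDT are not alignment-invariant, so superimpose the
            # prediction onto the ground truth before computing them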
            superimposed_pred, alignment_rmsd = superimpose(
                gt_coords_masked_ca, pred_coords_masked_ca, all_atom_mask_ca,
            )
            gdt_ts_score = gdt_ts(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )
            gdt_ha_score = gdt_ha(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )

            metrics["alignment_rmsd"] = alignment_rmsd
            metrics["gdt_ts"] = gdt_ts_score
            metrics["gdt_ha"] = gdt_ha_score
    
        return metrics

    def configure_optimizers(self, 
        learning_rate: float = 1e-3,
        eps: float = 1e-5,
    ) -> torch.optim.Adam:
        # Ignored as long as a DeepSpeed optimizer is configured
        optimizer = torch.optim.Adam(
            self.model.parameters(), 
            lr=learning_rate, 
            eps=eps
        )
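        # AlphaFoldLRScheduler implements the warm-up/decay schedule from the
        # paper; it is advanced once per optimizer step ("interval" below)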
        lr_scheduler = AlphaFoldLRScheduler(
            optimizer,
        )

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": lr_scheduler,
                "interval": "step",
                "name": "AlphaFoldLRScheduler",
            }
        }

    def on_load_checkpoint(self, checkpoint):
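        # Restore the EMA shadow weights saved alongside the Lightning state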
        self.ema.load_state_dict(checkpoint["ema"])

    def on_save_checkpoint(self, checkpoint):
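        # Persist the EMA shadow weights alongside the Lightning state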
        checkpoint["ema"] = self.ema.state_dict()


def main(args):
    if(args.seed is not None):
        seed_everything(args.seed) 

    config = model_config(
        args.config_preset, 
        train=True, 
        low_prec=(args.precision == "16")
    ) 
    
    model_module = OpenFoldWrapper(config)
    if(args.resume_from_ckpt and args.resume_model_weights_only):
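        # DeepSpeed ZeRO shards parameters across ranks; reassemble a full
        # fp32 state dict and strip the "module." prefix before loading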
        sd = get_fp32_state_dict_from_zero_checkpoint(args.resume_from_ckpt)
        sd = {k[len("module."):]:v for k,v in sd.items()}
        model_module.load_state_dict(sd)
        logging.info("Successfully loaded model weights...")
 
    # TorchScript components of the model
    if(args.script_modules):
        script_preset_(model_module)

    #data_module = DummyDataLoader("new_batch.pickle")
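    # The full argparse namespace is forwarded so that dataset paths, caches
    # and indices reach the underlying datasets without extra plumbing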
    data_module = OpenFoldDataModule(
        config=config.data, 
        batch_seed=args.seed,
        **vars(args)
    )

    data_module.prepare_data()
    data_module.setup()
    
    callbacks = []
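    # Checkpoint at every epoch boundary, keeping all checkpoints
    # (save_top_k=-1) rather than only the best one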
    if(args.checkpoint_every_epoch):
        mc = ModelCheckpoint(
            every_n_epochs=1,
            auto_insert_metric_name=False,
            save_top_k=-1,
        )
        callbacks.append(mc)

    if(args.early_stopping):
        es = EarlyStoppingVerbose(
            monitor="val/lddt_ca",
            min_delta=args.min_delta,
            patience=args.patience,
            verbose=False,
            mode="max",
            check_finite=True,
            strict=True,
        )
        callbacks.append(es)

    if(args.log_performance):
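        # Assumes one training sample per device (the OpenFold default), so
        # the global batch size equals the total device count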
        global_batch_size = args.num_nodes * args.gpus
        perf = PerformanceLoggingCallback(
            log_file=os.path.join(args.output_dir, "performance_log.json"),
            global_batch_size=global_batch_size,
        )
        callbacks.append(perf)

    if(args.log_lr):
        lr_monitor = LearningRateMonitor(logging_interval="step")
        callbacks.append(lr_monitor)

    loggers = []
    if(args.wandb):
        wdb_logger = WandbLogger(
            name=args.experiment_name,
            save_dir=args.output_dir,
            id=args.wandb_id,
            project=args.wandb_project,
            **{"entity": args.wandb_entity}
        )
        loggers.append(wdb_logger)

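    # Pick the parallelism strategy: DeepSpeed when a config is supplied,
    # DDP for multi-GPU or multi-node runs, single-process otherwise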
    if(args.deepspeed_config_path is not None):
        strategy = DeepSpeedPlugin(
            config=args.deepspeed_config_path,
        )
        if(args.wandb):
            wdb_logger.experiment.save(args.deepspeed_config_path)
            wdb_logger.experiment.save("openfold/config.py")
    elif (args.gpus is not None and args.gpus > 1) or args.num_nodes > 1:
        strategy = DDPPlugin(find_unused_parameters=False)
    else:
        strategy = None
 
    if(args.wandb):
        freeze_path = f"{wdb_logger.experiment.dir}/package_versions.txt"
        os.system(f"{sys.executable} -m pip freeze > {freeze_path}")
        wdb_logger.experiment.save(f"{freeze_path}")

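    # Build the Trainer from the remaining CLI flags plus the strategy,
    # callbacks and loggers assembled above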
    trainer = pl.Trainer.from_argparse_args(
        args,
        default_root_dir=args.output_dir,
        strategy=strategy,
        callbacks=callbacks,
        logger=loggers,
    )

    # When only the weights were restored above, don't hand the checkpoint to
    # the Trainer, which would also re-load optimizer and scheduler state
    if(args.resume_model_weights_only):
        ckpt_path = None
    else:
        ckpt_path = args.resume_from_ckpt

    trainer.fit(
        model_module, 
        datamodule=data_module,
        ckpt_path=ckpt_path,
    )


def bool_type(bool_str: str):
    bool_str_lower = bool_str.lower()
    if bool_str_lower in ('false', 'f', 'no', 'n', '0'):
        return False
    elif bool_str_lower in ('true', 't', 'yes', 'y', '1'):
        return True
    else:
        raise ValueError(f'Cannot interpret {bool_str} as bool')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "train_data_dir", type=str,
        help="Directory containing training mmCIF files"
    )
    parser.add_argument(
        "train_alignment_dir", type=str,
        help="Directory containing precomputed training alignments"
    )
    parser.add_argument(
        "template_mmcif_dir", type=str,
        help="Directory containing mmCIF files to search for templates"
    )
    parser.add_argument(
        "output_dir", type=str,
        help='''Directory in which to output checkpoints, logs, etc. Ignored
                if not on rank 0'''
    )
    parser.add_argument(
        "max_template_date", type=str,
        help='''Cutoff for all templates. In training mode, templates are also 
                filtered by the release date of the target'''
    )
    parser.add_argument(
        "--distillation_data_dir", type=str, default=None,
        help="Directory containing training PDB files"
    )
    parser.add_argument(
        "--distillation_alignment_dir", type=str, default=None,
        help="Directory containing precomputed distillation alignments"
    )
    parser.add_argument(
        "--val_data_dir", type=str, default=None,
        help="Directory containing validation mmCIF files"
    )
    parser.add_argument(
        "--val_alignment_dir", type=str, default=None,
        help="Directory containing precomputed validation alignments"
    )
    parser.add_argument(
        "--kalign_binary_path", type=str, default='/usr/bin/kalign',
        help="Path to the kalign binary"
    )
    parser.add_argument(
        "--train_mapping_path", type=str, default=None,
        help='''Optional path to a .json file containing a mapping from
                consecutive numerical indices to sample names. Used to filter
                the training set'''
    )
    parser.add_argument(
        "--distillation_mapping_path", type=str, default=None,
        help="""See --train_mapping_path"""
    )
    parser.add_argument(
        "--obsolete_pdbs_file_path", type=str, default=None,
        help="""Path to obsolete.dat file containing list of obsolete PDBs and 
             their replacements."""
    )
    parser.add_argument(
        "--template_release_dates_cache_path", type=str, default=None,
        help="""Output of scripts/generate_mmcif_cache.py run on template mmCIF
                files."""
    )
    parser.add_argument(
        "--use_small_bfd", type=bool_type, default=False,
        help="Whether to use a reduced version of the BFD database"
    )
    parser.add_argument(
        "--seed", type=int, default=None,
        help="Random seed"
    )
    parser.add_argument(
        "--deepspeed_config_path", type=str, default=None,
        help="Path to DeepSpeed config. If not provided, DeepSpeed is disabled"
    )
    parser.add_argument(
        "--checkpoint_every_epoch", action="store_true", default=False,
        help="""Whether to checkpoint at the end of every training epoch"""
    )
    parser.add_argument(
        "--early_stopping", type=bool_type, default=False,
        help="Whether to stop training when validation loss fails to decrease"
    )
    parser.add_argument(
        "--min_delta", type=float, default=0,
        help="""The smallest decrease in validation loss that counts as an 
                improvement for the purposes of early stopping"""
    )
    parser.add_argument(
        "--patience", type=int, default=3,
        help="Early stopping patience"
    )
    parser.add_argument(
        "--resume_from_ckpt", type=str, default=None,
        help="Path to a model checkpoint from which to restore training state"
    )
    parser.add_argument(
        "--resume_model_weights_only", type=bool_type, default=False,
        help="Whether to load just model weights as opposed to training state"
    )
    parser.add_argument(
        "--log_performance", type=bool_type, default=False,
        help="Measure performance"
    )
    parser.add_argument(
        "--wandb", action="store_true", default=False,
        help="Whether to log metrics to Weights & Biases"
    )
    parser.add_argument(
        "--experiment_name", type=str, default=None,
        help="Name of the current experiment. Used for wandb logging"
    )
    parser.add_argument(
        "--wandb_id", type=str, default=None,
        help="ID of a previous run to be resumed"
    )
    parser.add_argument(
        "--wandb_project", type=str, default=None,
        help="Name of the wandb project to which this run will belong"
    )
    parser.add_argument(
        "--wandb_entity", type=str, default=None,
        help="wandb username or team name to which runs are attributed"
    )
    parser.add_argument(
        "--script_modules", type=bool_type, default=False,
        help="Whether to TorchScript eligible components of them model"
    )
    parser.add_argument(
        "--train_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--distillation_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--train_epoch_len", type=int, default=10000,
        help=(
            "The virtual length of each training epoch. Stochastic filtering "
            "of training data means that training datasets have no "
            "well-defined length. This virtual length affects frequency of "
            "validation & checkpointing (by default, one of each per epoch)."
        )
    )
    parser.add_argument(
        "--log_lr", action="store_true", default=False,
        help="Whether to log the actual learning rate"
    )
    parser.add_argument(
        "--config_preset", type=str, default="initial_training",
        help=(
            'Config setting. Choose e.g. "initial_training", "finetuning", '
            '"model_1", etc. By default, the actual values in the config are '
            'used.'
        )
    )
    parser.add_argument(
        "--_distillation_structure_index_path", type=str, default=None,
    )
    parser.add_argument(
        "--alignment_index_path", type=str, default=None,
        help="Training alignment index. See the README for instructions."
    )
    parser.add_argument(
        "--distillation_alignment_index_path", type=str, default=None,
        help="Distillation alignment index. See the README for instructions."
    )
    parser = pl.Trainer.add_argparse_args(parser)
   
    # Disable the initial validation pass
    parser.set_defaults(
        num_sanity_val_steps=0,
    )

    # Remove some buggy/redundant arguments introduced by the Trainer
    remove_arguments(
        parser, 
        [
            "--accelerator", 
            "--resume_from_checkpoint",
            "--reload_dataloaders_every_epoch",
            "--reload_dataloaders_every_n_epochs",
        ]
    ) 

    args = parser.parse_args()

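    # Ranks spawned for distributed training must share a seed so that
    # stochastic dataset filtering stays consistent across processes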
    if(args.seed is None and 
        ((args.gpus is not None and args.gpus > 1) or 
         (args.num_nodes is not None and args.num_nodes > 1))):
        raise ValueError("For distributed training, --seed must be specified")

    # This re-applies the training-time filters at the beginning of every epoch
    args.reload_dataloaders_every_n_epochs = 1

    main(args)