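"""Training entry point for OpenFold, a trainable PyTorch reproduction of
AlphaFold 2, built on PyTorch Lightning.

Illustrative invocation (paths and flag values are placeholders; Trainer
flags such as --gpus and --precision come from pytorch_lightning):

    python3 train_openfold.py mmcif_dir/ alignment_dir/ template_mmcif_dir/ \
        output_dir/ 2021-10-10 \
        --config_preset initial_training \
        --precision 16 --gpus 8 --seed 42 \
        --checkpoint_every_epoch
"""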
import argparse
import logging
import os

#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
#os.environ["MASTER_ADDR"]="10.119.81.14"
#os.environ["MASTER_PORT"]="42069"
#os.environ["NODE_RANK"]="0"

import random
import time

import numpy as np
import pytorch_lightning as pl
from pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.plugins.training_type import DeepSpeedPlugin, DDPPlugin
from pytorch_lightning.plugins.environments import SLURMEnvironment
import torch

from openfold.config import model_config
from openfold.data.data_modules import (
    OpenFoldDataModule,
    DummyDataLoader,
)
from openfold.model.model import AlphaFold
from openfold.model.torchscript import script_preset_
from openfold.np import residue_constants
from openfold.utils.argparse import remove_arguments
from openfold.utils.callbacks import (
    EarlyStoppingVerbose,
)
from openfold.utils.exponential_moving_average import ExponentialMovingAverage
from openfold.utils.loss import AlphaFoldLoss, lddt_ca, compute_drmsd
from openfold.utils.lr_schedulers import AlphaFoldLRScheduler
from openfold.utils.seed import seed_everything
from openfold.utils.superimposition import superimpose
from openfold.utils.tensor_utils import tensor_tree_map
from openfold.utils.validation_metrics import (
    gdt_ts,
    gdt_ha,
)

from scripts.zero_to_fp32 import (
    get_fp32_state_dict_from_zero_checkpoint
)

from openfold.utils.logger import PerformanceLoggingCallback


class OpenFoldWrapper(pl.LightningModule):
    def __init__(self, config):
        super(OpenFoldWrapper, self).__init__()
        self.config = config
        self.model = AlphaFold(config)
        self.loss = AlphaFoldLoss(config.loss)
        self.ema = ExponentialMovingAverage(
            model=self.model, decay=config.ema.decay
        )
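        # The EMA weights are refreshed after every optimizer step (see
        # on_before_zero_grad) and swapped into the model for validation;
        # cached_weights temporarily holds the raw training weights during
        # that swap.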
        self.cached_weights = None

    def forward(self, batch):
        return self.model(batch)

    def _log(self, loss_breakdown, batch, outputs, train=True):
        phase = "train" if train else "val"
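        # Each loss term is logged under a per-phase namespace, e.g.
        # "train/fape" or "val/fape".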
        for loss_name, indiv_loss in loss_breakdown.items():
            self.log(
                f"{phase}/{loss_name}", 
                indiv_loss, 
                on_step=train, on_epoch=(not train), logger=True,
            )

            if(train):
                self.log(
                    f"{phase}/{loss_name}_epoch",
                    indiv_loss,
                    on_step=False, on_epoch=True, logger=True,
                )

        with torch.no_grad():
            other_metrics = self._compute_validation_metrics(
                batch, 
                outputs,
                superimposition_metrics=(not train)
            )

        for k,v in other_metrics.items():
            self.log(
                f"{phase}/{k}", 
                v, 
                on_step=False, on_epoch=True, logger=True
            )

    def training_step(self, batch, batch_idx):
        if(self.ema.device != batch["aatype"].device):
            self.ema.to(batch["aatype"].device)

        # Run the model
        outputs = self(batch)
        
        # Remove the recycling dimension
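        # (input features are stacked along a trailing recycling dimension;
        # the loss only needs the final recycling iteration)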
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        # Compute loss
        loss, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        # Log it
        self._log(loss_breakdown, batch, outputs)
        return loss
    def on_before_zero_grad(self, *args, **kwargs):
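        # Lightning calls this hook after optimizer.step() and before the
        # gradients are zeroed, so the EMA tracks the post-update weights.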
        self.ema.update(self.model)

    def validation_step(self, batch, batch_idx):
        # At the start of validation, load the EMA weights
        if(self.cached_weights is None):
            # model.state_dict() contains references to model weights rather
            # than copies. Therefore, we need to clone them before calling 
            # load_state_dict().
            clone_param = lambda t: t.detach().clone()
            self.cached_weights = tensor_tree_map(clone_param, self.model.state_dict())
            self.model.load_state_dict(self.ema.state_dict()["params"])
        # Run the model
        outputs = self(batch)
        batch = tensor_tree_map(lambda t: t[..., -1], batch)
        # Compute loss and other metrics
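        # use_clamped_fape = 0. forces the unclamped FAPE loss at validation
        # time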
        batch["use_clamped_fape"] = 0.
        _, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        self._log(loss_breakdown, batch, outputs, train=False)
    def validation_epoch_end(self, _):
        # Restore the model weights to normal
        self.model.load_state_dict(self.cached_weights)
        self.cached_weights = None

    def _compute_validation_metrics(self, 
        batch, 
        outputs, 
        superimposition_metrics=False
    ):
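        # CA lDDT and CA dRMSD are always computed; GDT_TS and GDT_HA are
        # added only when superimposition_metrics is set (validation only).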
        metrics = {}
        
        gt_coords = batch["all_atom_positions"]
        pred_coords = outputs["final_atom_positions"]
        all_atom_mask = batch["all_atom_mask"]
    
        # This is super janky for superimposition. Fix later
        gt_coords_masked = gt_coords * all_atom_mask[..., None]
        pred_coords_masked = pred_coords * all_atom_mask[..., None]
        ca_pos = residue_constants.atom_order["CA"]
        gt_coords_masked_ca = gt_coords_masked[..., ca_pos, :]
        pred_coords_masked_ca = pred_coords_masked[..., ca_pos, :]
        all_atom_mask_ca = all_atom_mask[..., ca_pos]
    
        lddt_ca_score = lddt_ca(
            pred_coords,
            gt_coords,
            all_atom_mask,
            eps=self.config.globals.eps,
            per_residue=False,
        )

        metrics["lddt_ca"] = lddt_ca_score

        drmsd_ca_score = compute_drmsd(
            pred_coords_masked_ca,
            gt_coords_masked_ca,
            mask=all_atom_mask_ca, # still required here to compute n
        )
        metrics["drmsd_ca"] = drmsd_ca_score
    
        if(superimposition_metrics):
            superimposed_pred, _ = superimpose(
                gt_coords_masked_ca, pred_coords_masked_ca
            )
            gdt_ts_score = gdt_ts(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )
            gdt_ha_score = gdt_ha(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )

            metrics["gdt_ts"] = gdt_ts_score
            metrics["gdt_ha"] = gdt_ha_score
        return metrics

    def configure_optimizers(self, 
        learning_rate: float = 1e-3,
        eps: float = 1e-5,
    ) -> torch.optim.Adam:
        # Ignored as long as a DeepSpeed optimizer is configured
        optimizer = torch.optim.Adam(
            self.model.parameters(), 
            lr=learning_rate, 
            eps=eps
        )
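
        # AlphaFoldLRScheduler provides the per-step learning-rate schedule
        # (roughly a linear warmup followed by a delayed decay); see
        # openfold/utils/lr_schedulers.py for the defaults.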
        lr_scheduler = AlphaFoldLRScheduler(
            optimizer,
        )

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": lr_scheduler,
                "interval": "step",
                "name": "AlphaFoldLRScheduler",
            }
        }
    def on_load_checkpoint(self, checkpoint):
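        # The EMA state is saved and restored alongside the Lightning
        # checkpoint via these two hooks.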
        self.ema.load_state_dict(checkpoint["ema"])

    def on_save_checkpoint(self, checkpoint):
        checkpoint["ema"] = self.ema.state_dict()

def main(args):
    if(args.seed is not None):
        seed_everything(args.seed) 

    config = model_config(
        args.config_preset, 
        train=True, 
        low_prec=(args.precision == "16")
    ) 
    model_module = OpenFoldWrapper(config)
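    # When resuming model weights only, consolidate the sharded DeepSpeed
    # ZeRO checkpoint into a single fp32 state dict and strip the "module."
    # prefix that DeepSpeed prepends to parameter names.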
    if(args.resume_from_ckpt and args.resume_model_weights_only):
        sd = get_fp32_state_dict_from_zero_checkpoint(args.resume_from_ckpt)
        sd = {k[len("module."):]:v for k,v in sd.items()}
        model_module.load_state_dict(sd)
        logging.info("Successfully loaded model weights...")

    # TorchScript components of the model
    if(args.script_modules):
        script_preset_(model_module)
    #data_module = DummyDataLoader("new_batch.pickle")
    data_module = OpenFoldDataModule(
        config=config.data, 
        batch_seed=args.seed,
        **vars(args)
    )
    data_module.prepare_data()
    data_module.setup()
    callbacks = []
    if(args.checkpoint_every_epoch):
        mc = ModelCheckpoint(
            every_n_epochs=1,
            auto_insert_metric_name=False,
            save_top_k=-1,
        )
        callbacks.append(mc)

    if(args.early_stopping):
        es = EarlyStoppingVerbose(
            monitor="val/lddt_ca",
            min_delta=args.min_delta,
            patience=args.patience,
            verbose=False,
            mode="max",
            check_finite=True,
            strict=True,
        )
        callbacks.append(es)
    if(args.log_performance):
        global_batch_size = args.num_nodes * args.gpus
        perf = PerformanceLoggingCallback(
            log_file=os.path.join(args.output_dir, "performance_log.json"),
            global_batch_size=global_batch_size,
        )
        callbacks.append(perf)
    if(args.log_lr):
        lr_monitor = LearningRateMonitor(logging_interval="step")
        callbacks.append(lr_monitor)

    loggers = []
    if(args.wandb):
        wdb_logger = WandbLogger(
            name=args.experiment_name,
            save_dir=args.output_dir,
            id=args.wandb_id,
            project=args.wandb_project,
            **{"entity": args.wandb_entity}
        )
        loggers.append(wdb_logger)

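    # Pick the parallelism strategy: DeepSpeed when a config file is supplied,
    # vanilla DDP for multi-GPU or multi-node runs, and none otherwise.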
    if(args.deepspeed_config_path is not None):
        strategy = DeepSpeedPlugin(
            config=args.deepspeed_config_path,
        )
        if(args.wandb):
            wdb_logger.experiment.save(args.deepspeed_config_path)
            wdb_logger.experiment.save("openfold/config.py")
    elif (args.gpus is not None and args.gpus > 1) or args.num_nodes > 1:
        strategy = DDPPlugin(find_unused_parameters=False)
    else:
        strategy = None
    trainer = pl.Trainer.from_argparse_args(
        args,
        default_root_dir=args.output_dir,
        strategy=strategy,
        callbacks=callbacks,
        logger=loggers,
    )

    if(args.resume_model_weights_only):
        ckpt_path = None
    else:
        ckpt_path = args.resume_from_ckpt

    trainer.fit(
        model_module, 
        datamodule=data_module,
        ckpt_path=ckpt_path,
    )


def bool_type(bool_str: str):
    bool_str_lower = bool_str.lower()
    if bool_str_lower in ('false', 'f', 'no', 'n', '0'):
        return False
    elif bool_str_lower in ('true', 't', 'yes', 'y', '1'):
        return True
    else:
        raise ValueError(f'Cannot interpret {bool_str} as bool')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "train_data_dir", type=str,
        help="Directory containing training mmCIF files"
    )
    parser.add_argument(
        "train_alignment_dir", type=str,
        help="Directory containing precomputed training alignments"
    )
    parser.add_argument(
        "template_mmcif_dir", type=str,
        help="Directory containing mmCIF files to search for templates"
    )
    parser.add_argument(
        "output_dir", type=str,
        help='''Directory in which to output checkpoints, logs, etc. Ignored
                if not on rank 0'''
    )
    parser.add_argument(
        "max_template_date", type=str,
        help='''Cutoff for all templates. In training mode, templates are also 
                filtered by the release date of the target'''
    )
    parser.add_argument(
        "--distillation_data_dir", type=str, default=None,
        help="Directory containing training PDB files"
    )
    parser.add_argument(
        "--distillation_alignment_dir", type=str, default=None,
        help="Directory containing precomputed distillation alignments"
    )
    parser.add_argument(
        "--val_data_dir", type=str, default=None,
        help="Directory containing validation mmCIF files"
    )
    parser.add_argument(
        "--val_alignment_dir", type=str, default=None,
        help="Directory containing precomputed validation alignments"
    )
    parser.add_argument(
        "--kalign_binary_path", type=str, default='/usr/bin/kalign',
        help="Path to the kalign binary"
    )
    parser.add_argument(
        "--train_mapping_path", type=str, default=None,
        help='''Optional path to a .json file containing a mapping from
                consecutive numerical indices to sample names. Used to filter
                the training set'''
    )
    parser.add_argument(
        "--distillation_mapping_path", type=str, default=None,
        help="""See --train_mapping_path"""
    )
    parser.add_argument(
        "--obsolete_pdbs_file_path", type=str, default=None,
        help="""Path to obsolete.dat file containing list of obsolete PDBs and 
             their replacements."""
    )
    parser.add_argument(
        "--template_release_dates_cache_path", type=str, default=None,
        help="""Output of scripts/generate_mmcif_cache.py run on template mmCIF
                files."""
    )
    parser.add_argument(
        "--use_small_bfd", type=bool_type, default=False,
        help="Whether to use a reduced version of the BFD database"
    )
    parser.add_argument(
        "--seed", type=int, default=None,
        help="Random seed"
    )
    parser.add_argument(
        "--deepspeed_config_path", type=str, default=None,
        help="Path to DeepSpeed config. If not provided, DeepSpeed is disabled"
    )
    parser.add_argument(
        "--checkpoint_every_epoch", action="store_true", default=False,
        help="""Whether to checkpoint at the end of every training epoch"""
    )
    parser.add_argument(
        "--early_stopping", type=bool_type, default=False,
        help="""Whether to stop training when the monitored validation metric 
                (val/lddt_ca) stops improving"""
    )
    parser.add_argument(
        "--min_delta", type=float, default=0,
        help="""The smallest improvement in the monitored validation metric 
                that counts as an improvement for the purposes of early 
                stopping"""
    )
    parser.add_argument(
        "--patience", type=int, default=3,
        help="Early stopping patience"
    )
    parser.add_argument(
        "--resume_from_ckpt", type=str, default=None,
        help="Path to a model checkpoint from which to restore training state"
    )
    parser.add_argument(
        "--resume_model_weights_only", type=bool_type, default=False,
        help="Whether to load just model weights as opposed to training state"
    )
    parser.add_argument(
        "--log_performance", type=bool_type, default=False,
        help="Measure performance"
    )
    parser.add_argument(
        "--wandb", action="store_true", default=False,
        help="Whether to log metrics to Weights & Biases"
    )
    parser.add_argument(
        "--experiment_name", type=str, default=None,
        help="Name of the current experiment. Used for wandb logging"
    )
    parser.add_argument(
        "--wandb_id", type=str, default=None,
        help="ID of a previous run to be resumed"
    )
    parser.add_argument(
        "--wandb_project", type=str, default=None,
        help="Name of the wandb project to which this run will belong"
    )
    parser.add_argument(
        "--wandb_entity", type=str, default=None,
        help="wandb username or team name to which runs are attributed"
    )
    parser.add_argument(
        "--script_modules", type=bool_type, default=False,
        help="Whether to TorchScript eligible components of the model"
    )
    parser.add_argument(
        "--train_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--distillation_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--train_epoch_len", type=int, default=10000,
        help=(
            "The virtual length of each training epoch. Stochastic filtering "
            "of training data means that training datasets have no "
            "well-defined length. This virtual length affects frequency of "
            "validation & checkpointing (by default, one of each per epoch)."
        )
    )
    parser.add_argument(
        "--log_lr", action="store_true", default=False,
        help="Whether to log the actual learning rate"
    )
    parser.add_argument(
        "--config_preset", type=str, default="initial_training",
        help=(
            'Config setting. Choose e.g. "initial_training", "finetuning", '
            '"model_1", etc. By default, the actual values in the config are '
            'used.'
        )
    )
    parser.add_argument(
        "--_alignment_index_path", type=str, default=None,
    )
    parser = pl.Trainer.add_argparse_args(parser)
    # Disable the initial validation pass
    parser.set_defaults(
        num_sanity_val_steps=0,
    )

    # Remove some buggy/redundant arguments introduced by the Trainer
    remove_arguments(
        parser, 
        [
            "--accelerator", 
            "--resume_from_checkpoint",
            "--reload_dataloaders_every_epoch",
            "--reload_dataloaders_every_n_epochs",
        ]
    ) 

    args = parser.parse_args()

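    # Stochastic filtering of the training data must agree across ranks, so
    # distributed runs require an explicit shared seed.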
    if(args.seed is None and 
        ((args.gpus is not None and args.gpus > 1) or 
         (args.num_nodes is not None and args.num_nodes > 1))):
        raise ValueError("For distributed training, --seed must be specified")

    # This re-applies the training-time filters at the beginning of every epoch
    args.reload_dataloaders_every_n_epochs = 1

    main(args)