import argparse
import logging
import os

#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
#os.environ["MASTER_ADDR"]="10.119.81.14"
#os.environ["MASTER_PORT"]="42069"
#os.environ["NODE_RANK"]="0"

import random
import time

import numpy as np
import pytorch_lightning as pl
from pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.plugins.training_type import DeepSpeedPlugin, DDPPlugin
from pytorch_lightning.plugins.environments import SLURMEnvironment
import torch

from openfold.config import model_config
from openfold.data.data_modules import (
    OpenFoldDataModule,
    DummyDataLoader,
)
from openfold.model.model import AlphaFold
from openfold.model.torchscript import script_preset_
from openfold.np import residue_constants
from openfold.utils.callbacks import (
    EarlyStoppingVerbose,
)
from openfold.utils.exponential_moving_average import ExponentialMovingAverage
from openfold.utils.argparse import remove_arguments
from openfold.utils.loss import AlphaFoldLoss, lddt_ca, compute_drmsd
from openfold.utils.seed import seed_everything
from openfold.utils.superimposition import superimpose
from openfold.utils.tensor_utils import tensor_tree_map
from openfold.utils.validation_metrics import (
    gdt_ts,
    gdt_ha,
)
from scripts.zero_to_fp32 import (
    get_fp32_state_dict_from_zero_checkpoint
)

from openfold.utils.logger import PerformanceLoggingCallback


class OpenFoldWrapper(pl.LightningModule):
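    """A thin PyTorch Lightning wrapper around the AlphaFold model that
    bundles the loss, EMA weight tracking, and train/val logging."""
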
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.model = AlphaFold(config)
        self.loss = AlphaFoldLoss(config.loss)
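        # Track an exponential moving average of the model weights; the EMA
        # copy is swapped in for validation and saved with checkpoints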
        self.ema = ExponentialMovingAverage(
            model=self.model, decay=config.ema.decay
        )
        
        self.cached_weights = None
        self.last_lr_step = 0

    def forward(self, batch):
        return self.model(batch)

    def training_step(self, batch, batch_idx):
        if(self.ema.device != batch["aatype"].device):
            self.ema.to(batch["aatype"].device)

        # Run the model
        outputs = self(batch)
        
        # Remove the recycling dimension
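        # (input features are stacked along a trailing recycling axis; only
        # the final recycling iteration is used to compute the loss)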
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        # Compute loss
        loss, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        # Log it
        self.log(
            "train/loss", 
            loss, 
            on_step=True, logger=True,
        )
        self.log(
            "train/loss_epoch", 
            loss, 
            on_step=False, on_epoch=True, logger=True,
        )
        for loss_name, indiv_loss in loss_breakdown.items():
            self.log(
                f"train/{loss_name}", 
                indiv_loss, 
                on_step=True, logger=True,
            )

        with torch.no_grad():
            other_metrics = self.compute_validation_metrics(batch, outputs) 

        for k,v in other_metrics.items():
            self.log(f"train/{k}", v, on_step=False, on_epoch=True, logger=True)

        return loss

    def on_before_zero_grad(self, *args, **kwargs):
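        # Called after each optimizer step, right before gradients are
        # zeroed -- a convenient hook at which to refresh the EMA weights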
        self.ema.update(self.model)

#    def training_step_end(self, outputs):
#        # Temporary measure to address DeepSpeed scheduler bug
#        if(self.trainer.global_step != self.last_lr_step):
#            self.lr_schedulers().step()
#            self.last_lr_step = self.trainer.global_step

    def validation_step(self, batch, batch_idx):
        # At the start of validation, load the EMA weights
        if(self.cached_weights is None):
            self.cached_weights = self.model.state_dict()
            self.model.load_state_dict(self.ema.state_dict()["params"])
       
        # Run the model
        outputs = self(batch)
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        # Compute loss and other metrics
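        # disable FAPE clamping for validation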
        batch["use_clamped_fape"] = 0.
        loss, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )
        self.log("val/loss", loss, on_step=False, on_epoch=True, logger=True)
        for loss_name, indiv_loss in loss_breakdown.items():
            self.log(
                f"val/{loss_name}", 
                indiv_loss, 
                on_step=False, on_epoch=True, logger=True,
            )

        other_metrics = self.compute_validation_metrics(
            batch, outputs, superimposition_metrics=True,
        ) 
        for k,v in other_metrics.items():
            self.log(f"val/{k}", v, on_step=False, on_epoch=True, logger=True)

    def validation_epoch_end(self, _):
        # Restore the model weights to normal
        self.model.load_state_dict(self.cached_weights)
        self.cached_weights = None

    def compute_validation_metrics(self, 
        batch, 
        outputs, 
        superimposition_metrics=False
    ):
        metrics = {}
        
        gt_coords = batch["all_atom_positions"]
        pred_coords = outputs["final_atom_positions"]
        all_atom_mask = batch["all_atom_mask"]
    
        # This is super janky for superimposition. Fix later
        gt_coords_masked = gt_coords * all_atom_mask[..., None]
        pred_coords_masked = pred_coords * all_atom_mask[..., None]
        ca_pos = residue_constants.atom_order["CA"]
        gt_coords_masked_ca = gt_coords_masked[..., ca_pos, :]
        pred_coords_masked_ca = pred_coords_masked[..., ca_pos, :]
        all_atom_mask_ca = all_atom_mask[..., ca_pos]
    
        lddt_ca_score = lddt_ca(
            pred_coords,
            gt_coords,
            all_atom_mask,
            eps=self.config.globals.eps,
            per_residue=False,
        )
    
        metrics["lddt_ca"] = lddt_ca_score
    
        drmsd_ca_score = compute_drmsd(
            pred_coords_masked_ca,
            gt_coords_masked_ca,
            mask=all_atom_mask_ca,
        )
    
        metrics["drmsd_ca"] = drmsd_ca_score
    
        if(superimposition_metrics):
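            # GDT scores are only meaningful after superimposing the
            # predicted CA trace onto the ground truth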
            superimposed_pred, _ = superimpose(
                gt_coords_masked_ca, pred_coords_masked_ca
            )
            gdt_ts_score = gdt_ts(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )
            gdt_ha_score = gdt_ha(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )

            metrics["gdt_ts"] = gdt_ts_score
            metrics["gdt_ha"] = gdt_ha_score
    
        return metrics

    def configure_optimizers(self, 
        learning_rate: float = 1e-3,
        eps: float = 1e-5,
    ) -> torch.optim.Adam:
        # Ignored as long as a DeepSpeed optimizer is configured
        return torch.optim.Adam(
            self.model.parameters(), 
            lr=learning_rate, 
            eps=eps
        )

    def on_load_checkpoint(self, checkpoint):
        self.ema.load_state_dict(checkpoint["ema"])

    def on_save_checkpoint(self, checkpoint):
        checkpoint["ema"] = self.ema.state_dict()


def main(args):
    if(args.seed is not None):
        seed_everything(args.seed) 

    config = model_config(
        args.config_preset, 
        train=True, 
        low_prec=(args.precision == "16")
    ) 
    
    model_module = OpenFoldWrapper(config)
    if(args.resume_from_ckpt and args.resume_model_weights_only):
        sd = get_fp32_state_dict_from_zero_checkpoint(args.resume_from_ckpt)
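        # the DeepSpeed state dict prefixes every key with "module."; strip
        # it so the keys match this wrapper's parameter names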
        sd = {k[len("module."):]:v for k,v in sd.items()}
        model_module.load_state_dict(sd)
        logging.info("Successfully loaded model weights...")

    # TorchScript components of the model
    if(args.script_modules):
        script_preset_(model_module)

    #data_module = DummyDataLoader("new_batch.pickle")
    data_module = OpenFoldDataModule(
        config=config.data, 
        batch_seed=args.seed,
        **vars(args)
    )

    data_module.prepare_data()
    data_module.setup()
    
    callbacks = []
    if(args.checkpoint_every_epoch):
        mc = ModelCheckpoint(
            every_n_epochs=1,
        )
        callbacks.append(mc)

    if(args.early_stopping):
        es = EarlyStoppingVerbose(
            monitor="val/lddt_ca",
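            # lDDT-CA is a score where higher is better, hence mode="max"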
            min_delta=args.min_delta,
            patience=args.patience,
            verbose=False,
            mode="max",
            check_finite=True,
            strict=True,
        )
        callbacks.append(es)

    if(args.log_performance):
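        # global batch size assuming one training example per device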
        global_batch_size = args.num_nodes * args.gpus
        perf = PerformanceLoggingCallback(
            log_file=os.path.join(args.output_dir, "performance_log.json"),
            global_batch_size=global_batch_size,
        )
        callbacks.append(perf)

    if(args.log_lr):
        lr_monitor = LearningRateMonitor(logging_interval="step")
        callbacks.append(lr_monitor)

    loggers = []
    if(args.wandb):
        wdb_logger = WandbLogger(
            name=args.experiment_name,
            save_dir=args.output_dir,
            id=args.wandb_id,
            project=args.wandb_project,
            **{"entity": args.wandb_entity}
        )
        loggers.append(wdb_logger)

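    # Select the parallelism strategy: DeepSpeed when a config is provided,
    # vanilla DDP for multi-GPU/multi-node runs, and single-process otherwise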
    if(args.deepspeed_config_path is not None):
        strategy = DeepSpeedPlugin(
            config=args.deepspeed_config_path,
        )
        if(args.wandb):
            wdb_logger.experiment.save(args.deepspeed_config_path)
            wdb_logger.experiment.save("openfold/config.py")
    elif (args.gpus is not None and args.gpus > 1) or args.num_nodes > 1:
        strategy = DDPPlugin(find_unused_parameters=False)
    else:
        strategy = None
   
    trainer = pl.Trainer.from_argparse_args(
        args,
        default_root_dir=args.output_dir,
        strategy=strategy,
        callbacks=callbacks,
        logger=loggers,
    )

    if(args.resume_model_weights_only):
        ckpt_path = None
    else:
        ckpt_path = args.resume_from_ckpt

    trainer.fit(
        model_module, 
        datamodule=data_module,
        ckpt_path=ckpt_path,
    )


def bool_type(bool_str: str):
    bool_str_lower = bool_str.lower()
    if bool_str_lower in ('false', 'f', 'no', 'n', '0'):
        return False
    elif bool_str_lower in ('true', 't', 'yes', 'y', '1'):
        return True
    else:
        raise ValueError(f'Cannot interpret {bool_str} as bool')


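# Illustrative invocation (the paths and flag values below are placeholders,
# not defaults; --gpus/--num_nodes/--precision come from the Lightning
# Trainer's own argument group):
#
#   python train_openfold.py /data/pdb_mmcif /data/alignments \
#       /data/pdb_mmcif /out 2021-10-10 \
#       --config_preset initial_training --seed 42 \
#       --gpus 8 --num_nodes 1 --precision 16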
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "train_data_dir", type=str,
        help="Directory containing training mmCIF files"
    )
    parser.add_argument(
        "train_alignment_dir", type=str,
        help="Directory containing precomputed training alignments"
    )
    parser.add_argument(
        "template_mmcif_dir", type=str,
        help="Directory containing mmCIF files to search for templates"
    )
    parser.add_argument(
        "output_dir", type=str,
        help='''Directory in which to output checkpoints, logs, etc. Ignored
                if not on rank 0'''
    )
    parser.add_argument(
        "max_template_date", type=str,
        help='''Cutoff for all templates. In training mode, templates are also 
                filtered by the release date of the target'''
    )
    parser.add_argument(
        "--distillation_data_dir", type=str, default=None,
        help="Directory containing training PDB files"
    )
    parser.add_argument(
        "--distillation_alignment_dir", type=str, default=None,
        help="Directory containing precomputed distillation alignments"
    )
    parser.add_argument(
        "--val_data_dir", type=str, default=None,
        help="Directory containing validation mmCIF files"
    )
    parser.add_argument(
        "--val_alignment_dir", type=str, default=None,
        help="Directory containing precomputed validation alignments"
    )
    parser.add_argument(
        "--kalign_binary_path", type=str, default='/usr/bin/kalign',
        help="Path to the kalign binary"
    )
    parser.add_argument(
        "--train_mapping_path", type=str, default=None,
        help='''Optional path to a .json file containing a mapping from
                consecutive numerical indices to sample names. Used to filter
                the training set'''
    )
    parser.add_argument(
        "--distillation_mapping_path", type=str, default=None,
        help="""See --train_mapping_path"""
    )
    parser.add_argument(
        "--obsolete_pdbs_file_path", type=str, default=None,
        help="""Path to obsolete.dat file containing list of obsolete PDBs and 
             their replacements."""
    )
    parser.add_argument(
        "--template_release_dates_cache_path", type=str, default=None,
        help="""Output of scripts/generate_mmcif_cache.py run on template mmCIF
                files."""
    )
    parser.add_argument(
        "--use_small_bfd", type=bool_type, default=False,
        help="Whether to use a reduced version of the BFD database"
    )
    parser.add_argument(
        "--seed", type=int, default=None,
        help="Random seed"
    )
    parser.add_argument(
        "--deepspeed_config_path", type=str, default=None,
        help="Path to DeepSpeed config. If not provided, DeepSpeed is disabled"
    )
    parser.add_argument(
        "--checkpoint_every_epoch", action="store_true", default=False,
        help="""Whether to checkpoint at the end of every training epoch"""
    )
    parser.add_argument(
        "--early_stopping", type=bool_type, default=False,
        help="Whether to stop training when validation loss fails to decrease"
    )
    parser.add_argument(
        "--min_delta", type=float, default=0,
        help="""The smallest decrease in validation loss that counts as an 
                improvement for the purposes of early stopping"""
    )
    parser.add_argument(
        "--patience", type=int, default=3,
        help="Early stopping patience"
    )
    parser.add_argument(
        "--resume_from_ckpt", type=str, default=None,
        help="Path to a model checkpoint from which to restore training state"
    )
    parser.add_argument(
        "--resume_model_weights_only", type=bool_type, default=False,
        help="Whether to load just model weights as opposed to training state"
    )
    parser.add_argument(
        "--log_performance", type=bool_type, default=False,
        help="Measure performance"
    )
    parser.add_argument(
        "--wandb", action="store_true", default=False,
    )
    parser.add_argument(
        "--experiment_name", type=str, default=None,
    )
    parser.add_argument(
        "--wandb_id", type=str, default=None,
    )
    parser.add_argument(
        "--wandb_project", type=str, default=None,
    )
    parser.add_argument(
        "--wandb_entity", type=str, default=None,
    )
    parser.add_argument(
        "--script_modules", type=bool_type, default=False,
        help="Whether to TorchScript eligible components of them model"
    )
    parser.add_argument(
        "--train_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--distillation_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--train_epoch_len", type=int, default=10000,
    )
    parser.add_argument(
        "--_alignment_index_path", type=str, default=None,
    )
    parser.add_argument(
        "--log_lr", action="store_true", default=False,
    )
    parser.add_argument(
        "--config_preset", type=str, default="initial_training",
        help='Config setting. Choose e.g. "initial_training", "finetuning", "model_1", etc.'
    )
    parser = pl.Trainer.add_argparse_args(parser)
   
    # Disable the initial validation pass
    parser.set_defaults(
        num_sanity_val_steps=0,
    )

    # Remove some buggy/redundant arguments introduced by the Trainer
    remove_arguments(
        parser, 
        [
            "--accelerator", 
            "--resume_from_checkpoint",
            "--reload_dataloaders_every_epoch",
            "--reload_dataloaders_every_n_epochs",
        ]
    ) 

    args = parser.parse_args()

    if(args.seed is None and 
        ((args.gpus is not None and args.gpus > 1) or 
         (args.num_nodes is not None and args.num_nodes > 1))):
        raise ValueError("For distributed training, --seed must be specified")

    # This re-applies the training-time filters at the beginning of every epoch
    args.reload_dataloaders_every_n_epochs = 1

    main(args)