"tools/vscode:/vscode.git/clone" did not exist on "acc9dd2639c20a99bfb673e6d3b2528d9b2b0dbe"
train_openfold.py 18.3 KB
Newer Older
Gustaf Ahdritz's avatar
Gustaf Ahdritz committed
1
2
3
4
import argparse
import logging
import os

#os.environ["CUDA_VISIBLE_DEVICES"] = "0"

#os.environ["MASTER_ADDR"]="10.119.81.14"
#os.environ["MASTER_PORT"]="42069"
#os.environ["NODE_RANK"]="0"

import random
import sys
import time

import numpy as np
import pytorch_lightning as pl
from pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.plugins.training_type import DeepSpeedPlugin, DDPPlugin
from pytorch_lightning.plugins.environments import SLURMEnvironment

import torch

from openfold.config import model_config
from openfold.data.data_modules import (
    OpenFoldDataModule,
    DummyDataLoader,
)
from openfold.model.model import AlphaFold
from openfold.model.torchscript import script_preset_
from openfold.np import residue_constants
from openfold.utils.argparse import remove_arguments
from openfold.utils.callbacks import (
    EarlyStoppingVerbose,
)
from openfold.utils.exponential_moving_average import ExponentialMovingAverage
from openfold.utils.loss import AlphaFoldLoss, lddt_ca
from openfold.utils.lr_schedulers import AlphaFoldLRScheduler
from openfold.utils.seed import seed_everything
from openfold.utils.superimposition import superimpose
from openfold.utils.tensor_utils import tensor_tree_map
from openfold.utils.validation_metrics import (
    drmsd,
    gdt_ts,
    gdt_ha,
)

from scripts.zero_to_fp32 import (
    get_fp32_state_dict_from_zero_checkpoint
)

from openfold.utils.logger import PerformanceLoggingCallback


class OpenFoldWrapper(pl.LightningModule):
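    """A thin pl.LightningModule wrapper around the AlphaFold model.

    Bundles the model with its loss and an exponential moving average (EMA)
    of its weights; validation runs against the EMA weights.
    """
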
    def __init__(self, config):
        super(OpenFoldWrapper, self).__init__()
        self.config = config
        self.model = AlphaFold(config)
        self.loss = AlphaFoldLoss(config.loss)
        self.ema = ExponentialMovingAverage(
            model=self.model, decay=config.ema.decay
        )

        self.cached_weights = None
        self.last_lr_step = 0

    def forward(self, batch):
        return self.model(batch)

    def _log(self, loss_breakdown, batch, outputs, train=True):
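        """Logs each term of the loss breakdown and the other validation
        metrics under a "train/" or "val/" prefix: per-step (with an
        epoch-level copy) during training, per-epoch during validation."""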
        phase = "train" if train else "val"
        for loss_name, indiv_loss in loss_breakdown.items():
            self.log(
                f"{phase}/{loss_name}", 
                indiv_loss, 
                on_step=train, on_epoch=(not train), logger=True,
            )

            if(train):
                self.log(
                    f"{phase}/{loss_name}_epoch",
                    indiv_loss,
                    on_step=False, on_epoch=True, logger=True,
                )

        with torch.no_grad():
            other_metrics = self._compute_validation_metrics(
                batch, 
                outputs,
                superimposition_metrics=(not train)
            )

        for k,v in other_metrics.items():
            self.log(
                f"{phase}/{k}", 
                v, 
                on_step=False, on_epoch=True, logger=True
            )

    def training_step(self, batch, batch_idx):
        if(self.ema.device != batch["aatype"].device):
            self.ema.to(batch["aatype"].device)

        # Run the model
        outputs = self(batch)

        # Remove the recycling dimension
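        # (inputs are stacked along a trailing recycling dimension; taking
        # [..., -1] keeps the features of the final recycling iteration,
        # which is the one the loss is computed against)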
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        # Compute loss
        loss, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        # Log it
        self._log(loss_breakdown, batch, outputs)

        return loss

    def on_before_zero_grad(self, *args, **kwargs):
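        # Update the EMA shadow weights after every optimizer step (this
        # hook runs just before the gradients are zeroed)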
        self.ema.update(self.model)

    def validation_step(self, batch, batch_idx):
        # At the start of validation, load the EMA weights
        if(self.cached_weights is None):
            # model.state_dict() contains references to model weights rather
            # than copies. Therefore, we need to clone them before calling 
            # load_state_dict().
            clone_param = lambda t: t.detach().clone()
            self.cached_weights = tensor_tree_map(clone_param, self.model.state_dict())
            self.model.load_state_dict(self.ema.state_dict()["params"])

        # Run the model
        outputs = self(batch)
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        # Compute loss and other metrics
        batch["use_clamped_fape"] = 0.
        _, loss_breakdown = self.loss(
            outputs, batch, _return_breakdown=True
        )

        self._log(loss_breakdown, batch, outputs, train=False)

    def validation_epoch_end(self, _):
        # Restore the model weights to normal
        self.model.load_state_dict(self.cached_weights)
        self.cached_weights = None

    def _compute_validation_metrics(self,
        batch, 
        outputs, 
        superimposition_metrics=False
    ):
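        """Computes lDDT-Ca and dRMSD-Ca between the predicted and ground
        truth structures and, if superimposition_metrics is set, GDT-TS,
        GDT-HA, and the RMSD of the superimposed CA traces."""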
        metrics = {}
        
        gt_coords = batch["all_atom_positions"]
        pred_coords = outputs["final_atom_positions"]
        all_atom_mask = batch["all_atom_mask"]
    
        # This is super janky for superimposition. Fix later
        gt_coords_masked = gt_coords * all_atom_mask[..., None]
        pred_coords_masked = pred_coords * all_atom_mask[..., None]
        ca_pos = residue_constants.atom_order["CA"]
        gt_coords_masked_ca = gt_coords_masked[..., ca_pos, :]
        pred_coords_masked_ca = pred_coords_masked[..., ca_pos, :]
        all_atom_mask_ca = all_atom_mask[..., ca_pos]
    
        lddt_ca_score = lddt_ca(
            pred_coords,
            gt_coords,
            all_atom_mask,
            eps=self.config.globals.eps,
            per_residue=False,
        )
   
        metrics["lddt_ca"] = lddt_ca_score
   
        drmsd_ca_score = drmsd(
            pred_coords_masked_ca,
            gt_coords_masked_ca,
            mask=all_atom_mask_ca, # still required here to compute n
        )
   
        metrics["drmsd_ca"] = drmsd_ca_score
    
        if(superimposition_metrics):
            superimposed_pred, alignment_rmsd = superimpose(
                gt_coords_masked_ca, pred_coords_masked_ca, all_atom_mask_ca,
            )
            gdt_ts_score = gdt_ts(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )
            gdt_ha_score = gdt_ha(
                superimposed_pred, gt_coords_masked_ca, all_atom_mask_ca
            )

            metrics["alignment_rmsd"] = alignment_rmsd
            metrics["gdt_ts"] = gdt_ts_score
            metrics["gdt_ha"] = gdt_ha_score
    
        return metrics

    def configure_optimizers(self,
        learning_rate: float = 1e-3,
        eps: float = 1e-5,
    ):
#        return torch.optim.Adam(
#            self.model.parameters(),
#            lr=learning_rate,
#            eps=eps
#        )
        # Ignored as long as a DeepSpeed optimizer is configured
        optimizer = torch.optim.Adam(
            self.model.parameters(), 
            lr=learning_rate, 
            eps=eps
        )
        lr_scheduler = AlphaFoldLRScheduler(
            optimizer,
        )
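        # Lightning steps the scheduler once per optimizer step rather than
        # once per epoch ("interval": "step" below)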

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": lr_scheduler,
                "interval": "step",
                "name": "AlphaFoldLRScheduler",
            }
        }

    def on_load_checkpoint(self, checkpoint):
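        # Checkpoints written with templates enabled carry extra "template"
        # parameters in their EMA state; drop them if templates are disabled
        # in this config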
        ema = checkpoint["ema"]
        if(not self.model.template_config.enabled):
            ema["params"] = {k:v for k,v in ema["params"].items() if "template" not in k}
        self.ema.load_state_dict(ema)

    def on_save_checkpoint(self, checkpoint):
        checkpoint["ema"] = self.ema.state_dict()


def main(args):
    if(args.seed is not None):
        seed_everything(args.seed) 

    config = model_config(
        args.config_preset,
        train=True,
        low_prec=(str(args.precision) == "16")
    )

    model_module = OpenFoldWrapper(config)
    if(args.resume_from_ckpt and args.resume_model_weights_only):
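        # DeepSpeed ZeRO checkpoints are directories of sharded states that
        # must be consolidated into a single fp32 state dict before loading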
        if(os.path.isdir(args.resume_from_ckpt)):
            sd = get_fp32_state_dict_from_zero_checkpoint(args.resume_from_ckpt)
        else:
            sd = torch.load(args.resume_from_ckpt)
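        # the saved parameter names carry a "module." prefix; strip it
        # before loading into the wrapper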
        sd = {k[len("module."):]:v for k,v in sd.items()}
        model_module.load_state_dict(sd)
        logging.info("Successfully loaded model weights...")

    # TorchScript components of the model
    if(args.script_modules):
        script_preset_(model_module)

    #data_module = DummyDataLoader("new_batch.pickle")
    data_module = OpenFoldDataModule(
        config=config.data, 
        batch_seed=args.seed,
        **vars(args)
    )

    data_module.prepare_data()
    data_module.setup()

    callbacks = []
    if(args.checkpoint_every_epoch):
        mc = ModelCheckpoint(
            every_n_epochs=1,
            auto_insert_metric_name=False,
            save_top_k=-1,
        )
        callbacks.append(mc)

    if(args.early_stopping):
        es = EarlyStoppingVerbose(
            monitor="val/lddt_ca",
            min_delta=args.min_delta,
            patience=args.patience,
            verbose=False,
            mode="max",
            check_finite=True,
            strict=True,
        )
        callbacks.append(es)

    if(args.log_performance):
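        # Note: this global batch size assumes the single-sample-per-device
        # batching of the default training config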
        global_batch_size = args.num_nodes * args.gpus
        perf = PerformanceLoggingCallback(
            log_file=os.path.join(args.output_dir, "performance_log.json"),
            global_batch_size=global_batch_size,
        )
        callbacks.append(perf)

    if(args.log_lr):
        lr_monitor = LearningRateMonitor(logging_interval="step")
        callbacks.append(lr_monitor)

    loggers = []
    if(args.wandb):
        wdb_logger = WandbLogger(
            name=args.experiment_name,
            save_dir=args.output_dir,
            id=args.wandb_id,
            project=args.wandb_project,
            entity=args.wandb_entity,
        )
        loggers.append(wdb_logger)

    if(args.deepspeed_config_path is not None):
        strategy = DeepSpeedPlugin(
            config=args.deepspeed_config_path,
        )
        if(args.wandb):
            wdb_logger.experiment.save(args.deepspeed_config_path)
            wdb_logger.experiment.save("openfold/config.py")
    elif (args.gpus is not None and args.gpus > 1) or args.num_nodes > 1:
        strategy = DDPPlugin(find_unused_parameters=False)
    else:
        strategy = None
 
    if(args.wandb):
        freeze_path = f"{wdb_logger.experiment.dir}/package_versions.txt"
        os.system(f"{sys.executable} -m pip freeze > {freeze_path}")
        wdb_logger.experiment.save(f"{freeze_path}")

    trainer = pl.Trainer.from_argparse_args(
        args,
        default_root_dir=args.output_dir,
        strategy=strategy,
        callbacks=callbacks,
        logger=loggers,
    )

    if(args.resume_model_weights_only):
        ckpt_path = None
    else:
        ckpt_path = args.resume_from_ckpt

    trainer.fit(
        model_module, 
        datamodule=data_module,
        ckpt_path=ckpt_path,
    )


def bool_type(bool_str: str):
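    """Parses a truthy/falsy string into a bool, for use as an argparse type
    (e.g. bool_type("yes") -> True, bool_type("0") -> False)."""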
    bool_str_lower = bool_str.lower()
    if bool_str_lower in ('false', 'f', 'no', 'n', '0'):
        return False
    elif bool_str_lower in ('true', 't', 'yes', 'y', '1'):
        return True
    else:
        raise ValueError(f'Cannot interpret {bool_str} as bool')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "train_data_dir", type=str,
        help="Directory containing training mmCIF files"
    )
    parser.add_argument(
        "train_alignment_dir", type=str,
        help="Directory containing precomputed training alignments"
    )
    parser.add_argument(
        "template_mmcif_dir", type=str,
        help="Directory containing mmCIF files to search for templates"
    )
    parser.add_argument(
        "output_dir", type=str,
        help='''Directory in which to output checkpoints, logs, etc. Ignored
                if not on rank 0'''
    )
    parser.add_argument(
        "max_template_date", type=str,
        help='''Cutoff for all templates. In training mode, templates are also 
                filtered by the release date of the target'''
    )
    parser.add_argument(
        "--distillation_data_dir", type=str, default=None,
        help="Directory containing training PDB files"
    )
    parser.add_argument(
        "--distillation_alignment_dir", type=str, default=None,
        help="Directory containing precomputed distillation alignments"
    )
    parser.add_argument(
        "--val_data_dir", type=str, default=None,
        help="Directory containing validation mmCIF files"
    )
    parser.add_argument(
        "--val_alignment_dir", type=str, default=None,
        help="Directory containing precomputed validation alignments"
    )
    parser.add_argument(
        "--kalign_binary_path", type=str, default='/usr/bin/kalign',
        help="Path to the kalign binary"
    )
    parser.add_argument(
        "--train_filter_path", type=str, default=None,
        help='''Optional path to a text file containing names of training
                examples to include, one per line. Used to filter the training 
                set'''
    )
    parser.add_argument(
        "--distillation_filter_path", type=str, default=None,
        help="""See --train_filter_path"""
    )
    parser.add_argument(
        "--obsolete_pdbs_file_path", type=str, default=None,
        help="""Path to obsolete.dat file containing list of obsolete PDBs and 
             their replacements."""
    )
    parser.add_argument(
        "--template_release_dates_cache_path", type=str, default=None,
        help="""Output of scripts/generate_mmcif_cache.py run on template mmCIF
                files."""
    )
    parser.add_argument(
        "--use_small_bfd", type=bool_type, default=False,
        help="Whether to use a reduced version of the BFD database"
    )
    parser.add_argument(
        "--seed", type=int, default=None,
        help="Random seed"
    )
    parser.add_argument(
        "--deepspeed_config_path", type=str, default=None,
        help="Path to DeepSpeed config. If not provided, DeepSpeed is disabled"
    )
    parser.add_argument(
        "--checkpoint_every_epoch", action="store_true", default=False,
        help="""Whether to checkpoint at the end of every training epoch"""
    )
    parser.add_argument(
        "--early_stopping", type=bool_type, default=False,
        help="Whether to stop training when val/lddt_ca stops improving"
    )
    parser.add_argument(
        "--min_delta", type=float, default=0,
        help="""The smallest change in val/lddt_ca that counts as an 
                improvement for the purposes of early stopping"""
    )
    parser.add_argument(
        "--patience", type=int, default=3,
        help="Early stopping patience"
    )
    parser.add_argument(
        "--resume_from_ckpt", type=str, default=None,
        help="Path to a model checkpoint from which to restore training state"
    )
    parser.add_argument(
        "--resume_model_weights_only", type=bool_type, default=False,
        help="Whether to load just model weights as opposed to training state"
    )
    parser.add_argument(
        "--log_performance", type=bool_type, default=False,
        help="Measure performance"
    )
    parser.add_argument(
        "--wandb", action="store_true", default=False,
        help="Whether to log metrics to Weights & Biases"
    )
    parser.add_argument(
        "--experiment_name", type=str, default=None,
        help="Name of the current experiment. Used for wandb logging"
    )
    parser.add_argument(
        "--wandb_id", type=str, default=None,
        help="ID of a previous run to be resumed"
    )
    parser.add_argument(
        "--wandb_project", type=str, default=None,
        help="Name of the wandb project to which this run will belong"
    )
    parser.add_argument(
        "--wandb_entity", type=str, default=None,
        help="wandb username or team name to which runs are attributed"
    )
    parser.add_argument(
        "--script_modules", type=bool_type, default=False,
        help="Whether to TorchScript eligible components of the model"
    )
    parser.add_argument(
        "--train_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--distillation_chain_data_cache_path", type=str, default=None,
    )
    parser.add_argument(
        "--train_epoch_len", type=int, default=10000,
        help=(
            "The virtual length of each training epoch. Stochastic filtering "
            "of training data means that training datasets have no "
            "well-defined length. This virtual length affects frequency of "
            "validation & checkpointing (by default, one of each per epoch)."
        )
    )
    parser.add_argument(
        "--log_lr", action="store_true", default=False,
        help="Whether to log the actual learning rate"
    )
    parser.add_argument(
        "--config_preset", type=str, default="initial_training",
        help=(
            'Config preset. Choose e.g. "initial_training", "finetuning", '
            '"model_1", etc. The preset determines which block of values in '
            'openfold/config.py is used.'
        )
    )
    parser.add_argument(
        "--_distillation_structure_index_path", type=str, default=None,
    )
    parser.add_argument(
        "--alignment_index_path", type=str, default=None,
        help="Training alignment index. See the README for instructions."
    )
    parser.add_argument(
        "--distillation_alignment_index_path", type=str, default=None,
        help="Distillation alignment index. See the README for instructions."
    )
    parser = pl.Trainer.add_argparse_args(parser)
   
    # Disable the initial validation pass
    parser.set_defaults(
        num_sanity_val_steps=0,
    )

    # Remove some buggy/redundant arguments introduced by the Trainer
    remove_arguments(
        parser, 
        [
            "--accelerator", 
            "--resume_from_checkpoint",
            "--reload_dataloaders_every_epoch",
            "--reload_dataloaders_every_n_epochs",
        ]
    ) 

    args = parser.parse_args()

    if(args.seed is None and 
        ((args.gpus is not None and args.gpus > 1) or 
         (args.num_nodes is not None and args.num_nodes > 1))):
        raise ValueError("For distributed training, --seed must be specified")

    # This re-applies the training-time filters at the beginning of every epoch
    args.reload_dataloaders_every_n_epochs = 1

    main(args)
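
# Example invocation (paths and flag values are illustrative; --gpus,
# --precision, and --replace_sampler_ddp are added by
# pl.Trainer.add_argparse_args above):
#
#   python3 train_openfold.py mmcif_dir/ alignment_dir/ template_mmcif_dir/ \
#       output_dir/ 2021-10-10 \
#       --template_release_dates_cache_path mmcif_cache.json \
#       --precision 16 --gpus 8 --replace_sampler_ddp=True \
#       --seed 42 \
#       --deepspeed_config_path deepspeed_config.json \
#       --checkpoint_every_epoch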