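"""Training entrypoint for OpenFold, a trainable PyTorch reproduction of
AlphaFold 2, built on PyTorch Lightning.

Example invocation (all paths and values are illustrative):

    python train_openfold.py mmcif_dir/ alignment_dir/ template_mmcif_dir/ \
        output_dir/ 2021-10-10 --gpus 8 --num_nodes 1 --seed 42 \
        --precision 16 --deepspeed_config_path deepspeed_config.json
"""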
import argparse
import logging
import os

#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
#os.environ["MASTER_ADDR"]="10.119.81.14"
#os.environ["MASTER_PORT"]="42069"
#os.environ["NODE_RANK"]="0"

import random
import time

import numpy as np
import pytorch_lightning as pl
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.plugins.training_type import DeepSpeedPlugin, DDPPlugin
from pytorch_lightning.plugins.environments import SLURMEnvironment
import torch

from openfold.config import model_config
from openfold.data.data_modules import (
    OpenFoldDataModule,
    DummyDataLoader,
)
from openfold.model.model import AlphaFold
from openfold.model.torchscript import script_preset_
from openfold.utils.callbacks import (
    EarlyStoppingVerbose,
)
from openfold.utils.exponential_moving_average import ExponentialMovingAverage
from openfold.utils.argparse import remove_arguments
from openfold.utils.loss import AlphaFoldLoss
from openfold.utils.seed import seed_everything
from openfold.utils.tensor_utils import tensor_tree_map
from scripts.zero_to_fp32 import (
    get_fp32_state_dict_from_zero_checkpoint
)

from openfold.utils.logger import PerformanceLoggingCallback


class OpenFoldWrapper(pl.LightningModule):
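    """Thin LightningModule wrapper around the AlphaFold model.

    Couples the model with AlphaFoldLoss and an exponential moving average
    (EMA) of its weights; validation runs against the EMA parameters.
    """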
    def __init__(self, config):
        super(OpenFoldWrapper, self).__init__()
        self.config = config
        self.model = AlphaFold(config)
        self.loss = AlphaFoldLoss(config.loss)
        self.ema = ExponentialMovingAverage(
            model=self.model, decay=config.ema.decay
        )
        
        self.cached_weights = None

    def forward(self, batch):
        return self.model(batch)

    def training_step(self, batch, batch_idx):
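        # Keep the EMA parameters on the same device as the inputs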
        if(self.ema.device != batch["aatype"].device):
            self.ema.to(batch["aatype"].device)

        # Run the model
        outputs = self(batch)
        
        # Remove the recycling dimension
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        # Compute loss
        loss = self.loss(outputs, batch)
        self.log("loss", loss)
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        # At the start of validation, load the EMA weights
        if(self.cached_weights is None):
            self.cached_weights = self.model.state_dict()
            self.model.load_state_dict(self.ema.state_dict()["params"])
        
        # Calculate validation loss
        outputs = self(batch)
        batch = tensor_tree_map(lambda t: t[..., -1], batch)
        loss = self.loss(outputs, batch)
        self.log("val_loss", loss, prog_bar=True)
        return {"val_loss": loss}

    def validation_epoch_end(self, _):
        # Restore the original (non-EMA) model weights
        self.model.load_state_dict(self.cached_weights)
        self.cached_weights = None

    def configure_optimizers(self, 
        learning_rate: float = 1e-3,
        eps: float = 1e-8
    ) -> torch.optim.Adam:
        # Ignored as long as a DeepSpeed optimizer is configured
        return torch.optim.Adam(
            self.model.parameters(), 
            lr=learning_rate, 
            eps=eps
        )

    def on_before_zero_grad(self, *args, **kwargs):
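        # Lightning calls this hook after optimizer.step() and before the
        # gradients are zeroed, so the EMA tracks the freshly updated weights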
        self.ema.update(self.model)

    def on_save_checkpoint(self, checkpoint):
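        # Persist the EMA weights alongside the usual training state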
        checkpoint["ema"] = self.ema.state_dict()


def main(args):
    if(args.seed is not None):
        seed_everything(args.seed) 

    config = model_config(
        "model_1", 
        train=True, 
        low_prec=(args.precision == 16)
    ) 
    
    model_module = OpenFoldWrapper(config)
    if(args.resume_from_ckpt and args.resume_model_weights_only):
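        # Reassemble full fp32 weights from the (possibly sharded) DeepSpeed
        # ZeRO checkpoint and strip DeepSpeed's "module." prefix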
        sd = get_fp32_state_dict_from_zero_checkpoint(args.resume_from_ckpt)
        sd = {k[len("module."):]: v for k, v in sd.items()}
        model_module.load_state_dict(sd)
        logging.info("Successfully loaded model weights...")

    # TorchScript components of the model
    if(args.script_modules):
        script_preset_(model_module)

    #data_module = DummyDataLoader("batch.pickle")
    data_module = OpenFoldDataModule(
        config=config.data, 
        batch_seed=args.seed,
        **vars(args)
    )

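    # Build the datasets eagerly rather than deferring to trainer.fit()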
    data_module.prepare_data()
    data_module.setup()
    
    callbacks = []
    if(args.checkpoint_best_val):
        checkpoint_dir = os.path.join(args.output_dir, "checkpoints")
        mc = ModelCheckpoint(
            dirpath=checkpoint_dir,
            filename="openfold_{epoch}_{step}_{val_loss:.2f}",
            monitor="val_loss",
        )
        callbacks.append(mc)

    if(args.early_stopping):
        es = EarlyStoppingVerbose(
            monitor="val_loss",
            min_delta=args.min_delta,
            patience=args.patience,
            verbose=False,
            mode="min",
            check_finite=True,
            strict=True,
        )
        callbacks.append(es)
        
    if(args.log_performance):
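        # NB: assumes a per-device batch size of 1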
        global_batch_size = args.num_nodes * args.gpus
        perf = PerformanceLoggingCallback(
            log_file=os.path.join(args.output_dir, "performance_log.json"),
            global_batch_size=global_batch_size,
        )
        callbacks.append(perf)

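    # Choose a parallelism strategy: DeepSpeed if a config is provided, DDP
    # for multi-GPU/multi-node runs, and single-process otherwise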
    if(args.deepspeed_config_path is not None):
        if "SLURM_JOB_ID" in os.environ:
            cluster_environment = SLURMEnvironment()
        else:
            cluster_environment = None
        strategy = DeepSpeedPlugin(
            config=args.deepspeed_config_path,
            cluster_environment=cluster_environment,
        )
    elif (args.gpus is not None and args.gpus > 1) or args.num_nodes > 1:
        strategy = DDPPlugin(find_unused_parameters=False)
    else:
        strategy = None
    
    trainer = pl.Trainer.from_argparse_args(
        args,
        strategy=strategy,
        callbacks=callbacks,
    )

    if(args.resume_model_weights_only):
        ckpt_path = None
    else:
        ckpt_path = args.resume_from_ckpt

    trainer.fit(
        model_module, 
        datamodule=data_module,
        ckpt_path=ckpt_path,
    )

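    # Always write a final checkpoint, independent of any best-val checkpoint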
    trainer.save_checkpoint(
        os.path.join(trainer.logger.log_dir, "checkpoints", "final.ckpt")
    )


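# argparse's built-in bool() treats any non-empty string (including "False")
# as truthy, so boolean flags are parsed explicitly instead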
def bool_type(bool_str: str):
    bool_str_lower = bool_str.lower()
    if bool_str_lower in ('false', 'f', 'no', 'n', '0'):
        return False
    elif bool_str_lower in ('true', 't', 'yes', 'y', '1'):
        return True
    else:
        raise ValueError(f'Cannot interpret {bool_str} as bool')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "train_data_dir", type=str,
        help="Directory containing training mmCIF files"
    )
    parser.add_argument(
        "train_alignment_dir", type=str,
        help="Directory containing precomputed training alignments"
    )
    parser.add_argument(
        "template_mmcif_dir", type=str,
        help="Directory containing mmCIF files to search for templates"
    )
    parser.add_argument(
        "output_dir", type=str,
        help='''Directory in which to output checkpoints, logs, etc. Ignored
                if not on rank 0'''
    )
    parser.add_argument(
        "max_template_date", type=str,
        help='''Cutoff for all templates. In training mode, templates are also 
                filtered by the release date of the target'''
    )
    parser.add_argument(
        "--distillation_data_dir", type=str, default=None,
        help="Directory containing training PDB files"
    )
    parser.add_argument(
        "--distillation_alignment_dir", type=str, default=None,
        help="Directory containing precomputed distillation alignments"
    )
    parser.add_argument(
        "--val_data_dir", type=str, default=None,
        help="Directory containing validation mmCIF files"
    )
    parser.add_argument(
        "--val_alignment_dir", type=str, default=None,
        help="Directory containing precomputed validation alignments"
    )
    parser.add_argument(
        "--kalign_binary_path", type=str, default='/usr/bin/kalign',
        help="Path to the kalign binary"
    )
    parser.add_argument(
        "--train_mapping_path", type=str, default=None,
        help='''Optional path to a .json file containing a mapping from
                consecutive numerical indices to sample names. Used to filter
                the training set'''
    )
    parser.add_argument(
        "--distillation_mapping_path", type=str, default=None,
        help="""See --train_mapping_path"""
    )
    parser.add_argument(
        "--template_release_dates_cache_path", type=str, default=None,
        help="""Output of scripts/generate_mmcif_cache.py run on template mmCIF
                files."""
    )
    parser.add_argument(
        "--use_small_bfd", type=bool_type, default=False,
        help="Whether to use a reduced version of the BFD database"
    )
    parser.add_argument(
        "--seed", type=int, default=None,
        help="Random seed"
    )
    parser.add_argument(
        "--deepspeed_config_path", type=str, default=None,
        help="Path to DeepSpeed config. If not provided, DeepSpeed is disabled"
    )
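    # A minimal illustrative DeepSpeed config (keys from DeepSpeed's
    # documented schema, not the repo's canonical config) might look like:
    #   {"zero_optimization": {"stage": 2}, "gradient_clipping": 1.0}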
    parser.add_argument(
        "--checkpoint_best_val", type=bool_type, default=True,
        help="""Whether to save the model parameters that perform best during
                validation"""
    )
    parser.add_argument(
        "--early_stopping", type=bool_type, default=False,
        help="Whether to stop training when validation loss fails to decrease"
    )
    parser.add_argument(
        "--min_delta", type=float, default=0,
        help="""The smallest decrease in validation loss that counts as an 
                improvement for the purposes of early stopping"""
    )
    parser.add_argument(
        "--patience", type=int, default=3,
        help="Early stopping patience"
    )
    parser.add_argument(
        "--resume_from_ckpt", type=str, default=None,
        help="Path to a model checkpoint from which to restore training state"
    )
    parser.add_argument(
        "--resume_model_weights_only", type=bool_type, default=False,
        help="Whether to load just model weights as opposed to training state"
    )
    parser.add_argument(
        "--log_performance", type=bool_type, default=False,
        help="Measure performance"
    )
    parser.add_argument(
        "--script_modules", type=bool_type, default=False,
        help="Whether to TorchScript eligible components of them model"
    )
    parser = pl.Trainer.add_argparse_args(parser)
   
    # Disable the initial validation pass
    parser.set_defaults(
        num_sanity_val_steps=0,
    )

    # Remove some buggy/redundant arguments introduced by the Trainer
    remove_arguments(parser, ["--accelerator", "--resume_from_checkpoint"]) 

    args = parser.parse_args()

    if(args.seed is None and 
        ((args.gpus is not None and args.gpus > 1) or 
         (args.num_nodes is not None and args.num_nodes > 1))):
        raise ValueError("For distributed training, --seed must be specified")

    main(args)