import argparse
import logging
import os

#os.environ["CUDA_VISIBLE_DEVICES"] = "7"
#os.environ["MASTER_ADDR"]="10.119.81.14"
#os.environ["MASTER_PORT"]="42069"
#os.environ["NODE_RANK"]="0"

import random
import time

import numpy as np
import pytorch_lightning as pl
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.plugins.training_type import DeepSpeedPlugin
import torch

from openfold.config import model_config
from openfold.data.data_modules import (
    OpenFoldDataModule,
    DummyDataLoader,
)
from openfold.model.model import AlphaFold
from openfold.utils.callbacks import (
    EarlyStoppingVerbose,
)
from openfold.utils.exponential_moving_average import ExponentialMovingAverage
from openfold.utils.argparse import remove_arguments
from openfold.utils.loss import AlphaFoldLoss
from openfold.utils.seed import seed_everything
from openfold.utils.tensor_utils import tensor_tree_map
from scripts.zero_to_fp32 import (
    get_fp32_state_dict_from_zero_checkpoint
)

from openfold.utils.logger import PerformanceLoggingCallback


class OpenFoldWrapper(pl.LightningModule):
    def __init__(self, config):
        super(OpenFoldWrapper, self).__init__()
        self.config = config
        self.model = AlphaFold(config)
        self.loss = AlphaFoldLoss(config.loss)
        self.ema = ExponentialMovingAverage(
            model=self.model, decay=config.ema.decay
        )
        # Non-EMA weights are cached here while the EMA weights are swapped
        # in during validation; must be initialized before validation_step
        # reads it
        self.cached_weights = None

    def forward(self, batch):
        return self.model(batch)

    def training_step(self, batch, batch_idx):
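        # Keep the EMA's shadow parameters on the same device as the inputs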
        if(self.ema.device != batch["aatype"].device):
            self.ema.to(batch["aatype"].device)

        # Run the model
        outputs = self(batch)
        
        # Remove the recycling dimension
        batch = tensor_tree_map(lambda t: t[..., -1], batch)

        # Compute loss
        loss = self.loss(outputs, batch)

        #if(torch.isnan(loss) or torch.isinf(loss)):
        #    logging.warning("loss is NaN. Skipping example...")
        #    loss = loss.new_tensor(0., requires_grad=True)

        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        # At the start of validation, load the EMA weights
        if(self.cached_weights is None):
            self.cached_weights = self.model.state_dict()
            self.model.load_state_dict(self.ema.state_dict()["params"])
        
        # Calculate validation loss
        outputs = self(batch)
        batch = tensor_tree_map(lambda t: t[..., -1], batch)
        loss = self.loss(outputs, batch)
        return {"val_loss": loss}

    def validation_epoch_end(self, _):
        # Restore the model weights to normal
        self.model.load_state_dict(self.cached_weights)
        self.cached_weights = None

    def configure_optimizers(self, 
        learning_rate: float = 1e-3,
        eps: float = 1e-8
    ) -> torch.optim.Adam:
        # Ignored as long as a DeepSpeed optimizer is configured
        return torch.optim.Adam(
            self.model.parameters(), 
            lr=learning_rate, 
            eps=eps
        )

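    # Lightning calls this hook after optimizer.step() and before
    # zero_grad(), so the EMA tracks the freshly updated weights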
    def on_before_zero_grad(self, *args, **kwargs):
        self.ema.update(self.model)

    def on_save_checkpoint(self, checkpoint):
        checkpoint["ema"] = self.ema.state_dict()


def main(args):
    if(args.seed is not None):
        seed_everything(args.seed) 

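    # "model_1" selects the AlphaFold model 1 preset; low_prec relaxes
    # numerical constants (e.g. eps values) that underflow in fp16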
    config = model_config(
        "model_1", 
        train=True, 
        low_prec=(args.precision == 16)
    )

    model_module = OpenFoldWrapper(config)
    if(args.resume_from_ckpt and args.resume_model_weights_only):
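        # DeepSpeed ZeRO checkpoints prefix parameter names with "module.";
        # strip it so the keys match this LightningModule's state dict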
        sd = get_fp32_state_dict_from_zero_checkpoint(args.resume_from_ckpt)
        sd = {k[len("module."):]: v for k, v in sd.items()}
        model_module.load_state_dict(sd)
        logging.info("Successfully loaded model weights...")

    #data_module = DummyDataLoader("batch.pickle")
    data_module = OpenFoldDataModule(
        config=config.data, 
        batch_seed=args.seed,
        **vars(args)
    )
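
    # Materialize the datasets up front rather than deferring to the Trainer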
    data_module.prepare_data()
    data_module.setup()

    callbacks = []
    if(args.checkpoint_best_val):
        checkpoint_dir = os.path.join(args.output_dir, "checkpoints")
        mc = ModelCheckpoint(
            dirpath=checkpoint_dir,
            filename="openfold_{epoch}_{step}_{val_loss:.2f}",
            monitor="val_loss",
        )
        callbacks.append(mc)

    if(args.early_stopping):
        es = EarlyStoppingVerbose(
            monitor="val_loss",
            min_delta=args.min_delta,
            patience=args.patience,
            verbose=False,
            mode="min",
            check_finite=True,
            strict=True,
        )
        callbacks.append(es)

    if args.log_performance:
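        # Assumes a per-device batch size of 1, so the global batch size
        # equals the total device count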
        global_batch_size = args.num_nodes * args.gpus
        perf = PerformanceLoggingCallback(
            log_file=os.path.join(args.output_dir, "performance_log.json"),
            global_batch_size=global_batch_size,
        )
        callbacks.append(perf)

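    # Pick the parallelism strategy: DeepSpeed when a config is given,
    # plain DDP for multi-device runs, single-process otherwise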
    if(args.deepspeed_config_path is not None):
        strategy = DeepSpeedPlugin(config=args.deepspeed_config_path)
    elif args.gpus > 1 or args.num_nodes > 1:
        strategy = "ddp"
    else:
        strategy = None
    
    trainer = pl.Trainer.from_argparse_args(
        args,
        strategy=strategy,
        callbacks=callbacks,
    )

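    # When restoring weights only, don't hand the checkpoint to the
    # Trainer; optimizer and scheduler state then start from scratch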
    if(args.resume_model_weights_only):
        ckpt_path = None
    else:
        ckpt_path = args.resume_from_ckpt

    trainer.fit(
        model_module, 
        datamodule=data_module,
        ckpt_path=ckpt_path,
    )

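    # Write a final copy of the weights, independent of any ModelCheckpoint
    # callback output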
    trainer.save_checkpoint(
        os.path.join(trainer.logger.log_dir, "checkpoints", "final.ckpt")
    )


def bool_type(bool_str: str):
    bool_str_lower = bool_str.lower()
    if bool_str_lower in ('false', 'f', 'no', 'n', '0'):
        return False
    elif bool_str_lower in ('true', 't', 'yes', 'y', '1'):
        return True
    else:
        raise ValueError(f'Cannot interpret {bool_str} as bool')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "train_data_dir", type=str,
        help="Directory containing training mmCIF files"
    )
    parser.add_argument(
        "train_alignment_dir", type=str,
        help="Directory containing precomputed training alignments"
    )
    parser.add_argument(
        "template_mmcif_dir", type=str,
        help="Directory containing mmCIF files to search for templates"
    )
    parser.add_argument(
        "output_dir", type=str,
        help='''Directory in which to output checkpoints, logs, etc. Ignored
                if not on rank 0'''
    )
    parser.add_argument(
        "max_template_date", type=str,
        help='''Cutoff for all templates. In training mode, templates are also 
                filtered by the release date of the target'''
    )
    parser.add_argument(
        "--distillation_data_dir", type=str, default=None,
        help="Directory containing training PDB files"
    )
    parser.add_argument(
        "--distillation_alignment_dir", type=str, default=None,
        help="Directory containing precomputed distillation alignments"
    )
    parser.add_argument(
        "--val_data_dir", type=str, default=None,
        help="Directory containing validation mmCIF files"
    )
    parser.add_argument(
        "--val_alignment_dir", type=str, default=None,
        help="Directory containing precomputed validation alignments"
    )
    parser.add_argument(
        "--kalign_binary_path", type=str, default='/usr/bin/kalign',
        help="Path to the kalign binary"
    )
    parser.add_argument(
        "--train_mapping_path", type=str, default=None,
        help='''Optional path to a .json file containing a mapping from
                consecutive numerical indices to sample names. Used to filter
                the training set'''
    )
    parser.add_argument(
        "--distillation_mapping_path", type=str, default=None,
        help="""See --train_mapping_path"""
    )
    parser.add_argument(
        "--template_release_dates_cache_path", type=str, default=None,
        help="""Output of scripts/generate_mmcif_cache.py run on template mmCIF
                files."""
    )
    parser.add_argument(
        "--use_small_bfd", type=bool_type, default=False,
        help="Whether to use a reduced version of the BFD database"
    )
    parser.add_argument(
        "--seed", type=int, default=None,
        help="Random seed"
    )
    parser.add_argument(
        "--deepspeed_config_path", type=str, default=None,
        help="Path to DeepSpeed config. If not provided, DeepSpeed is disabled"
    )
    parser.add_argument(
        "--checkpoint_best_val", type=bool_type, default=True,
        help="""Whether to save the model parameters that perform best during
                validation"""
    )
    parser.add_argument(
        "--early_stopping", type=bool_type, default=False,
        help="Whether to stop training when validation loss fails to decrease"
    )
    parser.add_argument(
        "--min_delta", type=float, default=0,
        help="""The smallest decrease in validation loss that counts as an 
                improvement for the purposes of early stopping"""
    )
    parser.add_argument(
        "--patience", type=int, default=3,
        help="Early stopping patience"
    )
    parser.add_argument(
        "--resume_from_ckpt", type=str, default=None,
        help="Path to a model checkpoint from which to restore training state"
    )
    parser.add_argument(
        "--resume_model_weights_only", type=bool_type, default=False,
        help="Whether to load just model weights as opposed to training state"
    )
    parser.add_argument(
        "--log_performance", action='store_true',
        help="Measure performance"
    )
    parser = pl.Trainer.add_argparse_args(parser)
   
    # Disable the initial validation pass
    parser.set_defaults(
        num_sanity_val_steps=0,
    )

    # Remove some buggy/redundant arguments introduced by the Trainer
    remove_arguments(parser, ["--accelerator", "--resume_from_checkpoint"]) 

    args = parser.parse_args()

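    # Ranks must share a seed so that each one derives the same data split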
    if(args.seed is None and 
        ((args.gpus is not None and args.gpus > 1) or 
         (args.num_nodes is not None and args.num_nodes > 1))):
        raise ValueError("For distributed training, --seed must be specified")

    main(args)
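
# Example invocation (all paths and values below are illustrative):
#
#   python3 train_openfold.py mmcif_dir/ alignment_dir/ template_mmcif_dir/ \
#       output_dir/ 2021-09-30 \
#       --gpus 8 --num_nodes 1 --seed 42 \
#       --deepspeed_config_path deepspeed_config.json \
#       --checkpoint_best_val true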