"examples/python_rs/vscode:/vscode.git/clone" did not exist on "fe83f8aa3c96e238ef97275d5fec94b216d26743"
Commit 0c3435cc authored by Jennifer
Browse files

add metric logging to progress bar.

parent cfd2e719
...@@ -17,7 +17,6 @@ from openfold.data.data_modules import OpenFoldDataModule, OpenFoldMultimerDataM ...@@ -17,7 +17,6 @@ from openfold.data.data_modules import OpenFoldDataModule, OpenFoldMultimerDataM
from openfold.model.model import AlphaFold from openfold.model.model import AlphaFold
from openfold.model.torchscript import script_preset_ from openfold.model.torchscript import script_preset_
from openfold.np import residue_constants from openfold.np import residue_constants
from openfold.utils.argparse_utils import remove_arguments
from openfold.utils.callbacks import ( from openfold.utils.callbacks import (
EarlyStoppingVerbose, EarlyStoppingVerbose,
) )
...@@ -70,13 +69,14 @@ class OpenFoldWrapper(pl.LightningModule): ...@@ -70,13 +69,14 @@ class OpenFoldWrapper(pl.LightningModule):
self.log( self.log(
f"{phase}/{loss_name}", f"{phase}/{loss_name}",
indiv_loss, indiv_loss,
prog_bar=(loss_name == 'loss'),
on_step=train, on_epoch=(not train), logger=True, on_step=train, on_epoch=(not train), logger=True,
) )
if (train): if (train):
self.log( self.log(
f"{phase}/{loss_name}_epoch", f"{phase}/{loss_name}_epoch",
indiv_loss, indiv_loss,
on_step=False, on_epoch=True, logger=True, on_step=False, on_epoch=True, logger=True,
) )
...@@ -91,7 +91,8 @@ class OpenFoldWrapper(pl.LightningModule): ...@@ -91,7 +91,8 @@ class OpenFoldWrapper(pl.LightningModule):
self.log( self.log(
f"{phase}/{k}", f"{phase}/{k}",
torch.mean(v), torch.mean(v),
on_step=False, on_epoch=True, logger=True prog_bar = (k == 'loss'),
on_step=False, on_epoch=True, logger=True,
) )
def training_step(self, batch, batch_idx): def training_step(self, batch, batch_idx):
...@@ -629,12 +630,17 @@ if __name__ == "__main__": ...@@ -629,12 +630,17 @@ if __name__ == "__main__":
parser.add_argument( parser.add_argument(
"--experiment_config_json", default="", help="Path to a json file with custom config values to overwrite config setting", "--experiment_config_json", default="", help="Path to a json file with custom config values to overwrite config setting",
) )
# Trainer additional arguments
# Ideally we'd want something like config.add_trainer_args()
parser.add_argument( parser.add_argument(
"--num_nodes", type=int, default=1, "--num_nodes", type=int, default=1,
) )
parser.add_argument( parser.add_argument(
"--gpus", type=int, default=1, "--gpus", type=int, default=1,
) )
parser.add_argument(
"--num_workers", type=int, default=4, # interaction with num_data_workers?
)
parser.add_argument( parser.add_argument(
"--precision", type=str, default=None, "--precision", type=str, default=None,
) )
...@@ -647,6 +653,9 @@ if __name__ == "__main__": ...@@ -647,6 +653,9 @@ if __name__ == "__main__":
parser.add_argument( parser.add_argument(
"--log_every_n_steps", type=int, default=25, "--log_every_n_steps", type=int, default=25,
) )
parser.add_argument(
"--flush_logs_every_n_steps", type=int, default=5,
)
parser.add_argument( parser.add_argument(
"--num_sanity_val_steps", type=int, default=0, "--num_sanity_val_steps", type=int, default=0,
) )
......
Markdown is supported
Attach a file by drag & drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment