Commit 2de2ecbc authored by Gao, Xiang, committed by Farhad Ramezanghorbani

torch.autograd.profiler.emit_nvtx to show operators (#410)

parent eba1a18c
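For context, here is a minimal, self-contained sketch of the profiling pattern the diff below applies (the model, data, and constants are placeholders, not the repository's actual training script, and a CUDA-capable setup is assumed): coarse torch.cuda.nvtx ranges mark batches and phases, torch.autograd.profiler.emit_nvtx() additionally emits one NVTX range per autograd operator so individual operators show up in the Nsight timeline, and everything is gated behind a few warm-up batches.

    # Minimal sketch of the pattern this commit applies; placeholder model/data.
    import torch

    WARM_UP_BATCHES = 2          # do not profile the first, noisy warm-up batches
    device = 'cuda'              # assumes a CUDA-capable machine

    model = torch.nn.Linear(64, 1).to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
    mse = torch.nn.MSELoss()

    # Start point for a profiler capture range keyed on the CUDA profiler API.
    torch.cuda.cudart().cudaProfilerStart()

    for batch in range(5):
        profiling_started = batch >= WARM_UP_BATCHES
        if profiling_started:
            torch.cuda.nvtx.range_push("batch{}".format(batch))

        x = torch.randn(32, 64, device=device)
        y = torch.randn(32, 1, device=device)

        # emit_nvtx annotates every operator executed inside the block.
        with torch.autograd.profiler.emit_nvtx(enabled=profiling_started, record_shapes=True):
            loss = mse(model(x), y)

        if profiling_started:
            torch.cuda.nvtx.range_push("backward")
        # emit_nvtx must also be active around backward() so backward ops are annotated.
        with torch.autograd.profiler.emit_nvtx(enabled=profiling_started, record_shapes=True):
            loss.backward()
        if profiling_started:
            torch.cuda.nvtx.range_pop()      # closes "backward"

        optimizer.step()
        optimizer.zero_grad()

        if profiling_started:
            torch.cuda.nvtx.range_pop()      # closes "batch{}"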
@@ -30,3 +30,5 @@ dist
 *.swo
 /download
 /download.tar.xz
+*.qdrep
+*.qdstrm
@@ -147,7 +147,9 @@ if __name__ == "__main__":
         enable_timers(model)
         torch.cuda.cudart().cudaProfilerStart()
-        if total_batch_counter >= WARM_UP_BATCHES:
+        PROFILING_STARTED = (total_batch_counter >= WARM_UP_BATCHES)
+        if PROFILING_STARTED:
             torch.cuda.nvtx.range_push("batch{}".format(total_batch_counter))
         true_energies = batch_y['energies'].to(parser.device)
@@ -155,14 +157,15 @@ if __name__ == "__main__":
         num_atoms = []
         for j, (chunk_species, chunk_coordinates) in enumerate(batch_x):
-            if total_batch_counter >= WARM_UP_BATCHES:
+            if PROFILING_STARTED:
                 torch.cuda.nvtx.range_push("chunk{}".format(j))
             chunk_species = chunk_species.to(parser.device)
             chunk_coordinates = chunk_coordinates.to(parser.device)
             num_atoms.append((chunk_species >= 0).to(true_energies.dtype).sum(dim=1))
-            _, chunk_energies = model((chunk_species, chunk_coordinates))
+            with torch.autograd.profiler.emit_nvtx(enabled=PROFILING_STARTED, record_shapes=True):
+                _, chunk_energies = model((chunk_species, chunk_coordinates))
             predicted_energies.append(chunk_energies)
-            if total_batch_counter >= WARM_UP_BATCHES:
+            if PROFILING_STARTED:
                 torch.cuda.nvtx.range_pop()
         num_atoms = torch.cat(num_atoms)
@@ -170,21 +173,23 @@ if __name__ == "__main__":
         loss = (mse(predicted_energies, true_energies) / num_atoms.sqrt()).mean()
         rmse = hartree2kcal((mse(predicted_energies, true_energies)).mean()).detach().cpu().numpy()
-        if total_batch_counter >= WARM_UP_BATCHES:
+        if PROFILING_STARTED:
             torch.cuda.nvtx.range_push("backward")
-        loss.backward()
-        if total_batch_counter >= WARM_UP_BATCHES:
+        with torch.autograd.profiler.emit_nvtx(enabled=PROFILING_STARTED, record_shapes=True):
+            loss.backward()
+        if PROFILING_STARTED:
             torch.cuda.nvtx.range_pop()
-        if total_batch_counter >= WARM_UP_BATCHES:
+        if PROFILING_STARTED:
             torch.cuda.nvtx.range_push("optimizer.step()")
-        optimizer.step()
-        if total_batch_counter >= WARM_UP_BATCHES:
+        with torch.autograd.profiler.emit_nvtx(enabled=PROFILING_STARTED, record_shapes=True):
+            optimizer.step()
+        if PROFILING_STARTED:
             torch.cuda.nvtx.range_pop()
         progbar.update(i, values=[("rmse", rmse)])
-        if total_batch_counter >= WARM_UP_BATCHES:
+        if PROFILING_STARTED:
             torch.cuda.nvtx.range_pop()
         total_batch_counter += 1
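Usage note: with torch.cuda.cudart().cudaProfilerStart() marking the start point, the trace is typically captured with Nsight Systems using a capture range keyed on the CUDA profiler API, for example nsys profile --capture-range=cudaProfilerApi --trace=cuda,nvtx python <training script> (exact flag spellings may vary across Nsight Systems versions). The resulting *.qdrep report and *.qdstrm stream files are why those patterns are added to the ignore list in the first hunk above.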