Unverified commit 692c5be7 authored by Partho, committed by GitHub

wrap forward passes with torch.no_grad() (#19439)

parent a7bc4221
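This commit wraps inference-only forward passes in the VisualBERT integration tests in torch.no_grad(), so autograd does not record a gradient graph for outputs that are only compared against expected values; that lowers the tests' memory footprint and speeds them up. Below is a minimal standalone sketch of the pattern, not code taken verbatim from the test file: the checkpoint name is a real VisualBERT checkpoint on the Hub, but the input values and shapes are assumptions for illustration.

import torch
from transformers import VisualBertModel

# Sketch only: checkpoint is real, inputs are made-up placeholders.
model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
model.eval()

input_ids = torch.tensor([1, 2, 3, 4, 5, 6]).reshape(1, -1)
attention_mask = torch.tensor([1] * 6).reshape(1, -1)
visual_embeds = torch.ones(size=(1, 10, 2048), dtype=torch.float32) * 0.5
visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long)
visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1)

# torch.no_grad() tells autograd not to track operations inside the
# block, so no computation graph is built for this forward pass.
with torch.no_grad():
    output = model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        visual_embeds=visual_embeds,
        visual_attention_mask=visual_attention_mask,
        visual_token_type_ids=visual_token_type_ids,
    )

print(output.last_hidden_state.shape)  # (1, 6 text + 10 visual tokens, hidden_size)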
@@ -568,6 +568,7 @@ class VisualBertModelIntegrationTest(unittest.TestCase):
         attention_mask = torch.tensor([1] * 6).reshape(1, -1)
         visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1)
-        output = model(
-            input_ids=input_ids,
-            attention_mask=attention_mask,
+        with torch.no_grad():
+            output = model(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
@@ -606,6 +607,7 @@ class VisualBertModelIntegrationTest(unittest.TestCase):
         attention_mask = torch.tensor([1] * 6).reshape(1, -1)
         visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1)
-        output = model(
-            input_ids=input_ids,
-            attention_mask=attention_mask,
+        with torch.no_grad():
+            output = model(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
@@ -637,6 +639,7 @@ class VisualBertModelIntegrationTest(unittest.TestCase):
         attention_mask = torch.tensor([1] * 6).reshape(1, -1)
         visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1)
-        output = model(
-            input_ids=input_ids,
-            attention_mask=attention_mask,
+        with torch.no_grad():
+            output = model(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
@@ -667,6 +670,7 @@ class VisualBertModelIntegrationTest(unittest.TestCase):
         visual_token_type_ids = torch.ones(size=(1, 4, 10), dtype=torch.long)
         visual_attention_mask = torch.ones_like(visual_token_type_ids)
-        output = model(
-            input_ids=input_ids,
-            attention_mask=attention_mask,
+        with torch.no_grad():
+            output = model(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
...