Commit b1ff0b2a authored by Andrey Kulagin's avatar Andrey Kulagin Committed by Julien Chaumond
Browse files

Fix bug in examples: double wrap into DataParallel during eval

parent 7f23af16
...@@ -255,7 +255,7 @@ def evaluate(args, model, tokenizer, prefix=""): ...@@ -255,7 +255,7 @@ def evaluate(args, model, tokenizer, prefix=""):
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval # multi-gpu eval
if args.n_gpu > 1: if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model) model = torch.nn.DataParallel(model)
# Eval! # Eval!
......
...@@ -278,7 +278,7 @@ def evaluate(args, model, tokenizer, criterion, prefix=""): ...@@ -278,7 +278,7 @@ def evaluate(args, model, tokenizer, criterion, prefix=""):
) )
# multi-gpu eval # multi-gpu eval
if args.n_gpu > 1: if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model) model = torch.nn.DataParallel(model)
# Eval! # Eval!
......
...@@ -253,7 +253,7 @@ def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix="" ...@@ -253,7 +253,7 @@ def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate # multi-gpu evaluate
if args.n_gpu > 1: if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model) model = torch.nn.DataParallel(model)
# Eval! # Eval!
......
...@@ -427,7 +427,7 @@ def evaluate(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, prefi ...@@ -427,7 +427,7 @@ def evaluate(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, prefi
) )
# multi-gpu evaluate # multi-gpu evaluate
if args.n_gpu > 1: if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model) model = torch.nn.DataParallel(model)
# Eval! # Eval!
......
...@@ -256,7 +256,7 @@ def evaluate(args, model, tokenizer, prefix="", test=False): ...@@ -256,7 +256,7 @@ def evaluate(args, model, tokenizer, prefix="", test=False):
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate # multi-gpu evaluate
if args.n_gpu > 1: if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model) model = torch.nn.DataParallel(model)
# Eval! # Eval!
......
...@@ -266,7 +266,7 @@ def evaluate(args, model, tokenizer, prefix=""): ...@@ -266,7 +266,7 @@ def evaluate(args, model, tokenizer, prefix=""):
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval # multi-gpu eval
if args.n_gpu > 1: if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model) model = torch.nn.DataParallel(model)
# Eval! # Eval!
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment