Unverified Commit 4c5417f7 authored by Yifan Xiong's avatar Yifan Xiong Committed by GitHub
Browse files

Add num_workers argument in model benchmark (#511)

Make num_workers configurable in the model benchmark data loader.
parent 10380709
......@@ -78,6 +78,13 @@ def add_parser_arguments(self):
required=False,
help='The number of batch size.',
)
self._parser.add_argument(
'--num_workers',
type=int,
default=8,
required=False,
help='Number of subprocesses to use for data loading.',
)
self._parser.add_argument(
'--precision',
type=Precision,
......
......@@ -181,7 +181,7 @@ def _init_dataloader(self):
dataset=self._dataset,
batch_size=self._args.batch_size,
shuffle=False,
num_workers=8,
num_workers=self._args.num_workers,
sampler=train_sampler,
drop_last=True,
pin_memory=self._args.pin_memory
......
......@@ -167,6 +167,7 @@ def test_arguments_related_interfaces():
--no_gpu Disable GPU training.
--num_steps int The number of test step.
--num_warmup int The number of warmup step.
--num_workers int Number of subprocesses to use for data loading.
--pin_memory Enable option to pin memory in data loader.
--precision Precision [Precision ...]
Model precision. E.g. fp8_hybrid fp8_e4m3 fp8_e5m2
......@@ -206,6 +207,7 @@ def test_preprocess():
--no_gpu Disable GPU training.
--num_steps int The number of test step.
--num_warmup int The number of warmup step.
--num_workers int Number of subprocesses to use for data loading.
--pin_memory Enable option to pin memory in data loader.
--precision Precision [Precision ...]
Model precision. E.g. fp8_hybrid fp8_e4m3 fp8_e5m2
......
Markdown is supported
Attach a file by drag & drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment