Unverified Commit 923ce277 authored by guoshzhao, committed by GitHub


Benchmarks: Code Revision - Revise BenchmarkRegistry interfaces for integration with executor. (#33)

* revise BenchmarkRegistry interfaces.
* address comments
Co-authored-by: Guoshuai Zhao <guzhao@microsoft.com>
parent 2871a68b
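
In short, callers no longer construct BenchmarkContext or call check_parameters() themselves; they ask the registry for a context and let launch_benchmark() do the validation. A minimal sketch of the revised flow, using only names that appear in the diff below (the error handling shown is illustrative, not part of this commit):

from superbench.benchmarks import Platform, Framework, BenchmarkRegistry, ReturnCode

# Build the context through the registry instead of instantiating BenchmarkContext directly.
context = BenchmarkRegistry.create_benchmark_context(
    'bert-large',
    platform=Platform.CUDA,
    parameters='--batch_size=1 --duration=120 --seq_len=512 --precision=float32 --run_count=2',
    framework=Framework.PYTORCH
)

# launch_benchmark() now performs the validation that check_parameters() used to do.
benchmark = BenchmarkRegistry.launch_benchmark(context)
if benchmark is None:
    print('benchmark not found for this context')
elif benchmark.return_code != ReturnCode.SUCCESS:
    print('benchmark failed with return code {}'.format(benchmark.return_code))
else:
    print(benchmark.serialized_result)

Per the updated tests, launch_benchmark() returns None when the benchmark cannot be found, and returns a benchmark whose return_code is ReturnCode.INVALID_ARGUMENT when the context carries unknown or invalid parameters.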
@@ -3,25 +3,21 @@
 """Model benchmark example for bert-large."""
 
-from superbench.benchmarks import Platform, Framework, BenchmarkRegistry, BenchmarkContext
+from superbench.benchmarks import Framework, BenchmarkRegistry
 from superbench.common.utils import logger
 
 
 if __name__ == '__main__':
     # Create context for bert-large benchmark and run it for 120 * 2 seconds.
-    context = BenchmarkContext(
+    context = BenchmarkRegistry.create_benchmark_context(
         'bert-large',
-        Platform.CUDA,
         parameters='--batch_size=1 --duration=120 --seq_len=512 --precision=float32 --run_count=2',
         framework=Framework.PYTORCH
     )
 
-    if BenchmarkRegistry.check_parameters(context):
-        benchmark = BenchmarkRegistry.launch_benchmark(context)
-        if benchmark:
-            logger.info(
-                'benchmark: {}, return code: {}, result: {}'.format(
-                    benchmark.name, benchmark.return_code, benchmark.result
-                )
-            )
-    else:
-        logger.error('bert-large benchmark does not exist or context/parameters are invalid.')
+    benchmark = BenchmarkRegistry.launch_benchmark(context)
+    if benchmark:
+        logger.info(
+            'benchmark: {}, return code: {}, result: {}'.format(
+                benchmark.name, benchmark.return_code, benchmark.result
+            )
+        )
@@ -74,14 +74,14 @@ def parse_args(self):
             logger.error('Invalid argument - benchmark: {}, message: {}.'.format(self._name, str(e)))
             return False, None, None
 
+        ret = True
         if len(unknown) > 0:
-            logger.warning(
-                'Benchmark has unknown arguments - benchmark: {}, unknown arguments: {}'.format(
-                    self._name, ' '.join(unknown)
-                )
+            logger.error(
+                'Unknown arguments - benchmark: {}, unknown arguments: {}'.format(self._name, ' '.join(unknown))
             )
+            ret = False
 
-        return True, args, unknown
+        return ret, args, unknown
 
     def _preprocess(self):
         """Preprocess/preparation operations before the benchmarking.
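
The practical effect of the new ret flag: unknown arguments now fail the parse instead of only producing a warning. A standalone sketch of the same contract, written with plain argparse rather than superbench code, purely for illustration:

import argparse

# Standalone illustration of the revised parse_args() contract:
# unknown arguments flip the returned flag to False instead of only logging a warning.
parser = argparse.ArgumentParser()
parser.add_argument('--lower_bound', type=int, default=0)


def parse_args(argv):
    args, unknown = parser.parse_known_args(argv)
    ret = len(unknown) == 0
    return ret, args, unknown


print(parse_args(['--lower_bound=1']))              # (True, Namespace(lower_bound=1), [])
print(parse_args(['--lower_bound=1', '--test=4']))  # (False, Namespace(lower_bound=1), ['--test=4'])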
@@ -105,32 +105,19 @@ def __get_benchmark_name(cls, benchmark_context):
         return benchmark_name
 
     @classmethod
-    def check_parameters(cls, benchmark_context):
-        """Check the validation of customized parameters.
+    def create_benchmark_context(cls, name, platform=Platform.CPU, parameters='', framework=Framework.NONE):
+        """Constructor.
 
         Args:
-            benchmark_context (BenchmarkContext): the benchmark context.
+            name (str): name of benchmark in config file.
+            platform (Platform): Platform types like Platform.CPU, Platform.CUDA, Platform.ROCM.
+            parameters (str): predefined parameters of benchmark.
+            framework (Framework): Framework types like Framework.PYTORCH, Framework.ONNX.
 
         Return:
-            Return True if benchmark exists and context/parameters are valid.
+            benchmark_context (BenchmarkContext): the benchmark context.
         """
-        if not cls.is_benchmark_context_valid(benchmark_context):
-            return False
-
-        benchmark_name = cls.__get_benchmark_name(benchmark_context)
-        platform = benchmark_context.platform
-        customized_parameters = benchmark_context.parameters
-
-        if benchmark_name:
-            (benchmark_class, params) = cls.__select_benchmark(benchmark_name, platform)
-            if benchmark_class:
-                benchmark = benchmark_class(benchmark_name, customized_parameters)
-                benchmark.add_parser_arguments()
-                ret, args, unknown = benchmark.parse_args()
-                if ret and len(unknown) < 1:
-                    return True
-
-        return False
+        return BenchmarkContext(name, platform, parameters, framework)
 
     @classmethod
     def get_benchmark_configurable_settings(cls, benchmark_context):
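
A short usage sketch of the new factory and its defaults; the signature and the .platform/.parameters attributes are taken from the hunk above, while the asserts themselves are illustrative:

from superbench.benchmarks import Platform, BenchmarkRegistry

# Only the benchmark name is required; platform defaults to Platform.CPU
# and parameters to an empty string, per the signature above.
context = BenchmarkRegistry.create_benchmark_context('accumulation')
assert (context.platform == Platform.CPU)
assert (context.parameters == '')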
@@ -3,8 +3,7 @@
 """Tests for BenchmarkRegistry module."""
 
-from superbench.benchmarks import Platform, Framework, Precision, \
-    BenchmarkContext, BenchmarkRegistry, BenchmarkType, ReturnCode
+from superbench.benchmarks import Platform, Framework, Precision, BenchmarkRegistry, BenchmarkType, ReturnCode
 from superbench.benchmarks.model_benchmarks import ModelBenchmark
@@ -111,7 +110,9 @@ def create_benchmark(params='--num_steps=8'):
         parameters='--hidden_size=2',
         platform=Platform.CUDA,
     )
-    context = BenchmarkContext('fake-model', Platform.CUDA, parameters=params, framework=Framework.PYTORCH)
+    context = BenchmarkRegistry.create_benchmark_context(
+        'fake-model', platform=Platform.CUDA, parameters=params, framework=Framework.PYTORCH
+    )
     name = BenchmarkRegistry._BenchmarkRegistry__get_benchmark_name(context)
     assert (name)
     (benchmark_class, predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(name, context.platform)
@@ -9,7 +9,7 @@
 import torch
 
 from superbench.common.utils import logger
-from superbench.benchmarks import BenchmarkRegistry, Precision, Platform, BenchmarkContext, ReturnCode
+from superbench.benchmarks import BenchmarkRegistry, Precision, ReturnCode
 from superbench.benchmarks.model_benchmarks.model_base import Optimizer, DistributedImpl, DistributedBackend
 from superbench.benchmarks.model_benchmarks.pytorch_base import PytorchBase
 from superbench.benchmarks.model_benchmarks.random_dataset import TorchRandomDataset
@@ -174,62 +174,58 @@ def test_pytorch_base():
     # Register BERT Base benchmark.
     BenchmarkRegistry.register_benchmark('pytorch-mnist', PytorchMNIST)
 
-    # Launch benchmark for testing.
-    context = BenchmarkContext(
+    # Launch benchmark with --no_gpu for testing.
+    context = BenchmarkRegistry.create_benchmark_context(
         'pytorch-mnist',
-        Platform.CPU,
         parameters='--batch_size=32 --num_warmup=8 --num_steps=64 --model_action train inference --no_gpu'
     )
 
-    assert (BenchmarkRegistry.check_parameters(context))
-
-    if BenchmarkRegistry.check_parameters(context):
-        benchmark = BenchmarkRegistry.launch_benchmark(context)
-        assert (benchmark.name == 'pytorch-mnist')
-        assert (benchmark.return_code == ReturnCode.SUCCESS)
+    benchmark = BenchmarkRegistry.launch_benchmark(context)
+    assert (benchmark)
+    assert (benchmark.name == 'pytorch-mnist')
+    assert (benchmark.return_code == ReturnCode.SUCCESS)
 
     # Test results.
     for metric in [
         'steptime_train_float32', 'steptime_inference_float32', 'throughput_train_float32',
         'throughput_inference_float32'
     ]:
         assert (len(benchmark.raw_data[metric]) == 1)
         assert (len(benchmark.raw_data[metric][0]) == 64)
         assert (len(benchmark.result[metric]) == 1)
         assert (isinstance(benchmark.result[metric][0], numbers.Number))
 
     # Test _cal_params_count().
     assert (benchmark._cal_params_count() == 1199882)
 
     # Test _judge_gpu_availability().
     assert (benchmark._gpu_available is False)
 
     # Test _init_distributed_setting().
     assert (benchmark._args.distributed_impl is None)
     assert (benchmark._args.distributed_backend is None)
     assert (benchmark._init_distributed_setting() is True)
     benchmark._args.distributed_impl = DistributedImpl.DDP
     benchmark._args.distributed_backend = DistributedBackend.NCCL
     assert (benchmark._init_distributed_setting() is False)
     benchmark._args.distributed_impl = DistributedImpl.MIRRORED
     assert (benchmark._init_distributed_setting() is False)
 
     # Test _init_dataloader().
     benchmark._args.distributed_impl = None
     assert (benchmark._init_dataloader() is True)
     benchmark._args.distributed_impl = DistributedImpl.DDP
     assert (benchmark._init_dataloader() is False)
     benchmark._args.distributed_impl = DistributedImpl.MIRRORED
     assert (benchmark._init_dataloader() is False)
 
     # Test _create_optimizer().
     assert (isinstance(benchmark._optimizer, torch.optim.AdamW))
     benchmark._optimizer_type = Optimizer.ADAM
     assert (benchmark._create_optimizer() is True)
     assert (isinstance(benchmark._optimizer, torch.optim.Adam))
     benchmark._optimizer_type = Optimizer.SGD
     assert (benchmark._create_optimizer() is True)
     assert (isinstance(benchmark._optimizer, torch.optim.SGD))
     benchmark._optimizer_type = None
     assert (benchmark._create_optimizer() is False)
@@ -3,21 +3,20 @@
 """Tests for BERT model benchmarks."""
 
-from superbench.benchmarks import BenchmarkRegistry, Precision, Platform, Framework, BenchmarkContext
+from superbench.benchmarks import BenchmarkRegistry, Precision, Platform, Framework
 import superbench.benchmarks.model_benchmarks.pytorch_bert as pybert
 
 
 def test_pytorch_bert_base():
     """Test pytorch-bert-base benchmark."""
-    context = BenchmarkContext(
+    context = BenchmarkRegistry.create_benchmark_context(
         'bert-base',
-        Platform.CUDA,
+        platform=Platform.CUDA,
         parameters='--batch_size=32 --num_classes=5 --seq_len=512',
         framework=Framework.PYTORCH
     )
 
     assert (BenchmarkRegistry.is_benchmark_context_valid(context))
-    assert (BenchmarkRegistry.check_parameters(context))
 
     benchmark_name = BenchmarkRegistry._BenchmarkRegistry__get_benchmark_name(context)
     assert (benchmark_name == 'pytorch-bert-base')
@@ -54,15 +53,14 @@ def test_pytorch_bert_base():
 def test_pytorch_bert_large():
     """Test pytorch-bert-large benchmark."""
-    context = BenchmarkContext(
+    context = BenchmarkRegistry.create_benchmark_context(
         'bert-large',
-        Platform.CUDA,
+        platform=Platform.CUDA,
         parameters='--batch_size=32 --num_classes=5 --seq_len=512',
         framework=Framework.PYTORCH
     )
 
     assert (BenchmarkRegistry.is_benchmark_context_valid(context))
-    assert (BenchmarkRegistry.check_parameters(context))
 
     benchmark_name = BenchmarkRegistry._BenchmarkRegistry__get_benchmark_name(context)
     assert (benchmark_name == 'pytorch-bert-large')
@@ -5,7 +5,7 @@
 import re
 
-from superbench.benchmarks import Platform, Framework, BenchmarkType, BenchmarkContext, BenchmarkRegistry, ReturnCode
+from superbench.benchmarks import Platform, Framework, BenchmarkType, BenchmarkRegistry, ReturnCode
 from superbench.benchmarks.micro_benchmarks import MicroBenchmark
@@ -60,21 +60,21 @@ def test_register_benchmark():
     # Register the benchmark for all platform if use default platform.
     BenchmarkRegistry.register_benchmark('accumulation', AccumulationBenchmark)
     for platform in Platform:
-        context = BenchmarkContext('accumulation', platform)
+        context = BenchmarkRegistry.create_benchmark_context('accumulation', platform=platform)
         assert (BenchmarkRegistry.is_benchmark_registered(context))
 
     # Register the benchmark for CUDA platform if use platform=Platform.CUDA.
     BenchmarkRegistry.register_benchmark('accumulation-cuda', AccumulationBenchmark, platform=Platform.CUDA)
-    context = BenchmarkContext('accumulation-cuda', Platform.CUDA)
+    context = BenchmarkRegistry.create_benchmark_context('accumulation-cuda', platform=Platform.CUDA)
     assert (BenchmarkRegistry.is_benchmark_registered(context))
-    context = BenchmarkContext('accumulation-cuda', Platform.ROCM)
+    context = BenchmarkRegistry.create_benchmark_context('accumulation-cuda', platform=Platform.ROCM)
     assert (BenchmarkRegistry.is_benchmark_registered(context) is False)
 
 
 def test_is_benchmark_context_valid():
     """Test interface BenchmarkRegistry.is_benchmark_context_valid()."""
     # Positive case.
-    context = BenchmarkContext('accumulation', Platform.CPU)
+    context = BenchmarkRegistry.create_benchmark_context('accumulation', platform=Platform.CPU)
     assert (BenchmarkRegistry.is_benchmark_context_valid(context))
 
     # Negative case.
@@ -94,25 +94,13 @@ def test_get_benchmark_name():
     # Test benchmark name for different Frameworks.
     benchmark_frameworks = [Framework.NONE, Framework.PYTORCH, Framework.TENSORFLOW1, Framework.ONNX]
     for i in range(len(benchmark_names)):
-        context = BenchmarkContext('accumulation', Platform.CPU, framework=benchmark_frameworks[i])
+        context = BenchmarkRegistry.create_benchmark_context(
+            'accumulation', platform=Platform.CPU, framework=benchmark_frameworks[i]
+        )
         name = BenchmarkRegistry._BenchmarkRegistry__get_benchmark_name(context)
         assert (name == benchmark_names[i])
 
 
-def test_check_parameters():
-    """Test interface BenchmarkRegistry.check_parameters()."""
-    # Register benchmarks for testing.
-    BenchmarkRegistry.register_benchmark('accumulation', AccumulationBenchmark)
-
-    # Positive case.
-    context = BenchmarkContext('accumulation', Platform.CPU, parameters='--lower_bound=1')
-    assert (BenchmarkRegistry.check_parameters(context))
-
-    # Negative case.
-    context = BenchmarkContext('accumulation', Platform.CPU, parameters='--lower=1')
-    assert (BenchmarkRegistry.check_parameters(context) is False)
-
-
 def test_get_benchmark_configurable_settings():
     """Test BenchmarkRegistry interface.
@@ -121,7 +109,7 @@ def test_get_benchmark_configurable_settings():
     # Register benchmarks for testing.
     BenchmarkRegistry.register_benchmark('accumulation', AccumulationBenchmark)
 
-    context = BenchmarkContext('accumulation', Platform.CPU)
+    context = BenchmarkRegistry.create_benchmark_context('accumulation', platform=Platform.CPU)
     settings = BenchmarkRegistry.get_benchmark_configurable_settings(context)
 
     expected = """optional arguments:
@@ -140,52 +128,71 @@ def test_launch_benchmark():
     )
 
     # Launch benchmark.
-    context = BenchmarkContext('accumulation', Platform.CPU, parameters='--lower_bound=1')
-
-    if BenchmarkRegistry.check_parameters(context):
-        benchmark = BenchmarkRegistry.launch_benchmark(context)
+    context = BenchmarkRegistry.create_benchmark_context(
+        'accumulation', platform=Platform.CPU, parameters='--lower_bound=1'
+    )
+
+    benchmark = BenchmarkRegistry.launch_benchmark(context)
     assert (benchmark)
     assert (benchmark.name == 'accumulation')
     assert (benchmark.type == BenchmarkType.MICRO)
     assert (benchmark.run_count == 1)
     assert (benchmark.return_code == ReturnCode.SUCCESS)
     assert (benchmark.raw_data == {'accumulation_result': ['1,3,6,10']})
     assert (benchmark.result == {'accumulation_result': [10]})
 
     # Replace the timestamp as null.
     result = re.sub(r'\"\d+-\d+-\d+ \d+:\d+:\d+\"', 'null', benchmark.serialized_result)
     expected = (
         '{"name": "accumulation", "type": "micro", "run_count": 1, '
         '"return_code": 0, "start_time": null, "end_time": null, '
         '"raw_data": {"accumulation_result": ["1,3,6,10"]}, '
         '"result": {"accumulation_result": [10]}}'
     )
     assert (result == expected)
 
     # Launch benchmark with overridden parameters.
-    context = BenchmarkContext('accumulation', Platform.CPU, parameters='--lower_bound=1 --upper_bound=4')
-    if BenchmarkRegistry.check_parameters(context):
-        benchmark = BenchmarkRegistry.launch_benchmark(context)
+    context = BenchmarkRegistry.create_benchmark_context(
+        'accumulation', platform=Platform.CPU, parameters='--lower_bound=1 --upper_bound=4'
+    )
+    benchmark = BenchmarkRegistry.launch_benchmark(context)
     assert (benchmark)
     assert (benchmark.name == 'accumulation')
     assert (benchmark.type == BenchmarkType.MICRO)
     assert (benchmark.run_count == 1)
     assert (benchmark.return_code == ReturnCode.SUCCESS)
     assert (benchmark.raw_data == {'accumulation_result': ['1,3,6']})
     assert (benchmark.result == {'accumulation_result': [6]})
 
     # Replace the timestamp as null.
     result = re.sub(r'\"\d+-\d+-\d+ \d+:\d+:\d+\"', 'null', benchmark.serialized_result)
     expected = (
         '{"name": "accumulation", "type": "micro", "run_count": 1, '
         '"return_code": 0, "start_time": null, "end_time": null, '
         '"raw_data": {"accumulation_result": ["1,3,6"]}, '
         '"result": {"accumulation_result": [6]}}'
     )
     assert (result == expected)
 
     # Failed to launch benchmark due to 'benchmark not found'.
-    context = BenchmarkContext(
+    context = BenchmarkRegistry.create_benchmark_context(
         'accumulation-fail', Platform.CPU, parameters='--lower_bound=1 --upper_bound=4', framework=Framework.PYTORCH
     )
-    assert (BenchmarkRegistry.check_parameters(context) is False)
+    benchmark = BenchmarkRegistry.launch_benchmark(context)
+    assert (benchmark is None)
+
+    # Failed to launch benchmark due to 'unknown arguments'.
+    context = BenchmarkRegistry.create_benchmark_context(
+        'accumulation', platform=Platform.CPU, parameters='--lower_bound=1 --test=4'
+    )
+    benchmark = BenchmarkRegistry.launch_benchmark(context)
+    assert (benchmark)
+    assert (benchmark.return_code == ReturnCode.INVALID_ARGUMENT)
+
+    # Failed to launch benchmark due to 'invalid arguments'.
+    context = BenchmarkRegistry.create_benchmark_context(
+        'accumulation', platform=Platform.CPU, parameters='--lower_bound=1 --upper_bound=x'
+    )
+    benchmark = BenchmarkRegistry.launch_benchmark(context)
+    assert (benchmark)
+    assert (benchmark.return_code == ReturnCode.INVALID_ARGUMENT)