Unverified commit 3367c4f6, authored by Yuting Jiang and committed by GitHub

Benchmarks - Add support to allow a list of custom config strings in cudnn-functions and cublas-functions (#414)

**Description**
Add support to allow a list of custom config strings in the cudnn-functions and cublas-functions benchmarks.
parent 63e9b2d1
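
In effect, `--config_json_str` now takes one or more quoted JSON strings (argparse `nargs='+'`), and the benchmark issues a separate `--config_json` run for each entry. A minimal usage sketch, modeled on the tests added in this commit (the first config string is taken from the new test; the second is an illustrative variation, not a value from the repository):

```python
from superbench.benchmarks import BenchmarkRegistry, Platform

# First config comes from the new cublas test; the second is an illustrative variation.
config1 = '{"name":"cublasCgemm3mStridedBatched","m":64,"n":32,"k":3,"transa":0,"transb":1,"batchCount":544}'
config2 = '{"name":"cublasCgemm3mStridedBatched","m":128,"n":64,"k":8,"transa":0,"transb":1,"batchCount":256}'

context = BenchmarkRegistry.create_benchmark_context(
    'cublas-function',
    platform=Platform.CUDA,
    parameters='--num_warmup 10 --num_steps 10 --num_in_step 100 '
    f"--config_json_str '{config1}' '{config2}'"
)
benchmark = BenchmarkRegistry.launch_benchmark(context)
```

Each quoted string is parsed with `yaml.safe_load`, re-serialized without spaces, and appended as its own command, as the cublas and cudnn `_preprocess` diffs below show.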
@@ -28,15 +28,15 @@ jobs:
           - javascript
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v1
+        uses: github/codeql-action/init@v2
         with:
           languages: ${{ matrix.language }}
       - name: Autobuild
-        uses: github/codeql-action/autobuild@v1
+        uses: github/codeql-action/autobuild@v2
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v1
+        uses: github/codeql-action/analyze@v2
   analyze-cpp:
     name: CodeQL analyze cpp
     runs-on: ubuntu-latest
@@ -48,12 +48,12 @@ jobs:
       security-events: write
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v1
+        uses: github/codeql-action/init@v2
         with:
           languages: cpp
       - name: Build
         run: make cppbuild -j
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v1
+        uses: github/codeql-action/analyze@v2
@@ -154,7 +154,7 @@ def run(self):
         'numpy>=1.19.2',
         'omegaconf==2.0.6',
         'openpyxl>=3.0.7',
-        'pandas>=1.1.5',
+        'pandas==1.1.5',
         'pssh @ git+https://github.com/lilydjwg/pssh.git@v2.3.4',
         'pyyaml>=5.3',
         'requests>=2.27.1',
@@ -218,6 +218,7 @@ def add_parser_arguments(self):
         self._parser.add_argument(
             '--config_json_str',
             type=str,
+            nargs='+',
             default=None,
             required=False,
             help='The custom json string defining the params in a cublas function.',
@@ -246,7 +247,10 @@ def _preprocess(self):
                 self._commands.append(complete_command)
         else:
-            custom_config_str = yaml.safe_load(self._args.config_json_str)
-            config_json_str = "\'" + json.dumps(custom_config_str).replace(' ', '') + "\'"
-            complete_command = command + (' --config_json ') + config_json_str
-            self._commands.append(complete_command)
+            if not isinstance(self._args.config_json_str, list):
+                self._args.config_json_str = [self._args.config_json_str]
+            for config_json_str in self._args.config_json_str:
+                custom_config_str = yaml.safe_load(config_json_str)
+                config_json_str = "\'" + json.dumps(custom_config_str).replace(' ', '') + "\'"
+                complete_command = command + (' --config_json ') + config_json_str
+                self._commands.append(complete_command)
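
The same normalization is mirrored in the cudnn benchmark below. For clarity, here is a stand-alone sketch of the list handling (hypothetical helper name and illustrative config strings; the benchmark itself does this inline in `_preprocess`):

```python
import json

import yaml


def expand_config_json_str(config_json_str):
    """Hypothetical helper mirroring the loop above.

    Accepts a single JSON string or a list of them (what argparse produces
    with nargs='+') and returns one quoted --config_json value per entry.
    """
    if not isinstance(config_json_str, list):
        config_json_str = [config_json_str]
    values = []
    for s in config_json_str:
        cfg = yaml.safe_load(s)  # lenient JSON parsing, as in the benchmark code
        values.append("'" + json.dumps(cfg).replace(' ', '') + "'")
    return values


# Two configs in, two quoted --config_json values out.
print(expand_config_json_str([
    '{"name":"cublasSgemm","m":64,"n":64,"k":64,"transa":0,"transb":0}',
    '{"name":"cublasCgemm","m":32, "n":32, "k":32, "transa":1, "transb":0}',
]))
```

The `isinstance` guard presumably keeps a single plain string working when the value is injected directly rather than parsed by argparse.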
@@ -352,6 +352,7 @@ def add_parser_arguments(self):
         self._parser.add_argument(
             '--config_json_str',
             type=str,
+            nargs='+',
             default=None,
             required=False,
             help='The custom json string defining the params in a cudnn function.',
@@ -380,7 +381,10 @@ def _preprocess(self):
                 self._commands.append(complete_command)
         else:
-            custom_config_str = yaml.safe_load(self._args.config_json_str)
-            config_json_str = "\'" + json.dumps(custom_config_str).replace(' ', '') + "\'"
-            complete_command = command + (' --config_json ') + config_json_str
-            self._commands.append(complete_command)
+            if not isinstance(self._args.config_json_str, list):
+                self._args.config_json_str = [self._args.config_json_str]
+            for config_json_str in self._args.config_json_str:
+                custom_config_str = yaml.safe_load(config_json_str)
+                config_json_str = "\'" + json.dumps(custom_config_str).replace(' ', '') + "\'"
+                complete_command = command + (' --config_json ') + config_json_str
+                self._commands.append(complete_command)
@@ -80,3 +80,41 @@ def test_cublas_functions():
         assert (isinstance(benchmark.result[metric][0], numbers.Number))
         if metric != 'return_code':
             assert (len(benchmark.raw_data[metric][0]) == benchmark._args.num_steps)
+
+    # Test for custom list configuration
+    custom_config_str2 = '{"name":"cublasCgemm3mStridedBatched","m":64,"n":32,"k":3,' + \
+        '"transa":0,"transb":1,"batchCount":544}'
+    context = BenchmarkRegistry.create_benchmark_context(
+        'cublas-function',
+        platform=Platform.CUDA,
+        parameters='--num_warmup 10 --num_steps 10 --num_in_step 100 --config_json_str ' +
+        f"'{custom_config_str}' '{custom_config_str2}'"
+    )
+    assert (BenchmarkRegistry.is_benchmark_context_valid(context))
+    benchmark = BenchmarkRegistry.launch_benchmark(context)
+    # Check basic information.
+    assert (benchmark)
+    assert (benchmark.name == 'cublas-function')
+    assert (benchmark.type == BenchmarkType.MICRO)
+    # Check parameters specified in BenchmarkContext.
+    assert (benchmark._args.num_warmup == 10)
+    assert (benchmark._args.num_steps == 10)
+    assert (benchmark._args.num_in_step == 100)
+    # Check results and metrics.
+    assert (benchmark.run_count == 1)
+    assert (benchmark.return_code == ReturnCode.SUCCESS)
+    assert ('raw_output_0' in benchmark.raw_data)
+    assert (len(benchmark.raw_data['raw_output_0']) == 1)
+    assert (isinstance(benchmark.raw_data['raw_output_0'][0], str))
+    assert (2 + benchmark.default_metric_count == len(benchmark.result))
+    for metric in list(benchmark.result.keys()):
+        assert (len(benchmark.result[metric]) == 1)
+        assert (isinstance(benchmark.result[metric][0], numbers.Number))
+        if metric != 'return_code':
+            assert (len(benchmark.raw_data[metric][0]) == benchmark._args.num_steps)
@@ -84,3 +84,45 @@ def test_cudnn_functions():
         assert (isinstance(benchmark.result[metric][0], numbers.Number))
         if metric != 'return_code':
             assert (len(benchmark.raw_data[metric][0]) == benchmark._args.num_steps)
+
+    # Test for custom list configuration
+    custom_config_str2 = '{"algo":1,"arrayLength":2,"convType":0,"dilationA":[1,1],"filterStrideA":[1,1],' \
+        + '"filterDims":[32,128,3,3],"inputDims":[32,32,14,14],"inputStride":[6272, 196, 14, 1],"inputType":2,'\
+        + '"mode":1,"name":"cudnnConvolutionBackwardData","outputDims":[32, 128, 14, 14],'\
+        + '"outputStride":[25088, 196, 14, 1],"padA":[1,1],"tensorOp":true}'
+    context = BenchmarkRegistry.create_benchmark_context(
+        'cudnn-function',
+        platform=Platform.CUDA,
+        parameters='--num_warmup 10 --num_steps 10 --num_in_step 100 --config_json_str ' +
+        f"'{custom_config_str}' '{custom_config_str2}'"
+    )
+    assert (BenchmarkRegistry.is_benchmark_context_valid(context))
+    benchmark = BenchmarkRegistry.launch_benchmark(context)
+    # Check basic information.
+    assert (benchmark)
+    assert (benchmark.name == 'cudnn-function')
+    assert (benchmark.type == BenchmarkType.MICRO)
+    # Check parameters specified in BenchmarkContext.
+    assert (benchmark._args.num_warmup == 10)
+    assert (benchmark._args.num_steps == 10)
+    assert (benchmark._args.num_in_step == 100)
+    # Check results and metrics.
+    assert (benchmark.run_count == 1)
+    assert (benchmark.return_code == ReturnCode.SUCCESS)
+    assert ('raw_output_0' in benchmark.raw_data)
+    assert (len(benchmark.raw_data['raw_output_0']) == 1)
+    assert (isinstance(benchmark.raw_data['raw_output_0'][0], str))
+    assert (2 + benchmark.default_metric_count == len(benchmark.result))
+    for metric in list(benchmark.result.keys()):
+        assert (len(benchmark.result[metric]) == 1)
+        assert (isinstance(benchmark.result[metric][0], numbers.Number))
+        if metric != 'return_code':
+            assert (len(benchmark.raw_data[metric][0]) == benchmark._args.num_steps)