Unverified Commit afea9913 authored by guoshzhao's avatar guoshzhao Committed by GitHub
Browse files

Benchmarks: Fix Bug - Set reduce_op type for metric return_code (#261)

**Description**
Set the `reduce_op` type for metric `return_code` as `None`.
parent ed2f3c3c
......@@ -33,6 +33,7 @@ def __init__(self, name, type, return_code, run_count=0):
self.__result = dict()
self.__result['return_code'] = [return_code.value]
self.__reduce_op = dict()
self.__reduce_op['return_code'] = None
def __eq__(self, rhs):
"""Override equal function for deep comparison.
......
......@@ -226,7 +226,7 @@ def test_train():
'"fp32_train_step_time": [[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]], '
'"fp32_train_throughput": [[16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0]]}, '
'"result": {"return_code": [0], "fp32_train_step_time": [2.0], "fp32_train_throughput": [16000.0]}, '
'"reduce_op": {"fp32_train_step_time": "max", "fp32_train_throughput": "min"}}'
'"reduce_op": {"return_code": null, "fp32_train_step_time": "max", "fp32_train_throughput": "min"}}'
)
assert (benchmark._preprocess())
assert (benchmark._ModelBenchmark__train(Precision.FLOAT32))
......@@ -236,7 +236,8 @@ def test_train():
benchmark = create_benchmark('--num_steps 0')
expected_result = (
'{"name": "pytorch-fake-model", "type": "model", "run_count": 1, "return_code": 3, '
'"start_time": null, "end_time": null, "raw_data": {}, "result": {"return_code": [3]}, "reduce_op": {}}'
'"start_time": null, "end_time": null, "raw_data": {}, '
'"result": {"return_code": [3]}, "reduce_op": {"return_code": null}}'
)
assert (benchmark._preprocess())
assert (benchmark._ModelBenchmark__train(Precision.FLOAT32) is False)
......@@ -253,7 +254,7 @@ def test_inference():
'"fp16_inference_throughput": [[8000.0, 8000.0, 8000.0, 8000.0, 8000.0, 8000.0, 8000.0, 8000.0]]}, '
'"result": {"return_code": [0], '
'"fp16_inference_step_time": [4.0], "fp16_inference_throughput": [8000.0]}, '
'"reduce_op": {"fp16_inference_step_time": null, "fp16_inference_throughput": null}}'
'"reduce_op": {"return_code": null, "fp16_inference_step_time": null, "fp16_inference_throughput": null}}'
)
assert (benchmark._preprocess())
assert (benchmark._ModelBenchmark__inference(Precision.FLOAT16))
......@@ -263,7 +264,8 @@ def test_inference():
benchmark = create_benchmark('--num_steps 0')
expected_result = (
'{"name": "pytorch-fake-model", "type": "model", "run_count": 1, "return_code": 3, '
'"start_time": null, "end_time": null, "raw_data": {}, "result": {"return_code": [3]}, "reduce_op": {}}'
'"start_time": null, "end_time": null, "raw_data": {}, '
'"result": {"return_code": [3]}, "reduce_op": {"return_code": null}}'
)
assert (benchmark._preprocess())
assert (benchmark._ModelBenchmark__inference(Precision.FLOAT16) is False)
......@@ -304,7 +306,7 @@ def test_benchmark():
'"fp16_train_throughput": [[16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0]]}, '
'"result": {"return_code": [0], "fp32_train_step_time": [2.0], "fp32_train_throughput": [16000.0], '
'"fp16_train_step_time": [2.0], "fp16_train_throughput": [16000.0]}, '
'"reduce_op": {"fp32_train_step_time": "max", "fp32_train_throughput": "min", '
'"reduce_op": {"return_code": null, "fp32_train_step_time": "max", "fp32_train_throughput": "min", '
'"fp16_train_step_time": "max", "fp16_train_throughput": "min"}}'
)
assert (benchmark.serialized_result == expected_serialized_result)
......
......@@ -148,7 +148,7 @@ def test_launch_benchmark():
'"return_code": 0, "start_time": null, "end_time": null, '
'"raw_data": {"accumulation_result": ["1,3,6,10"]}, '
'"result": {"return_code": [0], "accumulation_result": [10]}, '
'"reduce_op": {"accumulation_result": null}}'
'"reduce_op": {"return_code": null, "accumulation_result": null}}'
)
assert (result == expected)
......@@ -172,7 +172,7 @@ def test_launch_benchmark():
'"return_code": 0, "start_time": null, "end_time": null, '
'"raw_data": {"accumulation_result": ["1,3,6"]}, '
'"result": {"return_code": [0], "accumulation_result": [6]}, '
'"reduce_op": {"accumulation_result": null}}'
'"reduce_op": {"return_code": null, "accumulation_result": null}}'
)
assert (result == expected)
......
......@@ -86,6 +86,6 @@ def test_serialize_deserialize():
'"start_time": "2021-02-03 16:59:49", "end_time": "2021-02-03 17:00:08", '
'"raw_data": {"metric1": [[1, 2, 3], [4, 5, 6], [7, 8, 9]]}, '
'"result": {"return_code": [0], "metric1": [300, 200], "metric2": [100]}, '
'"reduce_op": {"metric1": "max", "metric2": "avg"}}'
'"reduce_op": {"return_code": null, "metric1": "max", "metric2": "avg"}}'
)
assert (result.to_string() == expected)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment