"...git@developer.sourcefind.cn:OpenDAS/mmdetection3d.git" did not exist on "1185f0b1a09abe59756f73ac5ed16eaf5c9c381f"
Unverified Commit d612e976 authored by Shucai Xiao's avatar Shucai Xiao Committed by GitHub
Browse files

integrate onnx backend test suite into migraphx (#574)



* initial progress

* formatting

* add pooling changes

* formatting

* change eliminate_pad

* formatting

* rename var

* formatting

* update op shape test and compute

* formatting

* revert conv constructor

* formatting

* change initializer

* formatting

* fix tidy

* change quant conv and shape check

* add tests and fixes

* formatting

* fix type

* fix conv test

* formatting

* add pooling and bn tests

* formatting

* add inconsistent attr tests

* fix padding issue

* formatting

* progress on 1d to 2d

* formatting

* change compute and compile functions

* formatting

* fix duplicate

* fix conflict

* fix issue with 1d conv

* formatting

* add check for 3d limit

* rename function

* formatting

* update to MIOPen 2.3

* add support for nd pooling

* formatting

* test miopen 2.4

* change function name

* rename functions

* formatting

* add op_shape test

* add gpu ops tests

* formatting

* initial progress

* formatting

* add pkg-config

* add to support asymmetric padding of averagepool

* clang format

* fix bug for average pooling

* clang format

* fix a bug

* add unit tests for the asymmetric padding of averagepool

* clang format

* change functions

* formatting

* additional code refinement

* clang format

* check existing tests

* formatting

* change to copy_backward

* formatting

* change for loop to transform

* formatting

* add tests

* formatting

* remove comment

* add more tests

* remove an optimization for pooling

* clang format

* add and fix unit tests

* clang format

* update gpu miopen calls

* formatting

* initial progress

* add cpu impl and tests

* formatting

* add NOLINT

* add 3d test

* formatting

* add more op_shape tests

* test diff miopen version

* add submodule onnx

* add pooling shape tests

* fix error msg

* add onnx_test_backend

* reorganize python code

* temp disable test

* fix cppcheck error

* fix cppcheck error

* code backup

* add support device choice

* refine onnx backend test

* revert to miopen 2.4

* fix review comments

* fix review comments

* clang format

* fixed review comments

* clang format

* fix cppcheck error

* copy onnx_backend_test to dest when building

* add testdata folder

* fix bounds

* formatting

* code backup

* code backup

* remove unnecessary file

* fix various bugs

* remove unnecessary changes

* remove unnecessary submodule

* remove unnecessary lines

* fix algorithm

* formatting

* refine onnx backend unit tests

* pin numpy version

* fix build issue

* fixed a filename to be copied

* add the onnx dependency in docker image

* ensure results are copied back correctly

* specify onnx version

* update excluded tests

* remove unnecessary log info

* turn on more unit tests

* restrict onnx backend test to python 3.x

* clang format

* refine retrieving the input parameters

* clang format

* fix program input parameter names

* clang format

* avoid running onnx test in python 2.x

* fix cppcheck error

* fix python2.7 backend unit tests error

* clang format

* resolve the issue of ensure data copy to be completed

* clang format

* fix review comments

* fix onnx backend unit test error

* another change to make onnx backend test pass

* clang format

* fix onnx backend test error

* clang format

* disable onnx backend test to try

* build try

* update Dockerfile to try onnx backend test

* remove unnecessary code

* fix a bug in copying program

* clang format

* update dockerfile to include onnx

* fix review comments

* add the pytest module to the container

* exclude real model to avoid to be downloaded

* resolve the sync device for data copy from gpu to cpu

* clang format

* fix review comments

* clang format

* move sync_device after memory_coloring
Co-authored-by: default avatarKhalique <15948690+kahmed10@users.noreply.github.com>
Co-authored-by: default avatarmvermeulen <5479696+mvermeulen@users.noreply.github.com>
Co-authored-by: default avatarPaul Fultz II <pfultz2@yahoo.com>
parent 0a347dff
......@@ -192,8 +192,15 @@ rocm_create_package(
# Route all build outputs into well-known subdirectories of the build tree.
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/bin)
add_subdirectory(src)
add_subdirectory(doc)
add_subdirectory(test)
add_subdirectory(tools)
# Stage the Python ONNX-backend package next to the built artifacts so the
# onnx backend test can import it from the build tree without installing.
# NOTE(review): file(GLOB) misses newly added .py files until the next
# reconfigure; an explicit source list (or CONFIGURE_DEPENDS, 3.12+) would
# be more robust.
# NOTE(review): CMAKE_SOURCE_DIR/CMAKE_BINARY_DIR point at the top of the
# whole build; prefer CMAKE_CURRENT_SOURCE_DIR/CMAKE_CURRENT_BINARY_DIR if
# this project may ever be built as a subproject.
set(DEST_DIR ${CMAKE_BINARY_DIR})
file(GLOB backend_files ${CMAKE_SOURCE_DIR}/src/py/backend/*.py)
file(MAKE_DIRECTORY ${DEST_DIR}/lib/onnx_migraphx)
foreach(py_file ${backend_files})
    # COPYONLY: plain copy, no ${VAR} substitution inside the sources;
    # configure_file also re-copies when the source changes.
    configure_file(${py_file} ${DEST_DIR}/lib/onnx_migraphx/. COPYONLY)
endforeach(py_file)
configure_file(${CMAKE_SOURCE_DIR}/test/py/onnx_backend_test.py ${DEST_DIR}/onnx_backend_test.py COPYONLY)
......@@ -62,7 +62,7 @@ ENV LC_ALL=C.UTF-8
ENV LANG=C.UTF-8
# Install cget
RUN pip3 install cget && pip3 install numpy==1.18.5
RUN pip3 install cget
# Install rclone
RUN pip install https://github.com/pfultz2/rclone/archive/master.tar.gz
......@@ -90,6 +90,8 @@ ADD dev-requirements.txt /dev-requirements.txt
ADD requirements.txt /requirements.txt
RUN cget -p $PREFIX install -f /dev-requirements.txt -DMIOPEN_CACHE_DIR=""
RUN pip3 install onnx==1.7.0 numpy==1.18.5 typing==3.7.4 pytest==6.0.1
# Install newer cmake for onnx runtime
RUN cget -p /opt/cmake install kitware/cmake@v3.13.0
......
......@@ -90,6 +90,8 @@ struct program
instruction_ref add_return(std::vector<instruction_ref> args);
std::vector<std::string> get_parameter_names() const;
shape get_parameter_shape(std::string name) const;
instruction_ref get_parameter(std::string name) const;
......
......@@ -13,6 +13,7 @@
#include <algorithm>
#include <set>
#include <utility>
#include <unordered_set>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
......@@ -21,6 +22,7 @@ struct program_impl
{
// A list is used to keep references to an instruction stable
std::list<instruction> instructions;
std::vector<std::string> input_names;
context ctx;
};
......@@ -115,7 +117,8 @@ void program::assign(const program& p)
{
impl->instructions.clear();
}
impl->ctx = p.impl->ctx;
impl->ctx = p.impl->ctx;
impl->input_names = p.impl->input_names;
std::unordered_map<instruction_ref, instruction_ref> ins_map;
for(auto ins : iterator_for(p))
......@@ -284,6 +287,8 @@ instruction_ref program::add_outline(const shape& s)
instruction_ref program::add_parameter(std::string name, shape s)
{
    // A parameter with this name must not already exist in the program.
    assert(get_parameter_shape(name) == shape{});
    // Record declaration order so get_parameter_names() can return the
    // parameters in the order they were added (instructions is a
    // push_front list, so its order alone would be reversed).
    impl->input_names.push_back(name);
    impl->instructions.push_front({builtin::param{std::move(name)}, std::move(s), {}});
    return impl->instructions.begin();
}
......@@ -297,6 +302,7 @@ instruction_ref program::add_return(std::vector<instruction_ref> args)
auto result = std::prev(impl->instructions.end());
instruction::backreference(result);
assert(result->valid(begin()));
return result;
}
......@@ -319,6 +325,22 @@ shape program::get_parameter_shape(std::string name) const
return {};
}
std::vector<std::string> program::get_parameter_names() const
{
    // Collect the names of all @param instructions currently present.
    std::unordered_set<std::string> existing;
    for(const auto& ins : impl->instructions)
    {
        if(ins.name() != "@param")
            continue;
        existing.insert(any_cast<builtin::param>(ins.get_operator()).parameter);
    }
    // Walk input_names so the declaration order is preserved, keeping only
    // parameters that still exist in the program.
    std::vector<std::string> names;
    for(const auto& name : impl->input_names)
    {
        if(existing.count(name) > 0)
            names.push_back(name);
    }
    return names;
}
instruction_ref program::get_parameter(std::string name) const
{
auto ins = std::find_if(
......
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
from .backend import is_compatible, prepare, run, supports_device
# -------------------------------------------------------------------------
# Copyright (c) Advanced Micro Devices. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
"""
Implements ONNX's backend API.
"""
import sys
if sys.version_info < (3, 0):
sys.exit()
from onnx import ModelProto
from onnx.checker import check_model
from onnx.backend.base import Backend
import migraphx
from onnx_migraphx.backend_rep import MIGraphXBackendRep
def get_device():
    """Return the names of the devices this backend supports."""
    supported = ("CPU", "GPU")
    return supported
class MIGraphXBackend(Backend):
    """
    Implements
    `ONNX's backend API <https://github.com/onnx/onnx/blob/master/docs/ImplementingAnOnnxBackend.md>`_
    with *MIGraphX*.

    The backend is mostly used when you need to switch between
    multiple runtimes with the same API.
    Note: This is not the official Python API.
    """  # noqa: E501

    # Device used to compile programs; changed with set_device().
    _device = "GPU"
    # Parameter names (declaration order) of the most recently parsed
    # program; forwarded to MIGraphXBackendRep so positional inputs can be
    # mapped onto named program parameters.
    _input_names = []

    @classmethod
    def set_device(cls, device):
        """Select the device ('CPU' or 'GPU') used by subsequent prepare() calls."""
        cls._device = device

    @classmethod
    def is_compatible(cls, model, device=None, **kwargs):
        """
        Return whether the model is compatible with the backend.
        :param model: unused
        :param device: ignored; the class-level device is checked instead
        :return: boolean
        """
        device = cls._device
        return cls.supports_device(device)

    @classmethod
    def supports_device(cls, device):
        """
        Check whether the backend is compiled with particular device support.
        In particular it's used in the testing suite.
        """
        return device in get_device()

    @classmethod
    def prepare(cls, model, device=None, **kwargs):
        """
        Load the model and creates a :class:`migraphx.program`
        ready to be used as a backend.

        :param model: ModelProto (returned by `onnx.load`),
            string for a filename or bytes for a serialized model
        :param device: requested device for the computation,
            None means the default one which depends on
            the compilation settings
        :param kwargs: ignored; accepted for backend-API compatibility
        :return: :class:`MIGraphXBackendRep`
        """
        if isinstance(model, MIGraphXBackendRep):
            return model
        elif isinstance(model, migraphx.program):
            return MIGraphXBackendRep(model, cls._input_names)
        elif isinstance(model, (str, bytes)):
            # BUG FIX: the previous code looped over kwargs calling
            # hasattr(options, k) on an undefined name `options`, raising
            # NameError whenever any kwarg was passed. Extra kwargs carry
            # no meaning for MIGraphX and are now simply ignored.
            if device is not None and not cls.supports_device(device):
                raise RuntimeError(
                    "Incompatible device expected '{0}', got '{1}'".format(
                        device, get_device()))
            inf = migraphx.parse_onnx_buffer(model)
            device = cls._device
            # Remember the parameter order before compilation so positional
            # inputs can be matched later.
            cls._input_names = inf.get_parameter_names()
            inf.compile(migraphx.get_target(device.lower()))
            return cls.prepare(inf, device, **kwargs)
        else:
            # type: ModelProto
            check_model(model)
            # Renamed from `bin` to avoid shadowing the builtin.
            serialized = model.SerializeToString()
            return cls.prepare(serialized, device, **kwargs)

    @classmethod
    def run_model(cls, model, inputs, device=None, **kwargs):
        """
        Compute the prediction.
        :param model: :class:`migraphx.program` returned
            by function *prepare*
        :param inputs: inputs
        :param device: requested device for the computation,
            None means the default one which depends on
            the compilation settings
        :param kwargs: forwarded to *prepare* and *run*
        :return: predictions
        """
        rep = cls.prepare(model, device, **kwargs)
        return rep.run(inputs, **kwargs)

    @classmethod
    def run_node(cls, node, inputs, device=None, outputs_info=None, **kwargs):
        '''
        This method is not implemented as it is much more efficient
        to run a whole model than every node independently.
        '''
        raise NotImplementedError(
            "It is much more efficient to run a whole model than every node independently."
        )
# Module-level aliases: the ONNX backend API entry points that
# onnx.backend.test and user code look up on the module.
is_compatible = MIGraphXBackend.is_compatible
prepare = MIGraphXBackend.prepare
run = MIGraphXBackend.run_model
supports_device = MIGraphXBackend.supports_device
# -------------------------------------------------------------------------
# Copyright (c) Advanced Micro Device Inc. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
"""
Implements ONNX's backend API.
"""
import sys
if sys.version_info < (3, 0):
sys.exit()
import migraphx
from onnx.backend.base import BackendRep
import numpy as np
from typing import Any, Tuple
class MIGraphXBackendRep(BackendRep):
    """
    Computes the prediction for an ONNX model compiled into
    a :class:`migraphx.program`.
    """

    def __init__(self, prog, input_names):
        """
        :param prog: compiled :class:`migraphx.program`
        :param input_names: program parameter names in declaration order,
            used to map positional inputs onto named parameters
        """
        self._program = prog
        self._input_names = input_names

    def run(self, inputs, **kwargs):  # type: (Any, **Any) -> Tuple[Any, ...]
        """
        Computes the prediction.
        See :meth:`migraphx.program.run`.

        :param inputs: a list of arrays matched positionally against the
            recorded input names, or a single array for one-input models
        :return: list of numpy arrays holding the program outputs
        """
        if isinstance(inputs, list):
            inps = {}
            for i, name in enumerate(self._input_names):
                inps[name] = migraphx.argument(inputs[i])
            mgx_outputs = self._program.run(inps)
            return [np.array(out) for out in mgx_outputs]
        else:
            # Single (non-list) input: the model must have exactly one
            # parameter.
            # BUG FIX: dict keys views are not subscriptable in Python 3;
            # materialize the view before indexing.
            param_names = list(self._program.get_parameter_shapes().keys())
            if len(param_names) != 1:
                raise RuntimeError("Model expect {0} inputs".format(
                    len(param_names)))
            inps = {param_names[0]: migraphx.argument(inputs)}
            mgx_outputs = self._program.run(inps)
            # BUG FIX: previously this branch ran the program a second time
            # and returned its raw arguments, discarding the numpy
            # conversion above; return the converted outputs, matching the
            # list-input branch.
            return [np.array(out) for out in mgx_outputs]
......@@ -166,6 +166,7 @@ PYBIND11_MODULE(migraphx, m)
py::class_<migraphx::program>(m, "program")
.def("clone", [](migraphx::program& p) { return *(new migraphx::program(p)); })
.def("get_parameter_names", &migraphx::program::get_parameter_names)
.def("get_parameter_shapes", &migraphx::program::get_parameter_shapes)
.def("get_output_shapes", &migraphx::program::get_output_shapes)
.def("compile",
......@@ -210,6 +211,26 @@ PYBIND11_MODULE(migraphx, m)
py::arg("skip_unknown_operators") = false,
py::arg("print_program_on_error") = false);
m.def("parse_onnx_buffer",
[](const std::string& onnx_buffer,
unsigned int default_dim_value,
std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims,
bool skip_unknown_operators,
bool print_program_on_error) {
migraphx::onnx_options options;
options.default_dim_value = default_dim_value;
options.map_input_dims = map_input_dims;
options.skip_unknown_operators = skip_unknown_operators;
options.print_program_on_error = print_program_on_error;
return migraphx::parse_onnx_buffer(onnx_buffer, options);
},
"Parse onnx file",
py::arg("filename"),
py::arg("default_dim_value") = 1,
py::arg("map_input_dims") = std::unordered_map<std::string, std::vector<std::size_t>>(),
py::arg("skip_unknown_operators") = false,
py::arg("print_program_on_error") = false);
m.def("get_target", [](const std::string& name) -> migraphx::target {
if(name == "cpu")
return migraphx::cpu::target{};
......
......@@ -125,6 +125,7 @@ add_library(migraphx_gpu
gemm_impl.cpp
preallocate_param.cpp
rnn_variable_seq_lens.cpp
sync_device.cpp
)
set_target_properties(migraphx_gpu PROPERTIES EXPORT_NAME gpu)
rocm_set_soversion(migraphx_gpu ${MIGRAPHX_SO_VERSION})
......
......@@ -54,7 +54,7 @@ struct hip_allocate
}
};
struct hip_sync
struct hip_sync_device
{
std::string tag{};
......@@ -64,21 +64,13 @@ struct hip_sync
return pack(f(self.tag, "tag"));
}
std::string name() const { return "hip::sync"; }
shape compute_shape(const std::vector<shape>& inputs) const
{
if(inputs.empty())
return {};
else
return inputs.front();
}
argument compute(context&, const shape&, const std::vector<argument>& args) const
std::string name() const { return "hip::sync_device"; }
shape compute_shape(const std::vector<shape>&) const { return {}; }
argument compute(context&, const shape&, const std::vector<argument>&) const
{
gpu_sync();
if(args.empty())
return {};
else
return args.front();
return {};
}
};
......@@ -126,6 +118,7 @@ struct hip_copy_from_gpu
return result;
}
copy_from_gpu(ctx, args[0], args[1]);
return args[1];
}
std::ptrdiff_t output_alias(const std::vector<shape>& args) const
......
#ifndef MIGRAPHX_GUARD_RTGLIB_GPU_SYNC_DEVICE_HPP
#define MIGRAPHX_GUARD_RTGLIB_GPU_SYNC_DEVICE_HPP
#include <string>
#include <migraphx/instruction_ref.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/config.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
struct program;
namespace gpu {
// Compiler pass that inserts a device-wide synchronization before the final
// @return when results are copied back from the GPU, so host-visible buffers
// are complete before the caller reads them (implementation in sync_device.cpp).
struct sync_device
{
    std::string name() const { return "sync_device"; }
    void apply(program& p) const;
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#include <migraphx/gpu/sync_device.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/iterator_for.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
void sync_device::apply(program& p) const
{
    // Only programs that end in an explicit @return need a sync.
    auto ret = std::prev(p.end());
    if(ret->name() != "@return")
        return;
    // If any returned value is produced by a device-to-host copy, insert an
    // explicit device sync before the return so those copies have completed.
    const auto& ret_inputs = ret->inputs();
    bool copies_from_gpu =
        std::any_of(ret_inputs.begin(), ret_inputs.end(), [](auto ins) {
            return ins->name() == "hip::copy_from_gpu";
        });
    if(copies_from_gpu)
        p.insert_instruction(ret, hip_sync_device{}, ret_inputs);
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......@@ -24,6 +24,7 @@
#include <migraphx/gpu/adjust_allocation.hpp>
#include <migraphx/gpu/preallocate_param.hpp>
#include <migraphx/gpu/pack_int8_args.hpp>
#include <migraphx/gpu/sync_device.hpp>
#include <migraphx/eliminate_pad.hpp>
#include <migraphx/decompose.hpp>
#include <migraphx/remap.hpp>
......@@ -78,6 +79,7 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
write_literals{&ctx},
schedule{gpu::schedule_model{ctx.get_current_device().nstreams()}, not enabled(MIGRAPHX_DISABLE_SCHEDULE_PASS{})},
memory_coloring{"hip::allocate"},
sync_device{},
preallocate_param{"scratch", &ctx},
dead_code_elimination{},
eliminate_workspace{},
......
......@@ -82,6 +82,8 @@ TEST_CASE(program_copy)
p1.compile(migraphx::cpu::target{});
EXPECT(p1 == p2);
EXPECT(p1.get_parameter_names() == p2.get_parameter_names());
}
{
......
......@@ -22,4 +22,5 @@ if(MIGRAPHX_ENABLE_GPU)
add_py_test(gpu_offload test_gpu_offload.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(gpu test_gpu.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(array test_array.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(backend onnx_backend_test.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
endif()
import sys
if sys.version_info < (3, 0):
sys.exit()
import argparse
import os
import platform
import unittest
import onnx
import onnx.backend.test
import numpy as np
from onnx_migraphx.backend import MIGraphXBackend as c2
pytest_plugins = 'onnx.backend.test.report',
class MIGraphXBackendTest(onnx.backend.test.BackendTest):
    """ONNX backend test harness for MIGraphX with custom output comparison."""

    def __init__(self, backend, parent_module=None):
        super(MIGraphXBackendTest, self).__init__(backend, parent_module)

    @classmethod
    def assert_similar_outputs(cls, ref_outputs, outputs, rtol, atol):
        """
        Compare reference and actual outputs element-wise.

        The rtol/atol arguments from the test framework are deliberately
        ignored in favor of fixed tolerances (rtol=1e-3, atol=1e-5) suited
        to GPU execution.
        """
        np.testing.assert_equal(len(ref_outputs), len(outputs))
        for i in range(len(outputs)):
            np.testing.assert_equal(ref_outputs[i].dtype, outputs[i].dtype)
            # BUG FIX: np.object was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin `object` is the supported spelling and is
            # equivalent as a dtype comparison.
            if ref_outputs[i].dtype == object:
                np.testing.assert_array_equal(ref_outputs[i], outputs[i])
            else:
                np.testing.assert_allclose(ref_outputs[i],
                                           outputs[i],
                                           rtol=1e-3,
                                           atol=1e-5)
def create_backend_test(testname=None, target_device=None):
    """Build the filtered ONNX backend test suite for MIGraphX.

    :param testname: if given, only tests matching this regex prefix are
        included; otherwise the curated include/exclude lists below apply.
    :param target_device: optional device name forwarded to the backend.
    :return: the configured BackendTest instance (test cases are also
        injected into this module's globals for unittest discovery).
    """
    if target_device is not None:
        c2.set_device(target_device)
    backend_test = MIGraphXBackendTest(c2, __name__)
    if testname:
        backend_test.include(testname + '.*')
    else:
        # Include all of the nodes that we support.
        # Onnx native node tests
        backend_test.include(r'.*test_abs.*')
        backend_test.include(r'.*test_acos.*')
        backend_test.include(r'.*test_acosh.*')
        backend_test.include(r'.*test_add.*')
        backend_test.include(r'.*test_argmax.*')
        backend_test.include(r'.*test_argmin.*')
        backend_test.include(r'.*test_asin.*')
        backend_test.include(r'.*test_asinh.*')
        backend_test.include(r'.*test_atan.*')
        backend_test.include(r'.*test_atanh.*')
        backend_test.include(r'.*test_averagepool.*')
        backend_test.include(r'.*test_AvgPool.*')
        backend_test.include(r'.*test_BatchNorm.*eval.*')
        backend_test.include(r'.*test_ceil.*')
        backend_test.include(r'.*test_clip.*')
        backend_test.include(r'.*test_concat.*')
        backend_test.include(r'.*test_constant.*')
        backend_test.include(r'.*test_Conv[1-3]d*')
        backend_test.include(r'.*test_cos.*')
        backend_test.include(r'.*test_cosh.*')
        backend_test.include(r'.*test_depthtospace.*')
        backend_test.include(r'.*test_div.*')
        backend_test.include(r'.*test_dropout.*')
        backend_test.include(r'.*test_ELU*')
        backend_test.include(r'.*test_elu.*')
        backend_test.include(r'.*test_equal.*')
        backend_test.include(r'.*test_Embedding*')
        backend_test.include(r'.*test_exp.*')
        backend_test.include(r'.*test_flatten.*')
        backend_test.include(r'.*test_floor.*')
        backend_test.include(r'.*test_gather.*')
        backend_test.include(r'.*test_gemm.*')
        backend_test.include(r'.*test_globalaveragepool.*')
        backend_test.include(r'.*test_globalmaxpool.*')
        backend_test.include(r'.*test_greater.*')
        backend_test.include(r'.*test_hardsigmoid.*')
        backend_test.include(r'.*test_identity.*')
        backend_test.include(r'.*test_LeakyReLU*')
        backend_test.include(r'.*test_leakyrelu.*')
        backend_test.include(r'.*test_less.*')
        backend_test.include(r'.*test_Linear.*')
        backend_test.include(r'.*test_log.*')
        backend_test.include(r'.*test_logsoftmax.*')
        backend_test.include(r'.*test_LogSoftmax.*')
        backend_test.include(r'.*test_log_softmax.*')
        backend_test.include(r'.*test_lrn.*')
        backend_test.include(r'.*test_matmul.*')
        backend_test.include(r'.*test_max.*')
        backend_test.include(r'.*test_MaxPool[1-9]d.*')
        backend_test.include(r'.*test_mean.*')
        backend_test.include(r'.*test_min.*')
        backend_test.include(r'.*test_mul.*')
        backend_test.include(r'.*test_neg.*')
        backend_test.include(r'.*test_not.*')
        backend_test.include(r'.*test_operator_addmm.*')
        backend_test.include(r'.*test_operator_basic.*')
        backend_test.include(r'.*test_operator_chunk.*')
        backend_test.include(r'.*test_operator_clip.*')
        backend_test.include(r'.*test_operator_concat2.*')
        backend_test.include(r'.*test_operator_conv_.*')
        backend_test.include(r'.*test_operator_exp.*')
        backend_test.include(r'.*test_operator_flatten.*')
        backend_test.include(r'.*test_operator_index.*')
        backend_test.include(r'.*test_operator_max_.*')
        backend_test.include(r'.*test_operator_maxpool.*')
        backend_test.include(r'.*test_operator_min.*')
        backend_test.include(r'.*test_operator_mm.*')
        backend_test.include(r'.*test_operator_non_float_params.*')
        backend_test.include(r'.*test_operator_params.*')
        backend_test.include(r'.*test_operator_permute2.*')
        backend_test.include(r'.*test_operator_pow.*')
        backend_test.include(r'.*test_operator_reduced_mean_.*')
        backend_test.include(r'.*test_operator_reduced_mean_keepdim.*')
        backend_test.include(r'.*test_operator_reduced_sum_.*')
        backend_test.include(r'.*test_operator_reduced_sum_keepdim.*')
        backend_test.include(r'.*test_operator_selu.*')
        backend_test.include(r'.*test_operator_sqrt.*')
        backend_test.include(r'.*test_operator_symbolic_override.*')
        backend_test.include(r'.*test_operator_symbolic_override_nested.*')
        backend_test.include(r'.*test_operator_view.*')
        backend_test.include(r'.*test_pow.*')
        backend_test.include(r'.*test_PoissonNLLLLoss_no_reduce*')
        backend_test.include(r'.*test_reciprocal.*')
        backend_test.include(r'.*test_reduce.*')
        backend_test.include(r'.*test_ReLU*')
        backend_test.include(r'.*test_relu.*')
        backend_test.include(r'.*test_selu.*')
        backend_test.include(r'.*test_shape.*')
        backend_test.include(r'.*test_Sigmoid*')
        backend_test.include(r'.*test_sigmoid.*')
        backend_test.include(r'.*test_sin.*')
        backend_test.include(r'.*test_sinh.*')
        backend_test.include(r'.*test_size.*')
        backend_test.include(r'.*test_Softmax*')
        backend_test.include(r'.*test_softmax.*')
        backend_test.include(r'.*test_Softmin*')
        backend_test.include(r'.*test_Softplus*')
        backend_test.include(r'.*test_softplus.*')
        backend_test.include(r'.*test_softsign.*')
        backend_test.include(r'.*test_sqrt.*')
        backend_test.include(r'.*test_squeeze_cuda')
        backend_test.include(r'.*test_sub.*')
        backend_test.include(r'.*test_sum.*')
        backend_test.include(r'.*test_tan.*')
        backend_test.include(r'.*test_Tanh*')
        backend_test.include(r'.*test_tanh.*')
        backend_test.include(r'.*test_thresholdedrelu.*')
        backend_test.include(r'.*test_transpose.*')
        backend_test.include(r'.*test_unsqueeze.*')
        backend_test.include(r'.*test_ZeroPad2d*')
        # # Onnx native model tests
        backend_test.include(r'.*test_bvlc_alexnet.*')
        backend_test.include(r'.*test_densenet121.*')
        backend_test.include(r'.*test_inception_v1.*')
        backend_test.include(r'.*test_inception_v2.*')
        backend_test.include(r'.*test_resnet50.*')
        backend_test.include(r'.*test_shufflenet.*')
        backend_test.include(r'.*test_squeezenet.*')
        backend_test.include(r'.*test_vgg19.*')
        backend_test.include(r'.*test_zfnet512.*')

        # exclude unenabled ops get pulled in with wildcards
        # test_constant_pad gets pulled in with the test_constant* wildcard. Explicitly disable padding tests for now.
        # NOTE(review): comment inherited from the TensorRT backend; applies
        # to MIGraphX here.
        # Operator MATMULINTEGER is not supported by TRT
        backend_test.exclude(r'.*test_matmulinteger.*')
        backend_test.exclude(r'.*test_maxunpool.*')

        # Absolute diff failed because
        # numpy compares the difference between actual and desired to atol + rtol * abs(desired)
        # failed test cases
        backend_test.exclude(
            r'test_argmax_keepdims_example_select_last_index_cpu')
        backend_test.exclude(
            r'test_argmax_negative_axis_keepdims_example_select_last_index_cpu'
        )
        backend_test.exclude(
            r'test_argmax_no_keepdims_example_select_last_index_cpu')
        backend_test.exclude(
            r'test_argmin_keepdims_example_select_last_index_cpu')
        backend_test.exclude(
            r'test_argmin_negative_axis_keepdims_example_select_last_index_cpu'
        )
        backend_test.exclude(
            r'test_argmin_no_keepdims_example_select_last_index_cpu')
        backend_test.exclude(r'test_dropout_default_mask_cpu')
        backend_test.exclude(r'test_dropout_default_mask_ratio_cpu')
        backend_test.exclude(r'test_logsoftmax_axis_0_cpu')
        backend_test.exclude(r'test_logsoftmax_axis_1_cpu')
        backend_test.exclude(r'test_logsoftmax_default_axis_cpu')
        backend_test.exclude(r'test_lrn_cpu')
        backend_test.exclude(r'test_lrn_default_cpu')
        backend_test.exclude(r'test_maxpool_2d_dilations_cpu')
        backend_test.exclude(
            r'test_maxpool_with_argmax_2d_precomputed_pads_cpu')
        backend_test.exclude(
            r'test_maxpool_with_argmax_2d_precomputed_strides_cpu')
        backend_test.exclude(r'test_softmax_axis_0_cpu')
        backend_test.exclude(r'test_softmax_axis_1_cpu')
        backend_test.exclude(r'test_softmax_default_axis_cpu')

        # error cases
        backend_test.exclude(r'test_averagepool_2d_ceil_cpu')
        backend_test.exclude(r'test_clip_default_inbounds_cpu')
        backend_test.exclude(r'test_clip_default_int8_inbounds_cpu')
        backend_test.exclude(r'test_clip_default_int8_max_cpu')
        backend_test.exclude(r'test_clip_default_max_cpu')
        backend_test.exclude(r'test_constant_pad_cpu')
        backend_test.exclude(r'test_constantofshape_float_ones_cpu')
        backend_test.exclude(r'test_constantofshape_int_shape_zero_cpu')
        backend_test.exclude(r'test_constantofshape_int_zeros_cpu')
        backend_test.exclude(r'test_depthtospace_crd_mode_cpu')
        backend_test.exclude(r'test_depthtospace_crd_mode_example_cpu')
        backend_test.exclude(r'test_depthtospace_dcr_mode_cpu')
        backend_test.exclude(r'test_depthtospace_example_cpu')
        backend_test.exclude(r'test_equal_bcast_cpu')
        backend_test.exclude(r'test_equal_cpu')
        backend_test.exclude(r'test_expand_dim_changed_cpu')
        backend_test.exclude(r'test_expand_dim_unchanged_cpu')
        backend_test.exclude(r'test_gather_0_cpu')
        backend_test.exclude(r'test_gather_1_cpu')
        backend_test.exclude(r'test_gather_elements_0_cpu')
        backend_test.exclude(r'test_gather_elements_1_cpu')
        backend_test.exclude(r'test_gather_elements_negative_indices_cpu')
        backend_test.exclude(r'test_gather_negative_indices_cpu')
        backend_test.exclude(r'test_gathernd_example_float32_cpu')
        backend_test.exclude(r'test_gathernd_example_int32_batch_dim1_cpu')
        backend_test.exclude(r'test_gathernd_example_int32_cpu')
        backend_test.exclude(r'test_greater_bcast_cpu')
        backend_test.exclude(r'test_greater_cpu')
        backend_test.exclude(r'test_greater_equal_bcast_cpu')
        backend_test.exclude(r'test_greater_equal_bcast_expanded_cpu')
        backend_test.exclude(r'test_greater_equal_cpu')
        backend_test.exclude(r'test_greater_equal_expanded_cpu')
        backend_test.exclude(r'test_hardsigmoid_cpu')
        backend_test.exclude(r'test_hardsigmoid_default_cpu')
        backend_test.exclude(r'test_hardsigmoid_example_cpu')
        backend_test.exclude(r'test_less_bcast_cpu')
        backend_test.exclude(r'test_less_cpu')
        backend_test.exclude(r'test_less_equal_bcast_cpu')
        backend_test.exclude(r'test_less_equal_bcast_expanded_cpu')
        backend_test.exclude(r'test_less_equal_cpu')
        backend_test.exclude(r'test_less_equal_expanded_cpu')
        backend_test.exclude(r'test_max_float16_cpu')
        backend_test.exclude(r'test_max_int64_cpu')
        backend_test.exclude(r'test_max_uint64_cpu')
        backend_test.exclude(r'test_maxpool_2d_ceil_cpu')
        backend_test.exclude(r'test_maxpool_2d_uint8_cpu')
        backend_test.exclude(r'test_mean_example_cpu')
        backend_test.exclude(r'test_mean_one_input_cpu')
        backend_test.exclude(r'test_mean_two_inputs_cpu')
        backend_test.exclude(r'test_min_float16_cpu')
        backend_test.exclude(r'test_min_int64_cpu')
        backend_test.exclude(r'test_min_uint64_cpu')
        # NOTE(review): the '_*' suffix in the next pattern means
        # "zero-or-more underscores"; it works as a prefix match but '.*'
        # was probably intended (same for the _* patterns below).
        backend_test.exclude(r'test_negative_log_likelihood_loss_*')
        backend_test.exclude(r'test_not_2d_cpu')
        backend_test.exclude(r'test_not_3d_cpu')
        backend_test.exclude(r'test_not_4d_cpu')
        backend_test.exclude(r'test_pow_types_*')
        backend_test.exclude(r'test_selu_cpu')
        backend_test.exclude(r'test_selu_default_cpu')
        backend_test.exclude(r'test_selu_example_cpu')
        backend_test.exclude(r'test_size_cpu')
        backend_test.exclude(r'test_size_example_cpu')
        backend_test.exclude(r'test_softmax_cross_entropy_*')
        backend_test.exclude(r'test_softplus_cpu')
        backend_test.exclude(r'test_softplus_example_cpu')
        backend_test.exclude(r'test_softsign_cpu')
        backend_test.exclude(r'test_softsign_example_cpu')
        backend_test.exclude(r'test_thresholdedrelu_cpu')
        backend_test.exclude(r'test_thresholdedrelu_default_cpu')
        backend_test.exclude(r'test_thresholdedrelu_example_cpu')
        backend_test.exclude(r'test_Embedding_cpu')
        backend_test.exclude(r'test_Embedding_sparse_cpu')
        backend_test.exclude(r'test_Softplus_cpu')
        backend_test.exclude(r'test_operator_non_float_params_cpu')
        backend_test.exclude(r'test_operator_selu_cpu')
        backend_test.exclude(r'test_expand_shape_model1_cpu')
        backend_test.exclude(r'test_expand_shape_model2_cpu')
        backend_test.exclude(r'test_expand_shape_model3_cpu')
        backend_test.exclude(r'test_expand_shape_model4_cpu')
        # Real-model tests are excluded so no model downloads happen in CI.
        backend_test.exclude(r'test_bvlc_alexnet_cpu')
        backend_test.exclude(r'test_densenet121_cpu')
        backend_test.exclude(r'test_inception_v1_cpu')
        backend_test.exclude(r'test_inception_v2_cpu')
        backend_test.exclude(r'test_resnet50_cpu')
        backend_test.exclude(r'test_shufflenet_cpu')
        backend_test.exclude(r'test_squeezenet_cpu')
        backend_test.exclude(r'test_vgg19_cpu')
        backend_test.exclude(r'test_zfnet512_cpu')

    # import all test cases at global scope to make
    # them visible to python.unittest.
    globals().update(backend_test.enable_report().test_cases)

    return backend_test
def parse_args():
    """Parse the script-specific command-line options.

    Any options not recognized here are put back into sys.argv so that
    unittest.main() can consume them later.
    """
    parser = argparse.ArgumentParser(
        os.path.basename(__file__),
        description='Run the ONNX backend tests using MIGraphX.')

    # Add an argument to match a single test name, by adding the name to the
    # 'include' filter. Using -k with python unittest
    # (https://docs.python.org/3/library/unittest.html#command-line-options)
    # doesn't work as it filters on the test method name
    # (Runner._add_model_test) rather than individual test case names.
    parser.add_argument(
        '-t',
        '--test-name',
        dest='testname',
        type=str,
        help=
        "Only run tests that match this value. Matching is regex based, and '.*' is automatically appended"
    )
    parser.add_argument('-d',
                        '--device',
                        dest='device',
                        type=str,
                        help="Specify the device to run test on")

    # Parse just our args; python unittest has its own args and arg parsing,
    # and that runs inside unittest.main().
    known_args, remaining = parser.parse_known_args()
    sys.argv = sys.argv[:1] + remaining

    if known_args.device is None:
        print("Default GPU device is used ....")
    else:
        print("run on {} device....".format(known_args.device))

    return known_args
if __name__ == '__main__':
    # Python 2 is unsupported; exit quietly so 2.x CI runs don't fail.
    if sys.version_info < (3, 0):
        sys.exit()
    args = parse_args()
    # Build the filtered suite (injects test cases into module globals),
    # then hand control to unittest for discovery and execution.
    backend_test = create_backend_test(args.testname, args.device)
    unittest.main()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment