"...sambert-hifigan_pytorch.git" did not exist on "8b4e9acd84d8fec4a11600ef7f77e89c8f4d6520"
Commit cf85b4c6 authored by kahmed10's avatar kahmed10 Committed by mvermeulen
Browse files

Conv transpose op (#429)



* initial testing

* add shape op

* formatting

* add env variable for batch sizes

* formatting

* progress on driver

* progress on driver

* cleanup

* cleanup

* add and modified prev tests

* formatting

* remove comment

* add shape op test

* formatting

* manually insert shape op in test

* formatting

* create options struct for parsers

* formatting

* Add documentation for python

* Fix c++ documentation

* add documentation to parser

* formatting

* add argmin and tests

* fix doc and definitions

* formatting

* revert test functions

* formatting

* cpu impl of conv_transpose

* more work on conv_transpose

* rename files, added extratests

* formatting

* add more tests

* formatting

* changes

* fix tests

* fix tidy

* formatting

* fixed function parameter

* fix function parameter

* add cpu ops test

* formatting
Co-authored-by: Paul Fultz II <pfultz2@yahoo.com>
Co-authored-by: mvermeulen <5479696+mvermeulen@users.noreply.github.com>
parent 12ff54a7
#ifndef MIGRAPHX_GUARD_OPERATORS_DECONVOLUTION_HPP
#define MIGRAPHX_GUARD_OPERATORS_DECONVOLUTION_HPP
#include <array>
#include <migraphx/op/common.hpp>
#include <migraphx/operation.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/streamutils.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <cmath>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
// Transposed 2-D convolution ("deconvolution") operator.
// Attribute layout mirrors op::convolution: symmetric per-dimension
// padding, stride, dilation, a padding mode and a group count.
struct deconvolution
{
std::array<std::size_t, 2> padding = {{0, 0}};
std::array<std::size_t, 2> stride = {{1, 1}};
std::array<std::size_t, 2> dilation = {{1, 1}};
padding_mode_t padding_mode = default_;
int group = 1;

// Expose all attributes for serialization/reflection.
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.padding, "padding"),
f(self.stride, "stride"),
f(self.dilation, "dilation"),
f(self.padding_mode, "padding_mode"),
f(self.group, "group"));
}

std::string name() const { return "deconvolution"; }

// Output shape for 4-D (NCHW) input and weights:
//   out_spatial = stride * (in - 1) + (kernel - 1) * dilation + 1 - 2 * pad
// clamped to a minimum of 1. Batch comes from the input; output channels
// come from weights dim 1. NOTE(review): group is not factored into the
// output channel count here -- confirm grouped deconvolution shapes.
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this}.has(2).same_type().same_ndims().only_dims(4);
const shape& input = inputs.at(0);
const shape& weights = inputs.at(1);
auto t = input.type();
return {t,
{
input.lens()[0],
weights.lens()[1],
std::size_t(std::max<std::ptrdiff_t>(
1,
stride[0] * (input.lens()[2] - 1) +
((weights.lens()[2] - 1) * dilation[0] + 1) - 2 * padding[0])),
std::size_t(std::max<std::ptrdiff_t>(
1,
stride[1] * (input.lens()[3] - 1) +
((weights.lens()[3] - 1) * dilation[1] + 1) - 2 * padding[1])),
}};
}
};
} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
......@@ -23,6 +23,7 @@
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/cosh.hpp>
#include <migraphx/op/cos.hpp>
#include <migraphx/op/deconvolution.hpp>
#include <migraphx/op/div.hpp>
#include <migraphx/op/dot.hpp>
#include <migraphx/op/elu.hpp>
......
......@@ -82,6 +82,7 @@ struct onnx_parser
add_mem_op("Expand", &onnx_parser::parse_expand);
add_mem_op("Constant", &onnx_parser::parse_constant);
add_mem_op("Conv", &onnx_parser::parse_conv);
add_mem_op("ConvTranspose", &onnx_parser::parse_conv_transpose);
add_mem_op("MaxPool", &onnx_parser::parse_pooling);
add_mem_op("AveragePool", &onnx_parser::parse_pooling);
add_mem_op("GlobalMaxPool", &onnx_parser::parse_pooling);
......@@ -277,6 +278,25 @@ struct onnx_parser
});
}
// Convert a vector of any integral type (typically the std::size_t
// dimensions of a shape) into the signed 64-bit vector form used for
// ONNX-style attribute math.
template <class T>
std::vector<int64_t> to_int64_vector(const std::vector<T>& input_vector)
{
    return {input_vector.begin(), input_vector.end()};
}
// If a third input (the bias) is present, broadcast it along `axis` to the
// shape of `curr_ins` and add it; otherwise return `curr_ins` unchanged.
instruction_ref
add_bias(const std::vector<instruction_ref>& args, instruction_ref curr_ins, uint64_t axis)
{
    // No bias operand supplied: nothing to do.
    if(args.size() != 3)
        return curr_ins;
    auto bcast_bias =
        prog.add_instruction(op::broadcast{axis, curr_ins->get_shape().lens()}, args[2]);
    return prog.add_instruction(op::add{}, curr_ins, bcast_bias);
}
instruction_ref parse_clip(const std::string&,
const attribute_map& attributes,
std::vector<instruction_ref> args)
......@@ -452,14 +472,114 @@ struct onnx_parser
{
op.group = parse_value(attributes.at("group")).at<int>();
}
if(args.size() == 3)
auto l1 = prog.add_instruction(op, l0, args[1]);
return add_bias(args, l1, 1);
}
// Parse an ONNX ConvTranspose node into a deconvolution instruction.
// Handles pads (symmetric or asymmetric), strides, dilations, auto_pad,
// group, output_padding, output_shape and an optional bias input.
//
// Fixes over the previous revision: removes the unreachable statements
// left after the early `return` inside the asymm_padding branch (which
// made the slice dead code and inserted the deconvolution twice), removes
// the duplicate trailing return, and hoists the duplicated pads/auto_pad
// conflict check to a single place.
instruction_ref parse_conv_transpose(const std::string&,
                                     attribute_map attributes,
                                     std::vector<instruction_ref> args)
{
    op::deconvolution op;
    auto l0 = args[0];
    std::vector<std::int64_t> padding;
    bool asymm_padding = false;

    // Explicit pads and a non-NOTSET auto_pad are mutually exclusive.
    if(contains(attributes, "pads") and contains(attributes, "auto_pad"))
    {
        auto s = attributes["auto_pad"].s();
        if(to_upper(s) != "NOTSET")
        {
            MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
        }
    }
    if(contains(attributes, "pads"))
    {
        copy(attributes["pads"].ints(), std::back_inserter(padding));
        if(padding.size() != 4)
        {
            MIGRAPHX_THROW("padding should have 4 values");
        }
        // Asymmetric padding cannot be expressed by the op itself; it is
        // emulated below by slicing the unpadded result.
        if(padding[0] != padding[2] || padding[1] != padding[3])
        {
            asymm_padding = true;
        }
        else
        {
            op.padding[0] = padding[0];
            op.padding[1] = padding[1];
        }
    }
    if(contains(attributes, "strides"))
    {
        copy(attributes["strides"].ints(), op.stride.begin());
    }
    if(contains(attributes, "dilations"))
    {
        copy(attributes["dilations"].ints(), op.dilation.begin());
    }
    if(contains(attributes, "auto_pad"))
    {
        auto s = attributes["auto_pad"].s();
        if(s.find("SAME") != std::string::npos)
        {
            op.padding_mode = op::padding_mode_t::same;
        }
    }
    if(contains(attributes, "group"))
    {
        op.group = parse_value(attributes.at("group")).at<int>();
    }

    auto l1                   = prog.add_instruction(op, l0, args[1]);
    std::vector<int64_t> dims = to_int64_vector(l1->get_shape().lens());
    std::vector<int64_t> curr_shape{dims[2], dims[3]};
    if(asymm_padding)
    {
        // Trim each spatial border by its requested pad amount.
        op::slice slice_op;
        slice_op.axes   = {0, 1, 2, 3};
        slice_op.starts = {0, 0, 0 + padding[0], 0 + padding[1]};
        slice_op.ends   = {
            dims[0], dims[1], curr_shape[0] - padding[2], curr_shape[1] - padding[3]};
        l1 = prog.add_instruction(slice_op, l1);
    }

    if(contains(attributes, "output_padding"))
    {
        // ONNX output_padding appends rows/cols at the bottom/right only.
        std::vector<int64_t> output_padding;
        copy(attributes["output_padding"].ints(), std::back_inserter(output_padding));
        output_padding = {0, 0, 0, 0, 0, 0, output_padding[0], output_padding[1]};
        l1             = prog.add_instruction(op::pad{output_padding}, l1);
    }

    if(contains(attributes, "output_shape"))
    {
        // Pad bottom/right until the spatial dims match the requested shape.
        std::vector<int64_t> output_shape;
        copy(attributes["output_shape"].ints(), std::back_inserter(output_shape));
        dims       = to_int64_vector(l1->get_shape().lens());
        curr_shape = {dims[2], dims[3]};
        if(curr_shape != output_shape)
        {
            std::vector<int64_t> target_padding = {0,
                                                   0,
                                                   0,
                                                   0,
                                                   0,
                                                   0,
                                                   output_shape[0] - curr_shape[0],
                                                   output_shape[1] - curr_shape[1]};
            l1 = prog.add_instruction(op::pad{target_padding}, l1);
        }
    }

    // Optional third input is the bias, broadcast along the channel axis.
    return add_bias(args, l1, 1);
}
instruction_ref parse_pooling(const std::string& name,
......
......@@ -4,6 +4,7 @@
#include <migraphx/dfor.hpp>
#include <migraphx/op/batch_norm.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/deconvolution.hpp>
#include <migraphx/op/quant_convolution.hpp>
#include <migraphx/op/dot.hpp>
#include <migraphx/op/quant_dot.hpp>
......@@ -221,6 +222,67 @@ struct cpu_convolution
}
};
// Reference CPU implementation of deconvolution (transposed convolution).
// Instead of gathering a window per output element as convolution does,
// each input element is multiplied by the kernel and scattered into the
// output window it maps to.
template <class Op>
struct cpu_deconvolution
{
Op op;

// Reflect through the wrapped op so its attributes round-trip unchanged.
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}

std::string name() const { return "cpu::" + op.name(); }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }

// args[0] = input (NCHW), args[1] = weights; accumulates into a freshly
// zeroed result buffer.
argument compute(context&, shape output_shape, std::vector<argument> args) const
{
argument result{output_shape};
visit_all(result, args[0], args[1])([&](auto output, auto input, auto weights) {
using type = typename decltype(output)::value_type;
// The scatter below uses +=, so the output must start zeroed.
std::fill(output.begin(), output.end(), type{0});
auto out_lens = output_shape.lens();
auto out_h = out_lens[2];
auto out_w = out_lens[3];
auto in = input.get_shape().lens();
auto in_n = in[0];
auto in_c = in[1];
auto in_h = in[2];
auto in_w = in[3];
auto wei = weights.get_shape().lens();
auto wei_n = wei[0];
auto wei_c = wei[1];
auto wei_h = wei[2];
auto wei_w = wei[3];
// Parallelize over batch and per-group output channel; the inner
// loops visit every input element and every kernel tap.
par_dfor(in_n, wei_c)([&](std::size_t o, std::size_t k) {
dfor(in_c, in_h, in_w, wei_h, wei_w)(
[&](std::size_t w, std::size_t i, std::size_t j, std::size_t x, std::size_t y) {
// Top-left corner of the output window fed by input (i, j);
// may be negative because of padding.
const int start_x = i * op.stride[0] - op.padding[0];
const int start_y = j * op.stride[1] - op.padding[1];
const int out_x = start_x + x * op.dilation[0];
const int out_y = start_y + y * op.dilation[1];
// Map input channel w to its group, then to the output
// channel within the grouped weight layout (assumes
// weights dim 0 equals the input channel count -- the
// ConvTranspose weight layout).
const auto group_id = w / (wei_n / op.group);
const auto in_ch = group_id * wei_c + k;
// Scatter targets that fall outside the output are dropped.
if(out_x >= 0 && out_x < out_h && out_y >= 0 && out_y < out_w)
{
output(o, in_ch, out_x, out_y) +=
input(o, w, i, j) * weights(w, k, x, y);
}
});
});
});
return result;
}
};
struct cpu_im2col
{
op::im2col op;
......@@ -666,8 +728,10 @@ struct cpu_apply
apply_map["batch_norm_inference"] =
extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
apply_map["convolution"] = extend_op<cpu_convolution<op::convolution>, op::convolution>();
apply_map["dot"] = extend_op<cpu_gemm, op::dot>();
apply_map["quant_dot"] = extend_op<cpu_quant_gemm, op::quant_dot>();
apply_map["deconvolution"] =
extend_op<cpu_deconvolution<op::deconvolution>, op::deconvolution>();
apply_map["dot"] = extend_op<cpu_gemm, op::dot>();
apply_map["quant_dot"] = extend_op<cpu_quant_gemm, op::quant_dot>();
apply_map["quant_convolution"] =
extend_op<cpu_convolution<op::quant_convolution>, op::quant_convolution>();
apply_map["elu"] = extend_op<cpu_unary<elu_op>, op::elu>();
......
......@@ -80,6 +80,7 @@ add_library(migraphx_gpu
lowering.cpp
pooling.cpp
convolution.cpp
deconvolution.cpp
quant_convolution.cpp
softmax.cpp
logsoftmax.cpp
......
#include <migraphx/gpu/deconvolution.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/generate.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
// Shape check for the lowered GPU instruction. Inputs are
// {x, w, workspace, output}; only x and w feed the op's shape rule.
shape miopen_deconvolution::compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(4).standard();
return op.compute_shape({inputs.at(0), inputs.at(1)});
}
// Execute the deconvolution via MIOpen. The descriptor `cd` was created in
// transpose mode (see make_deconv), so the regular forward-convolution
// entry point performs the transposed convolution.
// args: [0] = x, [1] = w, [2] = workspace buffer, [3] = output buffer.
argument miopen_deconvolution::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
auto x_desc = make_tensor(args[0].get_shape());
auto w_desc = make_tensor(args[1].get_shape());
auto y_desc = make_tensor(output_shape);
// y = alpha * deconv(x, w) + beta * y
float alpha = 1;
float beta = 0;
auto status = miopenConvolutionForward(ctx.get_stream().get_miopen(),
&alpha,
x_desc.get(),
args[0].implicit(),
w_desc.get(),
args[1].implicit(),
cd.get(),
algo,
&beta,
y_desc.get(),
args[3].implicit(),
args[2].implicit(),
args[2].get_shape().bytes());
if(status != miopenStatusSuccess)
MIGRAPHX_THROW("Running deconvolution failed");
// Result was written in place into the preallocated output argument.
return args[3];
}
// Query MIOpen for the required workspace and select a forward algorithm
// by running the find step on generated data. Caches the handle and the
// chosen algorithm for compute(); returns the workspace shape callers
// must allocate.
// NOTE(review): the returned size uses perf.memory while the find step was
// given the GetWorkSpaceSize amount -- confirm these always agree.
shape miopen_deconvolution::compile(context& ctx,
const shape& output_shape,
std::vector<shape> inputs)
{
shape workspace_shape{};
auto x_desc = make_tensor(inputs[0]);
auto w_desc = make_tensor(inputs[1]);
auto y_desc = make_tensor(output_shape);
std::size_t workspace_size = 0;
miopenConvolutionForwardGetWorkSpaceSize(ctx.get_stream().get_miopen(),
w_desc.get(),
x_desc.get(),
cd.get(),
y_desc.get(),
&workspace_size);
workspace_shape = shape{shape::int8_type, {workspace_size}};
// The find step needs live device buffers, so generate throwaway data.
auto x = to_gpu(generate_argument(inputs[0]));
auto w = to_gpu(generate_argument(inputs[1]));
auto y = allocate_gpu(output_shape);
auto workspace = allocate_gpu(workspace_shape);
int algo_count = 1;
miopenConvAlgoPerf_t perf;
auto status = miopenFindConvolutionForwardAlgorithm(ctx.get_stream().get_miopen(),
x_desc.get(),
x.implicit(),
w_desc.get(),
w.implicit(),
cd.get(),
y_desc.get(),
y.implicit(),
1,
&algo_count,
&perf,
workspace.implicit(),
workspace_size,
false);
if(status != miopenStatusSuccess)
MIGRAPHX_THROW("Find deconvolution failed");
// Remember which handle this algorithm was tuned on (see finalize()).
handle = ctx.get_stream().get_miopen();
algo = perf.fwd_algo;
return shape{shape::int8_type, {perf.memory}};
}
// Re-run algorithm selection when the MIOpen handle differs from the one
// compile() tuned on. Throws if the newly chosen algorithm would need more
// workspace than was originally allocated (inputs[2]).
void miopen_deconvolution::finalize(context& ctx,
const shape& output_shape,
std::vector<shape> inputs)
{
    auto stream_handle = ctx.get_stream().get_miopen();
    if(stream_handle != handle)
    {
        // Check that workspace hasn't changed
        auto allocated = inputs.at(2).bytes();
        auto required  = compile(ctx, output_shape, std::move(inputs));
        if(required.bytes() > allocated)
            MIGRAPHX_THROW("Workspace has changed during finalization.");
    }
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#ifndef MIGRAPHX_GUARD_RTGLIB_DECONVOLUTION_HPP
#define MIGRAPHX_GUARD_RTGLIB_DECONVOLUTION_HPP
#include <migraphx/shape.hpp>
#include <migraphx/op/deconvolution.hpp>
#include <migraphx/gpu/miopen.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct context;
// MIOpen-backed transposed convolution instruction. Holds the op's
// attributes, the transpose-mode convolution descriptor, the forward
// algorithm chosen by compile(), and the handle it was tuned on.
struct miopen_deconvolution
{
op::deconvolution op;
shared<convolution_descriptor> cd;
miopenConvFwdAlgorithm_t algo{};
miopenHandle_t handle = nullptr;

// Deconvolution shares convolution's attribute set, so convolution's
// reflect is reused for the wrapped op.
template <class Self, class F>
static auto reflect(Self& self, F f)
{
// TODO: Add algo
return op::convolution::reflect(self.op, f);
}

std::string name() const { return "gpu::deconv"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
// Selects an algorithm and returns the workspace shape the caller must allocate.
shape compile(context& ctx, const shape& output_shape, std::vector<shape> inputs);
void finalize(context& ctx, const shape& output_shape, std::vector<shape> inputs);
// The instruction writes into its last input (the preallocated output).
std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
{
return shapes.size() - 1;
}
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
......@@ -94,6 +94,24 @@ inline convolution_descriptor make_conv(const T& op)
return c;
}
// Build a MIOpen convolution descriptor configured in transpose mode from
// the deconvolution op's padding/stride/dilation attributes; a group count
// greater than one is applied to the descriptor as well.
template <class T>
inline convolution_descriptor make_deconv(const T& op)
{
    auto desc = make_obj<convolution_descriptor>(&miopenCreateConvolutionDescriptor);
    miopenInitConvolutionDescriptor(desc.get(),
                                    miopenTranspose,
                                    op.padding[0],
                                    op.padding[1],
                                    op.stride[0],
                                    op.stride[1],
                                    op.dilation[0],
                                    op.dilation[1]);
    if(op.group > 1)
        miopenSetConvolutionGroupCount(desc.get(), op.group);
    return desc;
}
inline pooling_descriptor make_pooling(const migraphx::op::pooling& op)
{
miopenPoolingMode_t mode;
......
......@@ -16,6 +16,7 @@
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/convolution.hpp>
#include <migraphx/gpu/deconvolution.hpp>
#include <migraphx/gpu/quant_convolution.hpp>
#include <migraphx/gpu/contiguous.hpp>
#include <migraphx/gpu/relu.hpp>
......@@ -155,6 +156,7 @@ struct miopen_apply
add_lrn_op();
add_convolution_op();
add_deconvolution_op();
add_quant_convolution_op();
add_pooling_op();
add_batch_norm_inference_op();
......@@ -220,6 +222,22 @@ struct miopen_apply
});
}
// Lower a deconvolution instruction to the MIOpen-backed gpu::deconv:
// build the transpose-mode descriptor, compile to learn the workspace
// size, allocate workspace and output buffers, then replace the
// instruction with inputs {x, w, workspace, output}.
void add_deconvolution_op()
{
apply_map.emplace("deconvolution", [=](instruction_ref ins) {
auto&& op = any_cast<op::deconvolution>(ins->get_operator());
auto conv = miopen_deconvolution{op, make_deconv(op)};
auto ws = conv.compile(get_context(), ins->get_shape(), to_shapes(ins->inputs()));

auto workspace = insert_allocation(ins, ws, "workspace");
auto output = insert_allocation(ins, ins->get_shape());

return prog->replace_instruction(
ins, conv, ins->inputs().at(0), ins->inputs().at(1), workspace, output);
});
}
template <class Op>
void add_gemm_op(std::string name)
{
......
......@@ -1711,6 +1711,28 @@ TEST_CASE(quant_conv2d_padding_stride_test)
EXPECT(migraphx::verify_range(results_vector, s));
}
// CPU-target check of default-attribute deconvolution: a 1x1x3x3 input
// against an all-ones 3x3 kernel must give the 1x1x5x5 `gold` scatter sum.
TEST_CASE(deconv_test)
{
migraphx::shape s{migraphx::shape::float_type, {1, 1, 3, 3}};
std::vector<float> x_data{0, 1, 2, 3, 4, 5, 6, 7, 8};
std::vector<float> w_data{1, 1, 1, 1, 1, 1, 1, 1, 1};
// Expected 5x5 output for stride 1, no padding, no dilation.
std::vector<float> gold{0, 1, 3, 3, 2, 3, 8, 15, 12, 7, 9, 21, 36,
27, 15, 9, 20, 33, 24, 13, 6, 13, 21, 15, 8};

migraphx::program p;
auto x = p.add_literal(migraphx::literal{s, x_data});
auto w = p.add_literal(migraphx::literal{s, w_data});
p.add_instruction(migraphx::op::deconvolution{}, x, w);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});

std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(transpose_test)
{
migraphx::shape a_shape{migraphx::shape::float_type, {1, 2, 2, 3}};
......
constant-scalar-example:R
constant_scalar_test:Y
00"Constant*!
value**B const_tensor  test-constantb
value**B const_tensor constant_scalar_testb
0

......
 conv-example:­
conv_bias_test:²
8
0
1
23"Conv*
dilations@@ *
strides@@  test_convZ
strides@@ conv_bias_testZ
0


......
deconv_bias_test:ž
"
x
w
byconv1" ConvTransposedeconv_bias_testZ
x




Z
w




Z
b

b
y




B
deconv_input_pads_strides_test:¶
=
x
wy" ConvTranspose*
pads@@@@ *
strides@@ deconv_input_pads_strides_testZ
x




Z
w




b
y




B
deconv_input_pads_test:®
=
x
wy" ConvTranspose*
pads@@@@ *
strides@@ deconv_input_pads_testZ
x




Z
w




b
y




B
deconv_output_padding_test:
C
x
wy" ConvTranspose*
output_padding@@*
strides@@deconv_output_padding_testZ
x




Z
w




b
y




B
deconv_output_shape_test:
A
x
wy" ConvTranspose*
output_shape@
@*
strides@@deconv_output_shape_testZ
x




Z
w




b
y




B
 deconv_test:…

x
wyconv1" ConvTranspose deconv_testZ
x




Z
w




b
y




B
......@@ -600,6 +600,109 @@ def cosh_test():
return ([node], [x], [y])
@onnx_test
def deconv_test():
    # ConvTranspose with all-default attributes: a 3x3 input and a 3x3
    # kernel yield a 5x5 output.
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3])
    w_info = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 1, 3, 3])
    y_info = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])

    node = onnx.helper.make_node('ConvTranspose',
                                 name='conv1',
                                 inputs=['x', 'w'],
                                 outputs=['y'])

    return ([node], [x_info, w_info], [y_info])
@onnx_test
def deconv_bias_test():
    # ConvTranspose with a third (bias) input of one element per output
    # channel.
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3])
    w_info = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 1, 3, 3])
    b_info = helper.make_tensor_value_info('b', TensorProto.FLOAT, [1])
    y_info = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])

    node = onnx.helper.make_node('ConvTranspose',
                                 name='conv1',
                                 inputs=['x', 'w', 'b'],
                                 outputs=['y'])

    return ([node], [x_info, w_info, b_info], [y_info])
@onnx_test
def deconv_input_pads_strides_test():
    # ConvTranspose with symmetric pads plus strides; output is [1, 2, 7, 5].
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3])
    w_info = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
    y_info = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 7, 5])

    node = onnx.helper.make_node('ConvTranspose',
                                 inputs=['x', 'w'],
                                 outputs=['y'],
                                 strides=[3, 2],
                                 pads=[1, 1, 1, 1])

    return ([node], [x_info, w_info], [y_info])
@onnx_test
def deconv_input_pads_asymm_test():
    # ConvTranspose with asymmetric pads (begin != end), which the parser
    # must emulate by slicing; output is [1, 2, 8, 6].
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3])
    w_info = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
    y_info = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 8, 6])

    node = onnx.helper.make_node('ConvTranspose',
                                 inputs=['x', 'w'],
                                 outputs=['y'],
                                 strides=[3, 2],
                                 pads=[0, 0, 1, 1])

    return ([node], [x_info, w_info], [y_info])
@onnx_test
def deconv_output_shape_test():
    # ConvTranspose where the output spatial size is forced via the
    # output_shape attribute.
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3])
    w_info = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
    y_info = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 10, 8])

    node = onnx.helper.make_node('ConvTranspose',
                                 inputs=['x', 'w'],
                                 outputs=['y'],
                                 strides=[3, 2],
                                 output_shape=[10, 8])

    return ([node], [x_info, w_info], [y_info])
@onnx_test
def deconv_output_padding_test():
    # ConvTranspose with output_padding adding extra rows/cols on the
    # bottom/right.
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3])
    w_info = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
    y_info = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 10, 8])

    node = onnx.helper.make_node('ConvTranspose',
                                 inputs=['x', 'w'],
                                 outputs=['y'],
                                 strides=[3, 2],
                                 output_padding=[1, 1])

    return ([node], [x_info, w_info], [y_info])
@onnx_test
def deconv_stride_test():
    # ConvTranspose with strides only; output is [1, 2, 7, 3].
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3])
    w_info = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
    y_info = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 7, 3])

    node = onnx.helper.make_node('ConvTranspose',
                                 inputs=['x', 'w'],
                                 outputs=['y'],
                                 strides=[3, 2])

    return ([node], [x_info, w_info], [y_info])
@onnx_test
def dropout_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 2, 2])
......@@ -1449,7 +1552,7 @@ def shape_test():
@onnx_test
def shape_gather_test():
values = np.array([1])
value = helper.make_tensor_value_info('value', TensorProto.INT32, [1])
# value = helper.make_tensor_value_info('value', TensorProto.INT32, [1])
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [7, 3, 10])
y = helper.make_tensor_value_info('y', TensorProto.INT64, [3])
z = helper.make_tensor_value_info('z', TensorProto.FLOAT, [1])
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment