Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
MIGraphX
Commits
3d57cfed
Unverified
Commit
3d57cfed
authored
Oct 31, 2018
by
Paul Fultz II
Committed by
GitHub
Oct 31, 2018
Browse files
Merge pull request #97 from ROCmSoftwarePlatform/imagescaler
Imagescaler
parents
bfd8f6f9
1095ee28
Changes
20
Show whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
328 additions
and
14 deletions
+328
-14
src/dead_code_elimination.cpp
src/dead_code_elimination.cpp
+27
-3
src/include/migraph/operation.hpp
src/include/migraph/operation.hpp
+0
-1
src/include/migraph/operators.hpp
src/include/migraph/operators.hpp
+21
-4
src/include/migraph/shape.hpp
src/include/migraph/shape.hpp
+2
-0
src/onnx/onnx.cpp
src/onnx/onnx.cpp
+30
-1
src/opt/memory_coloring_impl.cpp
src/opt/memory_coloring_impl.cpp
+1
-0
src/shape.cpp
src/shape.cpp
+9
-2
src/targets/gpu/CMakeLists.txt
src/targets/gpu/CMakeLists.txt
+3
-1
src/targets/gpu/device/include/migraph/gpu/device/nary.hpp
src/targets/gpu/device/include/migraph/gpu/device/nary.hpp
+2
-1
src/targets/gpu/device/mul.cpp
src/targets/gpu/device/mul.cpp
+24
-0
src/targets/gpu/include/migraph/gpu/device/mul.hpp
src/targets/gpu/include/migraph/gpu/device/mul.hpp
+24
-0
src/targets/gpu/include/migraph/gpu/mul.hpp
src/targets/gpu/include/migraph/gpu/mul.hpp
+34
-0
src/targets/gpu/lowering.cpp
src/targets/gpu/lowering.cpp
+12
-0
src/targets/gpu/mul.cpp
src/targets/gpu/mul.cpp
+25
-0
test/cpu_ops_test.cpp
test/cpu_ops_test.cpp
+48
-0
test/dead_code_elimination_test.cpp
test/dead_code_elimination_test.cpp
+17
-0
test/gpu/miopen.cpp
test/gpu/miopen.cpp
+29
-0
test/onnx/imagescaler_test.onnx
test/onnx/imagescaler_test.onnx
+0
-0
test/onnx/onnx_test.cpp
test/onnx/onnx_test.cpp
+19
-0
test/op_shape_test.cpp
test/op_shape_test.cpp
+1
-1
No files found.
src/dead_code_elimination.cpp
View file @
3d57cfed
...
...
@@ -3,9 +3,30 @@
#include <migraph/instruction.hpp>
#include <migraph/iterator_for.hpp>
#include <migraph/functional.hpp>
#include <migraph/ranges.hpp>
namespace
migraph
{
// Computes the signed distance from `start` to `last` by walking the range
// in both directions at once. Returns +n when `last` lies after `start`
// and -n when it lies before. Precondition: both iterators belong to `r`
// and `last` is reachable from `start` in one of the two directions
// (otherwise the loop would not terminate once both walkers hit the ends).
template <class Range, class Iterator>
std::ptrdiff_t bidistance(const Range& r, Iterator start, Iterator last)
{
    auto start_forward   = start;
    auto start_backwards = start;
    // Use a signed counter: the result may be negated, and negating an
    // unsigned std::size_t (as the previous version did) relies on
    // wraparound before the conversion to ptrdiff_t.
    std::ptrdiff_t n = 0;
    while(start_forward != last and start_backwards != last)
    {
        n++;
        // Clamp each walker at its boundary so we never step past the range.
        if(start_forward != r.end())
            start_forward++;
        if(start_backwards != r.begin())
            start_backwards--;
    }
    if(start_forward == last)
        return n;
    else
        return -n;
}
void
dead_code_elimination
::
apply
(
program
&
p
)
const
{
auto
last
=
std
::
prev
(
p
.
end
());
...
...
@@ -16,18 +37,21 @@ void dead_code_elimination::apply(program& p) const
if
(
ins
==
p
.
begin
())
continue
;
const
auto
i
=
std
::
prev
(
ins
);
// Skip instruction with empty shape as output unless it's a builtin
if
(
i
->
get_shape
().
elements
()
==
0
and
not
(
i
->
name
().
front
()
==
'@'
))
continue
;
// Skip the last instruction
if
(
i
==
last
)
break
;
// Skip instruction with empty shape as output unless its a builtin
if
(
i
->
get_shape
().
elements
()
==
0
and
not
(
i
->
name
().
front
()
==
'@'
))
continue
;
assert
(
bidistance
(
p
,
i
,
last
)
>
0
);
fix
([
&
](
auto
self
,
auto
leaf
)
{
assert
(
p
.
has_instruction
(
leaf
));
if
(
leaf
->
outputs
().
empty
())
{
auto
args
=
leaf
->
inputs
();
leaf
->
clear_arguments
();
assert
(
bidistance
(
p
,
last
,
leaf
)
<
0
);
assert
(
leaf
!=
ins
);
p
.
move_instruction
(
leaf
,
p
.
end
());
for
(
auto
arg
:
args
)
self
(
arg
);
...
...
src/include/migraph/operation.hpp
View file @
3d57cfed
...
...
@@ -256,7 +256,6 @@ struct operation
shape
compute_shape
(
const
std
::
vector
<
shape
>&
input
)
const
override
{
return
private_detail_te_value
.
compute_shape
(
input
);
}
...
...
src/include/migraph/operators.hpp
View file @
3d57cfed
...
...
@@ -306,10 +306,6 @@ struct contiguous
check_shapes
{
inputs
,
*
this
}.
has
(
1
);
auto
lens
=
inputs
.
at
(
0
).
lens
();
auto
t
=
inputs
.
at
(
0
).
type
();
if
(
lens
.
size
()
<
2
)
{
MIGRAPH_THROW
(
"Number of dimensions should exceed 1"
);
}
return
{
t
,
lens
};
}
};
...
...
@@ -761,6 +757,27 @@ struct broadcast
}
};
// Broadcasts a single-element tensor to the shape of `scalar_bcast` by
// giving every dimension a zero stride, so the one stored value is read
// for every output element without copying any data.
struct scalar
{
    shape scalar_bcast;
    std::string name() const { return "scalar"; }
    shape compute_shape(std::vector<shape> inputs) const
    {
        // Exactly one 1-D input holding a single element is expected.
        assert(check_shapes{inputs}.has(1).only_dims(1).size() == 1);
        auto t = inputs.at(0).type();
        // All-zero strides: every index maps back to element 0.
        // (The previous version also bound an unused local `input`.)
        std::vector<std::size_t> strides(scalar_bcast.lens().size(), 0);
        return {t, scalar_bcast.lens(), strides};
    }

    argument compute(context&, shape output_shape, std::vector<argument> args) const
    {
        // Aliases the input buffer under the broadcast shape; no copy.
        return {std::move(output_shape), std::move(args.at(0).data)};
    }
};
struct
binary
{
shape
compute_shape
(
std
::
vector
<
shape
>
inputs
)
const
...
...
src/include/migraph/shape.hpp
View file @
3d57cfed
...
...
@@ -92,6 +92,8 @@ struct shape
/// Returns true if the shape is in its standard format. That is, the shape is both packed and
/// not transposed.
bool
standard
()
const
;
/// Returns true if all strides are equal to 0 (scalar tensor)
bool
scalar
()
const
;
friend
bool
operator
==
(
const
shape
&
x
,
const
shape
&
y
);
friend
bool
operator
!=
(
const
shape
&
x
,
const
shape
&
y
);
...
...
src/onnx/onnx.cpp
View file @
3d57cfed
...
...
@@ -56,6 +56,7 @@ struct onnx_parser
add_generic_op
(
"Sub"
,
op
::
sub
{});
add_generic_op
(
"Sum"
,
op
::
add
{});
add_mem_op
(
"ImageScaler"
,
&
onnx_parser
::
parse_imagescaler
);
add_mem_op
(
"LeakyRelu"
,
&
onnx_parser
::
parse_leaky_relu
);
add_mem_op
(
"Constant"
,
&
onnx_parser
::
parse_constant
);
add_mem_op
(
"Conv"
,
&
onnx_parser
::
parse_conv
);
...
...
@@ -315,7 +316,7 @@ struct onnx_parser
attribute_map
attributes
,
std
::
vector
<
instruction_ref
>
args
)
{
float
alpha
=
0.01
;
float
alpha
=
0.01
;
// default alpha val for leaky relu
if
(
contains
(
attributes
,
"alpha"
))
{
alpha
=
parse_value
(
attributes
.
at
(
"alpha"
)).
at
<
float
>
();
...
...
@@ -324,6 +325,34 @@ struct onnx_parser
return
prog
.
add_instruction
(
op
,
args
.
front
());
}
instruction_ref
parse_imagescaler
(
const
std
::
string
&
,
attribute_map
attributes
,
std
::
vector
<
instruction_ref
>
args
)
{
float
scale
=
1.0
;
std
::
vector
<
float
>
bias
{};
if
(
contains
(
attributes
,
"scale"
))
{
scale
=
parse_value
(
attributes
.
at
(
"scale"
)).
at
<
float
>
();
}
if
(
contains
(
attributes
,
"bias"
))
{
auto
&&
bias_floats
=
attributes
[
"bias"
].
floats
();
bias
=
std
::
vector
<
float
>
(
bias_floats
.
begin
(),
bias_floats
.
end
());
}
auto
input_shape
=
args
.
front
()
->
get_shape
();
auto
scale_val
=
prog
.
add_literal
(
scale
);
auto
bias_vals
=
prog
.
add_literal
(
migraph
::
literal
{
migraph
::
shape
{
migraph
::
shape
::
float_type
,
{
bias
.
size
()}},
bias
});
auto
scale_tensor
=
prog
.
add_instruction
(
migraph
::
op
::
scalar
{
input_shape
},
scale_val
);
auto
img_scaled
=
prog
.
add_instruction
(
migraph
::
op
::
mul
{},
args
.
front
(),
scale_tensor
);
auto
bias_bcast
=
prog
.
add_instruction
(
migraph
::
op
::
broadcast
{
1
,
input_shape
},
bias_vals
);
return
prog
.
add_instruction
(
migraph
::
op
::
add
{},
img_scaled
,
bias_bcast
);
}
void
parse_from
(
std
::
istream
&
is
)
{
onnx
::
ModelProto
model
;
...
...
src/opt/memory_coloring_impl.cpp
View file @
3d57cfed
...
...
@@ -195,6 +195,7 @@ void memory_coloring_impl::register_operand_alias()
operand_alias
[
"broadcast"
]
=
0
;
operand_alias
[
"reshape"
]
=
0
;
operand_alias
[
"pass"
]
=
0
;
operand_alias
[
"scalar"
]
=
0
;
}
void
memory_coloring_impl
::
rewrite
()
...
...
src/shape.cpp
View file @
3d57cfed
...
...
@@ -29,8 +29,8 @@ struct shape_impl
:
m_type
(
t
),
m_lens
(
std
::
move
(
l
)),
m_strides
(
std
::
move
(
s
))
{
assert
(
m_lens
.
size
()
==
m_strides
.
size
());
assert
(
std
::
any_of
(
m_strides
.
begin
(),
m_strides
.
end
(),
[](
auto
x
)
{
return
x
>
0
;
})
and
"At least one stride must be non-zero"
);
//
assert(std::any_of(m_strides.begin(), m_strides.end(), [](auto x) { return x > 0; }) and
//
"At least one stride must be non-zero");
m_standard
=
this
->
elements
()
==
this
->
element_space
()
and
std
::
is_sorted
(
m_strides
.
rbegin
(),
m_strides
.
rend
());
}
...
...
@@ -153,6 +153,13 @@ bool shape::broadcasted() const
std
::
multiplies
<
std
::
size_t
>
())
==
0
;
}
bool shape::scalar() const
{
    assert(this->lens().size() == this->strides().size());
    // A tensor is a scalar when every stride is 0, i.e. all indices map to
    // the same element. Strides are unsigned, so their sum is 0 exactly
    // when each stride is 0.
    return std::accumulate(this->strides().begin(), this->strides().end(), std::size_t(0)) == 0;
}
bool
shape
::
standard
()
const
{
return
impl
->
m_standard
;
}
std
::
size_t
shape
::
element_space
()
const
{
return
impl
->
element_space
();
}
...
...
src/targets/gpu/CMakeLists.txt
View file @
3d57cfed
...
...
@@ -14,6 +14,7 @@ add_library(migraph_device
device/add.cpp
device/add_relu.cpp
device/contiguous.cpp
device/mul.cpp
device/concat.cpp
)
rocm_clang_tidy_check
(
migraph_device
)
...
...
@@ -36,6 +37,7 @@ add_library(migraph_gpu
relu.cpp
leaky_relu.cpp
add.cpp
mul.cpp
batchnorm.cpp
write_literals.cpp
rocblas.cpp
...
...
src/targets/gpu/device/include/migraph/gpu/device/nary.hpp
View file @
3d57cfed
...
...
@@ -333,7 +333,8 @@ nary(hipStream_t stream, const argument& result, const argument& arg1, const arg
{
return
[
=
](
auto
f
)
{
// TODO: Check result and arg1 shape is the same
if
(
arg1
.
get_shape
().
standard
()
and
arg2
.
get_shape
().
broadcasted
())
if
(
arg1
.
get_shape
().
standard
()
and
arg2
.
get_shape
().
broadcasted
()
and
not
arg2
.
get_shape
().
scalar
())
{
auto
not_zero
=
[](
auto
x
)
{
return
x
!=
0
;
};
const
auto
&
strides
=
arg2
.
get_shape
().
strides
();
...
...
src/targets/gpu/device/mul.cpp
0 → 100644
View file @
3d57cfed
#include <migraph/gpu/device/mul.hpp>
#include <migraph/gpu/device/nary.hpp>

namespace migraph {
namespace gpu {
namespace device {

// Elementwise product of two tensors, written into result on the given stream.
void mul(hipStream_t stream, const argument& result, const argument& arg1, const argument& arg2)
{
    nary(stream, result, arg1, arg2)([](auto a, auto b) { return a * b; });
}

// Elementwise product of three tensors, written into result on the given stream.
void mul(hipStream_t stream,
         const argument& result,
         const argument& arg1,
         const argument& arg2,
         const argument& arg3)
{
    nary(stream, result, arg1, arg2, arg3)([](auto a, auto b, auto c) { return a * b * c; });
}

} // namespace device
} // namespace gpu
} // namespace migraph
src/targets/gpu/include/migraph/gpu/device/mul.hpp
0 → 100644
View file @
3d57cfed
#ifndef MIGRAPH_GUARD_RTGLIB_DEVICE_MUL_HPP
#define MIGRAPH_GUARD_RTGLIB_DEVICE_MUL_HPP

#include <migraph/argument.hpp>
#include <hip/hip_runtime_api.h>

namespace migraph {
namespace gpu {
namespace device {

/// Elementwise multiply: result = arg1 * arg2, launched on `stream`.
void mul(hipStream_t stream, const argument& result, const argument& arg1, const argument& arg2);

/// Three-input elementwise multiply: result = arg1 * arg2 * arg3.
void mul(hipStream_t stream,
         const argument& result,
         const argument& arg1,
         const argument& arg2,
         const argument& arg3);

} // namespace device
} // namespace gpu
} // namespace migraph

#endif
src/targets/gpu/include/migraph/gpu/mul.hpp
0 → 100644
View file @
3d57cfed
#ifndef MIGRAPH_GUARD_RTGLIB_MUL_HPP
#define MIGRAPH_GUARD_RTGLIB_MUL_HPP

#include <migraph/gpu/lowering.hpp>
#include <migraph/manage_ptr.hpp>
#include <migraph/instruction.hpp>
#include <migraph/operators.hpp>
#include <migraph/generate.hpp>
#include <migraph/shape_for_each.hpp>
#include <migraph/gpu/miopen.hpp>
#include <migraph/gpu/hip.hpp>
#include <migraph/dfor.hpp>
#include <migraph/gpu/device/contiguous.hpp>
#include <migraph/gpu/device/mul.hpp>
#include <migraph/iterator_for.hpp>
#include <migraph/gpu/rocblas.hpp>
#include <migraph/gpu/context.hpp>
#include <utility>

namespace migraph {
namespace gpu {

/// GPU lowering of the "mul" operator. Takes three arguments: the two
/// inputs plus a trailing preallocated output buffer (see mul.cpp).
struct hip_mul
{
    std::string name() const { return "gpu::mul"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument compute(context&, const shape&, const std::vector<argument>& args) const;
};

} // namespace gpu
} // namespace migraph

#endif
src/targets/gpu/lowering.cpp
View file @
3d57cfed
...
...
@@ -19,6 +19,7 @@
#include <migraph/gpu/leaky_relu.hpp>
#include <migraph/gpu/softmax.hpp>
#include <migraph/gpu/add.hpp>
#include <migraph/gpu/mul.hpp>
#include <migraph/gpu/batchnorm.hpp>
#include <migraph/gpu/pooling.hpp>
#include <migraph/gpu/gemm.hpp>
...
...
@@ -65,6 +66,10 @@ struct miopen_apply
{
check_shape
(
s
,
apply_add
(
it
));
}
else
if
(
it
->
name
()
==
"mul"
)
{
check_shape
(
s
,
apply_mul
(
it
));
}
else
if
(
it
->
name
()
==
"dot"
)
{
check_shape
(
s
,
apply_gemm
(
it
));
...
...
@@ -163,6 +168,13 @@ struct miopen_apply
ins
,
hip_add
{},
ins
->
inputs
().
at
(
0
),
ins
->
inputs
().
at
(
1
),
output
);
}
    instruction_ref apply_mul(instruction_ref ins)
    {
        // Allocate a GPU buffer for the result, then replace the generic
        // "mul" with hip_mul, which takes that buffer as its final argument.
        auto output = insert_allocation(ins, ins->get_shape());
        return prog->replace_instruction(
            ins, hip_mul{}, ins->inputs().at(0), ins->inputs().at(1), output);
    }
instruction_ref
apply_gemm
(
instruction_ref
ins
)
{
auto
&&
op
=
any_cast
<
op
::
dot
>
(
ins
->
get_operator
());
...
...
src/targets/gpu/mul.cpp
0 → 100644
View file @
3d57cfed
#include <migraph/gpu/mul.hpp>
#include <migraph/operators.hpp>
#include <migraph/manage_ptr.hpp>
#include <migraph/gpu/miopen.hpp>
#include <utility>
namespace
migraph
{
namespace
gpu
{
shape hip_mul::compute_shape(const std::vector<shape>& inputs) const
{
    // Expects three shapes: the two inputs plus the preallocated output.
    // NOTE(review): the stricter check below was left disabled — confirm
    // whether non-standard input shapes are intentionally allowed here.
    // check_shapes{inputs, *this}.has(3).standard();
    check_shapes{inputs, *this}.has(3);
    return inputs.at(0);
}
argument hip_mul::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
    // args[0] and args[1] are the inputs; args[2] is the output buffer
    // allocated during lowering. The device kernel writes into it and it
    // is returned as the result.
    device::mul(ctx.get_stream().get(), args[2], args[0], args[1]);
    return args[2];
}
}
// namespace gpu
}
// namespace migraph
test/cpu_ops_test.cpp
View file @
3d57cfed
...
...
@@ -539,6 +539,53 @@ void leaky_relu_test()
EXPECT
(
migraph
::
verify_range
(
results_vector
,
gold
));
}
// Builds the ImageScaler decomposition by hand (scalar-broadcast scale,
// multiply, channel-wise bias broadcast, add) and checks the CPU result
// against precomputed gold values: out = 2 * img + bias[c].
void imagescaler_test()
{
    migraph::program p;
    migraph::shape s{migraph::shape::float_type, {1, 3, 2, 2}};
    auto img = p.add_literal(migraph::literal{
        s, {0.2, 0.3, 0.5, 0.4, 0.7, 0.8, 0.1, 0.9, 0.15, 0.25, 0.35, 0.45}});
    auto scale_val     = p.add_literal(2.f);
    auto scaled_tensor = p.add_instruction(migraph::op::scalar{s}, scale_val);
    auto img_scaled    = p.add_instruction(migraph::op::mul{}, img, scaled_tensor);
    auto bias_vals     = p.add_literal(
        migraph::literal{migraph::shape{migraph::shape::float_type, {3}}, {0.01, 0.02, 0.03}});
    // Bias is broadcast along axis 1 (the channel axis).
    auto bias_bcast = p.add_instruction(migraph::op::broadcast{1, s}, bias_vals);
    p.add_instruction(migraph::op::add{}, img_scaled, bias_bcast);
    p.compile(migraph::cpu::cpu_target{});
    auto result = p.eval({});
    std::vector<float> results_vector(12);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {
        0.41, 0.61, 1.01, 0.81, 1.42, 1.62, 0.22, 1.82, 0.33, 0.53, 0.73, 0.93};
    EXPECT(migraph::verify_range(results_vector, gold));
}
void
reshape_test
()
{
migraph
::
shape
a_shape
{
migraph
::
shape
::
float_type
,
{
24
,
1
,
1
,
1
}};
...
...
@@ -994,6 +1041,7 @@ int main()
add_test
();
broadcast_test
();
add_broadcast_test
();
imagescaler_test
();
sub_test
();
mul_test
();
div_test
();
...
...
test/dead_code_elimination_test.cpp
View file @
3d57cfed
...
...
@@ -43,6 +43,22 @@ void simple_test_nop()
EXPECT
(
result
!=
migraph
::
literal
{
4
});
}
// Checks that DCE removes an unused sum (and its literals) while keeping
// the nop instructions, and that evaluation yields the nop's empty result.
void simple_test_nop2()
{
    migraph::program p;
    auto one = p.add_literal(1);
    auto two = p.add_literal(2);
    p.add_instruction(nop{});
    // This sum has no consumers, so it is dead code.
    p.add_instruction(sum_op{}, one, two);
    p.add_instruction(nop{});
    p.compile(dce_target{});
    // Only the two nops are expected to survive elimination.
    EXPECT(std::distance(p.begin(), p.end()) == 2);
    auto result = p.eval({});
    EXPECT(result == migraph::literal{});
    EXPECT(result != migraph::literal{4});
}
void
duplicate_test1
()
{
migraph
::
program
p
;
...
...
@@ -99,6 +115,7 @@ int main()
{
simple_test
();
simple_test_nop
();
simple_test_nop2
();
duplicate_test1
();
duplicate_test2
();
depth_test
();
...
...
test/gpu/miopen.cpp
View file @
3d57cfed
...
...
@@ -175,6 +175,33 @@ struct test_add
}
};
// Elementwise multiply of two 3-element float tensors; used by
// verify_program to compare the GPU "mul" lowering against the reference.
struct test_mul
{
    migraph::program create_program() const
    {
        migraph::program p;
        migraph::shape s{migraph::shape::float_type, {3}};
        auto x = p.add_parameter("x", s);
        auto y = p.add_parameter("y", s);
        p.add_instruction(migraph::op::mul{}, x, y);
        return p;
    }
};
// Multiplies a tensor by a scalar parameter that is broadcast to the
// tensor's shape via op::scalar; exercises the scalar lowering on GPU.
struct test_scale
{
    migraph::program create_program() const
    {
        migraph::program p;
        migraph::shape s{migraph::shape::float_type, {3}};
        auto x = p.add_parameter("x", s);
        // "y" is a rank-0/single-element parameter holding the scale factor.
        auto y     = p.add_parameter("y", migraph::shape::float_type);
        auto scale = p.add_instruction(migraph::op::scalar{s}, y);
        p.add_instruction(migraph::op::mul{}, x, scale);
        return p;
    }
};
struct
test_triadd
{
migraph
::
program
create_program
()
const
...
...
@@ -653,6 +680,8 @@ int main()
verify_program
<
test_concat
>
();
verify_program
<
test_concat2
>
();
verify_program
<
test_add
>
();
verify_program
<
test_mul
>
();
verify_program
<
test_scale
>
();
verify_program
<
test_triadd
>
();
verify_program
<
test_triadd2
>
();
verify_program
<
test_add_broadcast
>
();
...
...
test/onnx/imagescaler_test.onnx
0 → 100644
View file @
3d57cfed
File added
test/onnx/onnx_test.cpp
View file @
3d57cfed
...
...
@@ -100,6 +100,24 @@ void leaky_relu_test()
EXPECT
(
p
==
prog
);
}
// Verifies that parsing imagescaler_test.onnx produces exactly the
// expected decomposition: scalar-broadcast scale, multiply, channel-wise
// bias broadcast, add.
void imagescaler_test()
{
    migraph::program p;
    migraph::shape s{migraph::shape::float_type, {1, 3, 16, 16}};
    auto l0        = p.add_parameter("0", s);
    auto scale_val = p.add_literal(0.5f);
    auto bias_vals = p.add_literal(
        migraph::literal{migraph::shape{migraph::shape::float_type, {3}}, {0.01, 0.02, 0.03}});
    auto scaled_tensor = p.add_instruction(migraph::op::scalar{s}, scale_val);
    auto img_scaled    = p.add_instruction(migraph::op::mul{}, l0, scaled_tensor);
    auto bias_bcast    = p.add_instruction(migraph::op::broadcast{1, s}, bias_vals);
    p.add_instruction(migraph::op::add{}, img_scaled, bias_bcast);
    auto prog = migraph::parse_onnx("imagescaler_test.onnx");
    EXPECT(p == prog);
}
int
main
()
{
pytorch_conv_bias_test
();
...
...
@@ -107,4 +125,5 @@ int main()
pytorch_conv_bn_relu_maxpool
();
pytorch_conv_relu_maxpool_x2
();
leaky_relu_test
();
imagescaler_test
();
}
test/op_shape_test.cpp
View file @
3d57cfed
...
...
@@ -93,7 +93,7 @@ void contiguous_shape()
throws_shape
(
migraph
::
op
::
contiguous
{},
input
,
input
);
migraph
::
shape
single
{
migraph
::
shape
::
float_type
,
{
2
}};
throws
_shape
(
migraph
::
op
::
contiguous
{},
single
);
expect
_shape
(
single
,
migraph
::
op
::
contiguous
{},
single
);
}
void
reshape_shape
()
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment