"experiments/pyexps/farmem.py" did not exist on "32a74b8eb0183e6911e87292795f8a8968d214af"
Unverified commit a5dab12c, authored by turneram, committed by GitHub

Merge branch 'develop' into ck-flash-attn

parents 321367be c7f0fbc4
Binary file diff for const_of_shape_no_value_attr_test.onnx (ONNX protobuf, not human-readable here): the graph name changes from constant_of_shape to const_of_shape_no_value_attr_test; a Constant node emits shape_tensor, which feeds a ConstantOfShape node producing output y.
@@ -582,6 +582,29 @@ def cast_test():
return ([node], [x], [y])
@onnx_test()
def castlike_test():
input = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [10])
target_type = helper.make_tensor_value_info('1', TensorProto.FLOAT, [10])
output = helper.make_tensor_value_info('out', TensorProto.FLOAT, [10])
node = onnx.helper.make_node('CastLike',
inputs=['0', '1'],
outputs=['out'])
return ([node], [input, target_type], [output])
@onnx_test()
def castlike_error_test():
input = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [10])
output = helper.make_tensor_value_info('out', TensorProto.FLOAT, [10])
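    # CastLike takes two inputs: the data to cast and a target-type exemplar.
    # Supplying only one input, as below, should make the parser reject the model.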
node = onnx.helper.make_node('CastLike', inputs=['0'], outputs=['out'])
return ([node], [input], [output])
@onnx_test()
def ceil_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
@@ -1007,9 +1030,9 @@ def const_of_shape_empty_input_test():
[10])
empty_val = np.array([]).astype(np.int64)
empty_ts = helper.make_tensor(name='empty_tensor',
-data_type=TensorProto.INT32,
+data_type=TensorProto.INT64,
dims=empty_val.shape,
-vals=empty_val.flatten().astype(int))
+vals=empty_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
inputs=[],
@@ -1035,9 +1058,9 @@ def const_of_shape_float_test():
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
-data_type=TensorProto.INT32,
+data_type=TensorProto.INT64,
dims=shape_val.shape,
-vals=shape_val.flatten().astype(int))
+vals=shape_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
@@ -1055,22 +1078,44 @@ def const_of_shape_float_test():
return ([shape_const, node], [], [y])
@onnx_test()
def const_of_shape_default_test():
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT64,
dims=shape_val.shape,
vals=shape_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
inputs=[],
outputs=['shape'],
value=shape_ts,
)
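    # No 'value' attribute is set; per the ONNX spec, ConstantOfShape then
    # produces float32 zeros (the matching C++ test below expects a float output).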
y = helper.make_tensor_value_info('y', TensorProto.INT64, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['shape'],
outputs=['y'])
return ([shape_const, node], [], [y])
@onnx_test()
def const_of_shape_int64_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
[10])
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
-data_type=TensorProto.INT32,
+data_type=TensorProto.INT64,
dims=shape_val.shape,
-vals=shape_val.flatten().astype(int))
+vals=shape_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
inputs=[],
outputs=['shape'],
value=shape_ts,
)
-y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])
+y = helper.make_tensor_value_info('y', TensorProto.INT64, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['shape'],
@@ -1084,9 +1129,9 @@ def const_of_shape_int64_test():
def const_of_shape_no_value_attr_test():
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
-data_type=TensorProto.INT32,
+data_type=TensorProto.INT64,
dims=shape_val.shape,
-vals=shape_val.flatten().astype(int))
+vals=shape_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
inputs=[],
@@ -1104,6 +1149,40 @@ def const_of_shape_no_value_attr_test():
return ([shape_const, node], [], [y])
@onnx_test()
def const_of_shape_dyn_float_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.FLOAT, [1],
[10])
output_dims = helper.make_tensor_value_info('output_dims',
TensorProto.INT64, [3])
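    # output_dims is a graph input rather than a constant, so the output
    # shape is only known at run time.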
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['output_dims'],
outputs=['y'],
value=tensor_val)
return ([node], [output_dims], [y])
@onnx_test()
def const_of_shape_dyn_int64_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
[10])
output_dims = helper.make_tensor_value_info('output_dims',
TensorProto.INT64, [3])
y = helper.make_tensor_value_info('y', TensorProto.INT64, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['output_dims'],
outputs=['y'],
value=tensor_val)
return ([node], [output_dims], [y])
@onnx_test()
def conv_1d_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
@@ -687,6 +687,26 @@ TEST_CASE(cast_test)
EXPECT(p == prog);
}
TEST_CASE(castlike_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l = mm->add_parameter("0", migraphx::shape{migraphx::shape::half_type, {10}});
mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {10}});
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l);
auto prog = optimize_onnx("castlike_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(castlike_error_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("castlike_error_test.onnx"); }));
}
TEST_CASE(ceil_test)
{
migraphx::program p;
@@ -1040,11 +1060,25 @@ TEST_CASE(constant_one_val_int64_test)
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_default_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape output_dims_shape(migraphx::shape::int64_type, {3});
mm->add_literal(migraphx::literal(output_dims_shape, {2, 3, 4}));
migraphx::shape output_shape{migraphx::shape::float_type, {2, 3, 4}};
std::vector<float> vec(output_shape.elements(), 0.0);
mm->add_literal(migraphx::literal(output_shape, vec));
auto prog = optimize_onnx("const_of_shape_default_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_empty_input_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
-mm->add_literal(migraphx::literal(migraphx::shape::int32_type));
+mm->add_literal(migraphx::literal(migraphx::shape::int64_type));
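    // The 'value' literal: an empty shape input yields a scalar output,
    // which migraphx represents as lens {1} with stride {0}.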
migraphx::shape s(migraphx::shape::int64_type, {1}, {0});
std::vector<int64_t> vec(s.elements(), 10);
mm->add_literal(migraphx::literal(s, vec));
@@ -1057,7 +1091,7 @@ TEST_CASE(const_of_shape_float_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
-migraphx::shape ss(migraphx::shape::int32_type, {3});
+migraphx::shape ss(migraphx::shape::int64_type, {3});
mm->add_literal(migraphx::literal(ss, {2, 3, 4}));
migraphx::shape s(migraphx::shape::float_type, {2, 3, 4});
std::vector<float> vec(s.elements(), 10.0f);
@@ -1071,8 +1105,10 @@ TEST_CASE(const_of_shape_int64_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
-migraphx::shape ss(migraphx::shape::int32_type, {3});
+// output_dims
+migraphx::shape ss(migraphx::shape::int64_type, {3});
mm->add_literal(migraphx::literal(ss, {2, 3, 4}));
+// constant shape literal
migraphx::shape s(migraphx::shape::int64_type, {2, 3, 4});
std::vector<int64_t> vec(s.elements(), 10);
mm->add_literal(migraphx::literal(s, vec));
@@ -1085,7 +1121,7 @@ TEST_CASE(const_of_shape_no_value_attr_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
-migraphx::shape ss(migraphx::shape::int32_type, {3});
+migraphx::shape ss(migraphx::shape::int64_type, {3});
mm->add_literal(migraphx::literal(ss, {2, 3, 4}));
migraphx::shape s(migraphx::shape::float_type, {2, 3, 4});
std::vector<float> vec(s.elements(), 0.0f);
@@ -1095,6 +1131,42 @@ TEST_CASE(const_of_shape_no_value_attr_test)
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_dyn_float_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto od_param =
mm->add_parameter("output_dims", migraphx::shape{migraphx::shape::int64_type, {3}});
auto alloc_ins = mm->add_instruction(
migraphx::make_op("allocate", {{"buf_type", migraphx::shape::float_type}}), od_param);
migraphx::shape dv_shape(migraphx::shape::float_type, {1}, {0});
auto dv_lit = mm->add_literal(migraphx::literal(dv_shape, {10}));
auto fill_ins = mm->add_instruction(migraphx::make_op("fill"), dv_lit, alloc_ins);
mm->add_return({fill_ins});
migraphx::onnx_options options;
auto prog = parse_onnx("const_of_shape_dyn_float_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_dyn_int64_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto od_param =
mm->add_parameter("output_dims", migraphx::shape{migraphx::shape::int64_type, {3}});
auto alloc_ins = mm->add_instruction(
migraphx::make_op("allocate", {{"buf_type", migraphx::shape::int64_type}}), od_param);
migraphx::shape dv_shape(migraphx::shape::int64_type, {1}, {0});
auto dv_lit = mm->add_literal(migraphx::literal(dv_shape, {10}));
auto fill_ins = mm->add_instruction(migraphx::make_op("fill"), dv_lit, alloc_ins);
mm->add_return({fill_ins});
migraphx::onnx_options options;
auto prog = parse_onnx("const_of_shape_dyn_int64_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_autopad_fail_test)
{
EXPECT(test::throws([&] { optimize_onnx("conv_autopad_fail_test.onnx"); }));
[One file diff is collapsed and not shown.]
#####################################################################################
# The MIT License (MIT)
#
-# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -23,10 +23,55 @@
#####################################################################################
import migraphx
import ctypes
import os
import glob
def test_conv_relu():
-    hip = ctypes.cdll.LoadLibrary("libamdhip64.so")
    # The full path to the library is needed to fix an issue on SLES,
    # where the library is not loaded otherwise.
    # We check for the presence of the library at the following paths,
    # in the order listed below:
    #
    # 1. the 'rocm_path' environment variable
    # 2. /opt/rocm
    # 3. /opt/rocm-*
    #
    # If the library is not found at any of these paths, we fall back
    # to letting the loader resolve it by name.
    library = "libamdhip64.so"
    # Environment variable containing the path to ROCm
    rocm_path_env_var = "rocm_path"
    # Check for rocm_path; default to /opt/rocm if it is not set.
    rocm_path_var = os.getenv(rocm_path_env_var, default="/opt/rocm")
    # Join the paths to get the full path to the library,
    # e.g. /opt/rocm/lib/libamdhip64.so
    library_file = os.path.join(rocm_path_var, "lib", library)
    # Check whether the library file exists at the specified path
    if os.path.exists(library_file):
        # Replace the library name with the full path to the library
        library = library_file
    else:
        # Pattern-match to look for versioned ROCm installs: /opt/rocm-*
        rocm_path_pattern = "/opt/rocm-*/lib/libamdhip64.so"
        matching_libraries = glob.glob(rocm_path_pattern)
        if matching_libraries:
            # Use the full path to the first matching library found.
            library = matching_libraries[0]
    # Load the library by the full path detected above or, failing
    # that, by name alone.
    hip = ctypes.cdll.LoadLibrary(library)
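    # Illustrative sanity check (an editorial sketch, not part of this
    # commit): hipGetDeviceCount(int*) is a HIP runtime entry point, and a
    # return status of 0 (hipSuccess) confirms the loaded handle is usable.
    device_count = ctypes.c_int(0)
    status = hip.hipGetDeviceCount(ctypes.byref(device_count))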
p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
print(p)
@@ -82,6 +82,27 @@ def parse_args():
default=False,
help='Turn on ort VERBOSE logging via session options')
parser.add_argument(
'--disable-offload-copy',
dest="offload_copy",
action='store_false',
default=True,
help=
'Disable offload copying (user must handle copy to and from device)')
parser.add_argument(
'--disable-fast-math',
dest="fast_math",
action='store_false',
default=True,
help='Disable fast math optimizations (e.g. rewrite_gelu)')
parser.add_argument('--exhaustive_tune',
dest="exhaustive_tune",
action='store_true',
default=False,
help='Enable exhaustive tuning for solutions')
args = parser.parse_args()
return args
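# Illustrative invocation combining the new flags; the script and model
# names here are placeholders, not taken from this commit:
#   python driver.py model.onnx --disable-offload-copy --exhaustive_tune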
@@ -177,7 +198,12 @@ def main():
print(model)
if not args.ort_run:
-model.compile(migraphx.get_target(args.target))
model.compile(
migraphx.get_target(args.target),
offload_copy=args.offload_copy,
fast_math=args.fast_math,
exhaustive_tune=args.exhaustive_tune,
)
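# With offload_copy disabled, the caller must copy inputs to and results
# from the device, as noted in the --disable-offload-copy help text.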
params = {}
test_inputs = {}