gaoqiong / MIGraphX

Commit 8f568801, authored Oct 04, 2022 by Paul
Merge branch 'develop' into layout-nhwc
Parents: 7393cf1e, f7d987ba

Changes: 259 files in total; this page shows 20 changed files with 913 additions and 177 deletions (+913 -177).
src/targets/gpu/rocblas.cpp                       +33  -0
src/targets/gpu/softmax.cpp                       +0   -49
src/tf/tf_parser.cpp                              +1   -1
src/value.cpp                                     +1   -8
test/api/test_custom_op.cpp                       +43  -0
test/api/test_custom_op_gpu.cpp                   +258 -37
test/api/test_gpu.cpp                             +60  -2
test/gpu/adjust_allocation.cpp                    +5   -1
test/gpu/jit.cpp                                  +2   -0
test/gpu/make_precompile_op.hpp                   +66  -0
test/gpu/mlir.cpp                                 +0   -4
test/gpu/pack_int8_args.cpp                       +83  -74
test/gpu/stream_sync.cpp                          +146 -0
test/memory_coloring_test.cpp                     +1   -1
test/onnx/batch_norm_1d_test.onnx                 +35  -0
test/onnx/batch_norm_2d_test.onnx                 +35  -0
test/onnx/batch_norm_3d_test.onnx                 +44  -0
test/onnx/batch_norm_flat_test.onnx               +34  -0
test/onnx/batch_norm_invalid_bias_rank_test.onnx  +35  -0
test/onnx/batch_norm_invalid_rank_test.onnx       +31  -0
src/targets/gpu/rocblas.cpp

@@ -21,7 +21,13 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
+#include <unordered_set>
+#include <migraphx/ranges.hpp>
+#include <migraphx/stringutils.hpp>
+#include <migraphx/gpu/device_name.hpp>
 #include <migraphx/gpu/rocblas.hpp>
 #include <migraphx/gpu/context.hpp>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace gpu {

@@ -41,6 +47,33 @@ rocblas_handle_ptr create_rocblas_handle_ptr(hipStream_t s)
     return rb;
 }

+const std::unordered_set<std::string>& get_rocblas_fp32_archs()
+{
+    static std::unordered_set<std::string> supported_archs{"gfx908", "gfx90a"};
+    return supported_archs;
+}
+
+bool get_compute_fp32_flag()
+{
+    bool compute_fp32 = false;
+#if ROCBLAS_VERSION_MAJOR >= 2 && ROCBLAS_VERSION_MINOR >= 38
+    const auto device_name = trim(split_string(get_device_name(), ':').front());
+    if(contains(get_rocblas_fp32_archs(), device_name))
+        compute_fp32 = true;
+#endif
+    return compute_fp32;
+}
+
+bool get_int8_x4_format(context& ctx)
+{
+    bool int8_x4_format = true;
+#if ROCBLAS_VERSION_MAJOR >= 2 && ROCBLAS_VERSION_MINOR >= 38
+    rocblas_gemm_flags flag;
+    rocblas_query_int8_layout_flag(ctx.get_stream().get_rocblas(), &flag);
+    int8_x4_format = (flag == rocblas_gemm_flags_pack_int8x4);
+#endif
+    return int8_x4_format;
+}
+
 } // namespace gpu
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
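For orientation: these helpers feed the attributes of gpu::quant_gemm at lowering time. A minimal sketch of that call pattern, mirroring the updated pack_int8_args tests further down (the helper name make_quant_gemm is illustrative, not part of the library):

#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/make_op.hpp>

// Hypothetical helper: build a gpu::quant_gemm whose flags match the
// rocBLAS capabilities queried at runtime.
migraphx::operation make_quant_gemm(migraphx::gpu::context& ctx)
{
    return migraphx::make_op(
        "gpu::quant_gemm",
        {{"int8_x4_format", migraphx::gpu::get_int8_x4_format(ctx)},
         {"compute_fp32", migraphx::gpu::get_compute_fp32_flag()}});
}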
src/targets/gpu/softmax.cpp — deleted (100644 → 0)

/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <migraphx/gpu/softmax.hpp>
#include <migraphx/gpu/device/softmax.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/tune_axis.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

shape hip_softmax::compute_shape(const std::vector<shape>& inputs) const
{
    check_shapes{inputs, *this}.has(2).standard();
    return op.normalize_compute_shape({inputs.at(0)});
}

argument hip_softmax::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
    auto n_dim      = args.front().get_shape().lens().size();
    auto tuned_axis = tune_axis(n_dim, op.axis, op.name());
    device::softmax(ctx.get_stream().get(), args.back(), args.front(), tuned_axis);
    return args.back();
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
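For context on the deleted compute: tune_axis(n_dim, op.axis, op.name()) normalizes the softmax axis against the input rank before the device kernel runs; in MIGraphX this resolves negative axes (e.g. axis -1 on a rank-4 input becomes 3) and reports an out-of-range axis under the op's name.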
src/tf/tf_parser.cpp

@@ -347,7 +347,7 @@ void tf_parser::parse_node(const std::string& name)
         // input was from a node with multiple outputs
         if(contains(input_name, ':'))
         {
-            input_name = input_name.substr(0, input.find(':'));
+            input_name.resize(input.find(':'));
         }
         else
         {
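Both forms truncate to the same prefix; resize just does it in place instead of building a temporary. A standalone sketch (values illustrative):

#include <cassert>
#include <string>

int main()
{
    std::string a = "conv:0";
    std::string b = a;
    a = a.substr(0, a.find(':')); // old form: constructs a new string, then assigns
    b.resize(b.find(':'));        // new form: truncates in place
    assert(a == "conv" && b == "conv");
}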
src/value.cpp

@@ -511,14 +511,7 @@ void print_value(std::ostream& os, const std::vector<value>& x)
     os << "}";
 }

-void print_value(std::ostream& os, const value::binary& x)
-{
-    // Convert binary to integers
-    std::vector<int> v(x.begin(), x.end());
-    os << "{";
-    os << to_string_range(v);
-    os << "}";
-}
+void print_value(std::ostream& os, const value::binary& x) { os << x; }

 std::ostream& operator<<(std::ostream& os, const value& d)
 {
test/api/test_custom_op.cpp

@@ -43,6 +43,8 @@ struct sigmoid_custom_op final : migraphx::experimental_custom_op_base
         return inputs[1];
     }

+    virtual bool runs_on_offload_target() const override { return true; }
+
     virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
     {
         if(inputs.size() != 2)

@@ -111,4 +113,45 @@ TEST_CASE(run_sigmoid_with_incorrect_shape)
         "Error in compute_shape of: sigmoid_custom_op: op must have two inputs"));
 }

+struct identity_custom_op final : migraphx::experimental_custom_op_base
+{
+    virtual std::string name() const override { return "identity_custom_op"; }
+    virtual migraphx::argument
+    compute(migraphx::context, migraphx::shape, migraphx::arguments inputs) const override
+    {
+        return inputs[0];
+    }
+    virtual bool runs_on_offload_target() const override { return true; }
+    virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
+    {
+        if(inputs.size() != 1)
+        {
+            throw std::runtime_error("Identity op must have only one input");
+        }
+        return inputs.back();
+    }
+    virtual std::vector<size_t> output_alias(migraphx::shapes) const override { return {0, 1}; }
+};
+
+TEST_CASE(run_custom_op_with_invalid_output_alias)
+{
+    identity_custom_op i_op;
+    migraphx::register_experimental_custom_op(i_op);
+    auto op = migraphx::operation("identity_custom_op");
+    EXPECT(op.name() == "identity_custom_op");
+    migraphx::program p;
+    migraphx::shape s{migraphx_shape_float_type, {12}};
+    migraphx::module m = p.get_main_module();
+    auto x             = m.add_parameter("x", s);
+    auto i_ins         = m.add_instruction(migraphx::operation("identity_custom_op"), {x});
+    migraphx_test_private_disable_exception_catch(true);
+    EXPECT(test::throws<std::exception>(
+        [&] { p.compile(migraphx::target("ref")); },
+        "Currently, CustomOps in MIGraphX only supports one output_alias"));
+}
+
 int main(int argc, const char* argv[]) { test::run(argc, argv); }
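The test above registers an op whose output_alias returns two indices, which compile rejects. For contrast, a valid aliasing op returns a single index — a sketch against the same experimental API (the op name is illustrative; the stride_two op in test_custom_op_gpu.cpp below uses exactly this pattern):

struct inplace_custom_op final : migraphx::experimental_custom_op_base
{
    virtual std::string name() const override { return "inplace_custom_op"; }
    virtual migraphx::argument
    compute(migraphx::context, migraphx::shape, migraphx::arguments inputs) const override
    {
        return inputs[0]; // the result lives in the aliased input buffer
    }
    virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
    {
        return inputs.front();
    }
    // Exactly one alias index: the output reuses input 0.
    virtual std::vector<size_t> output_alias(migraphx::shapes) const override { return {0}; }
};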
test/api/test_custom_op_gpu.cpp

@@ -24,40 +24,89 @@
 #include <hip/hip_runtime_api.h>
 #include <migraphx/migraphx.h>
 #include <migraphx/migraphx.hpp>
+#include <numeric>
+#include <stdexcept>
 #include "test.hpp"

+#define MIGRAPHX_HIP_ASSERT(x) (EXPECT(x == hipSuccess))
+
-struct simple_custom_op final : migraphx::experimental_custom_op_base
+struct half_copy_host final : migraphx::experimental_custom_op_base
 {
-    virtual std::string name() const override { return "simple_custom_op"; }
+    virtual std::string name() const override { return "half_copy_host"; }
+    virtual bool runs_on_offload_target() const override { return false; }
     virtual migraphx::argument
     compute(migraphx::context ctx, migraphx::shape, migraphx::arguments inputs) const override
     {
-        // sets first half size_bytes of the input 0, and rest of the half bytes are copied.
-        int* h_output    = nullptr;
-        auto* d_output   = reinterpret_cast<int*>(inputs[0].data());
-        auto input_bytes = inputs[0].get_shape().bytes();
-        auto* output_ptr = inputs[1].data();
-        auto copy_bytes  = input_bytes / 2;
-        MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
-        MIGRAPHX_HIP_ASSERT(hipHostMalloc(&h_output, input_bytes));
-        MIGRAPHX_HIP_ASSERT(hipMemcpyAsync(
-            h_output, d_output, input_bytes, hipMemcpyDeviceToHost, ctx.get_queue<hipStream_t>()));
-        MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
-        MIGRAPHX_HIP_ASSERT(hipMemset(h_output, 0, copy_bytes));
-        MIGRAPHX_HIP_ASSERT(hipMemcpy(output_ptr, h_output, input_bytes, hipMemcpyHostToDevice));
-        MIGRAPHX_HIP_ASSERT(hipHostFree(h_output));
+        // This custom op simply sets the first half size_bytes of the input to 0, and the rest of
+        // the bytes are copied. This custom_op does its computation on the host, so
+        // `runs_on_offload_target()` is set to false. MIGraphX injects the necessary buffer copies
+        // between GPU and host based on the `runs_on_offload_target()` flag for the input buffers
+        // as well as the output buffers.
+        auto* input_buffer_ptr  = inputs[0].data();
+        auto* output_buffer_ptr = inputs[1].data();
+        auto input_bytes        = inputs[0].get_shape().bytes();
+        auto copy_bytes         = input_bytes / 2;
+        MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
+        MIGRAPHX_HIP_ASSERT(hipMemcpyAsync(output_buffer_ptr,
+                                           input_buffer_ptr,
+                                           input_bytes,
+                                           hipMemcpyHostToHost,
+                                           ctx.get_queue<hipStream_t>()));
+        MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
+        MIGRAPHX_HIP_ASSERT(hipMemset(output_buffer_ptr, 0, copy_bytes));
+        MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
         return inputs[1];
     }
     virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
     {
-        if(not inputs[0].standard())
+        if(not inputs[0].standard() or not inputs[1].standard())
         {
-            throw std::runtime_error("first arg must be standard shaped");
+            throw std::runtime_error("Input args must be standard shaped");
         }
         if(inputs.size() != 2)
         {
             throw std::runtime_error("number of inputs must be 2");
         }
         return inputs.back();
     }
 };
struct half_copy_device final : migraphx::experimental_custom_op_base
{
    virtual std::string name() const override { return "half_copy_device"; }
    virtual bool runs_on_offload_target() const override { return true; }
    virtual migraphx::argument
    compute(migraphx::context ctx, migraphx::shape, migraphx::arguments inputs) const override
    {
        // This custom op simply sets the first half size_bytes of the input to 0, and the rest of
        // the bytes are copied. This custom_op does its computation on the "GPU", so
        // `runs_on_offload_target()` is set to "true".
        auto* input_buffer_ptr  = inputs[0].data();
        auto* output_buffer_ptr = inputs[1].data();
        auto input_bytes        = inputs[0].get_shape().bytes();
        auto copy_bytes         = input_bytes / 2;
        MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
        MIGRAPHX_HIP_ASSERT(hipMemcpyAsync(output_buffer_ptr,
                                           input_buffer_ptr,
                                           input_bytes,
                                           hipMemcpyDeviceToDevice,
                                           ctx.get_queue<hipStream_t>()));
        MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
        MIGRAPHX_HIP_ASSERT(hipMemset(output_buffer_ptr, 0, copy_bytes));
        MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
        return inputs[1];
    }
    virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
    {
        if(not inputs[0].standard() or not inputs[1].standard())
        {
            throw std::runtime_error("Input args must be standard shaped");
        }
        if(inputs.size() != 2)
        {

@@ -67,36 +116,208 @@ struct simple_custom_op final : migraphx::experimental_custom_op_base
        }
    }
};
-TEST_CASE(run_simple_custom_op)

// overwrites input buffer
struct half_copy_device_same_buffer final : migraphx::experimental_custom_op_base
{
    virtual std::string name() const override { return "half_copy_device_same_buffer"; }
    virtual bool runs_on_offload_target() const override { return true; }
    virtual migraphx::argument
    compute(migraphx::context, migraphx::shape, migraphx::arguments inputs) const override
    {
        // This custom op simply sets the first half size_bytes of the input to 0, and the rest of
        // the bytes are copied. This custom_op does its computation on the "device", so
        // `runs_on_offload_target()` is set to "true".
        auto* buffer_ptr = inputs[0].data();
        auto input_bytes = inputs[0].get_shape().bytes();
        auto copy_bytes  = input_bytes / 2;
        MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
        MIGRAPHX_HIP_ASSERT(hipMemset(buffer_ptr, 0, copy_bytes));
        MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
        return inputs[0];
    }
    virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
    {
        if(not inputs[0].standard())
        {
            throw std::runtime_error("Input arg must be standard shaped");
        }
        return inputs.front();
    }
};
TEST_CASE(register_half_copy_op)
{
    half_copy_host hch;
    migraphx::register_experimental_custom_op(hch);
    auto op = migraphx::operation("half_copy_host");
    EXPECT(op.name() == "half_copy_host");

    half_copy_device hcd;
    migraphx::register_experimental_custom_op(hcd);
    op = migraphx::operation("half_copy_device");
    EXPECT(op.name() == "half_copy_device");

    half_copy_device_same_buffer hcdsb;
    migraphx::register_experimental_custom_op(hcdsb);
    op = migraphx::operation("half_copy_device_same_buffer");
    EXPECT(op.name() == "half_copy_device_same_buffer");
}
TEST_CASE(half_copy_custom_op_test)
{
    auto run_test_prog = [](const std::string& op_name, bool buffer_alloc) {
        migraphx::program p;
        migraphx::module m = p.get_main_module();
        migraphx::shape s{migraphx_shape_float_type, {4, 3}};
        auto x = m.add_parameter("x", s);
        migraphx::instructions inputs = {x};
        if(buffer_alloc)
        {
            auto alloc = m.add_allocation(s);
            inputs     = {x, alloc};
        }
        auto half_copy_ins = m.add_instruction(migraphx::operation(op_name.c_str()), inputs);
        m.add_return({half_copy_ins});
        migraphx::compile_options options;
        options.set_offload_copy();
        p.compile(migraphx::target("gpu"), options);
        migraphx::program_parameters pp;
        std::vector<float> x_data(12);
        std::iota(x_data.begin(), x_data.end(), 0);
        pp.add("x", migraphx::argument(s, x_data.data()));
        auto results    = p.eval(pp);
        auto result     = results[0];
        auto result_vec = result.as_vector<float>();
        std::vector<float> expected_result(12, 0);
        std::iota(expected_result.begin() + 6, expected_result.end(), 6);
        EXPECT(bool{result == migraphx::argument(s, expected_result.data())});
    };
    // register all the ops
    half_copy_host hch;
    migraphx::register_experimental_custom_op(hch);
    half_copy_device hcd;
    migraphx::register_experimental_custom_op(hcd);
    half_copy_device_same_buffer hcdsb;
    migraphx::register_experimental_custom_op(hcdsb);
    std::vector<std::pair<std::string, bool>> tests_config = {
        {"half_copy_host", true},
        {"half_copy_device", true},
        {"half_copy_device_same_buffer", false}};
    for(const auto& i : tests_config)
    {
        run_test_prog(i.first, i.second);
    }
}
struct stride_two final : migraphx::experimental_custom_op_base
{
    virtual std::string name() const override { return "stride_two"; }
    virtual migraphx::argument
    compute(migraphx::context, migraphx::shape out_shape, migraphx::arguments inputs) const override
    {
        return {out_shape, inputs[0].data()};
    }
    virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
    {
        if(inputs.size() != 1)
        {
            throw std::runtime_error("stride_two op must have only one input argument");
        };
        if(not inputs[0].standard())
        {
            throw std::runtime_error("stride_two op only works on the standard input shapes");
        }
        migraphx::shape input_s      = inputs[0];
        std::vector<size_t> dims     = input_s.lengths();
        std::vector<size_t> new_dims;
        std::vector<size_t> strides  = input_s.strides();
        std::vector<size_t> new_strides;
        std::for_each(dims.begin(), dims.end(), [&](auto i) { new_dims.push_back(i / 2); });
        std::for_each(strides.begin(), strides.end(), [&](auto i) { new_strides.push_back(i * 2); });
        migraphx::shape output_shape{input_s.type(), new_dims, new_strides};
        return output_shape;
    }
    virtual bool runs_on_offload_target() const override { return true; }
    virtual std::vector<size_t> output_alias(migraphx::shapes) const override { return {0}; };
};
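Why the stride_two test below expects {0, 2, 8, 10, 32, 34, 40, 42}: a standard {4, 4, 4} shape has strides {16, 4, 1}; stride_two halves the dims to {2, 2, 2} and doubles the strides to {32, 8, 2}, so for an iota-filled input the visible elements sit at offsets 32*i + 8*j + 2*k with i, j, k in {0, 1}, which is exactly 0, 2, 8, 10, 32, 34, 40, 42.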
TEST_CASE(stride_two_custom_op_test)
{
-    simple_custom_op simple_op;
-    migraphx::register_experimental_custom_op(simple_op);
     stride_two st;
     migraphx::register_experimental_custom_op(st);
     migraphx::program p;
     migraphx::module m = p.get_main_module();
     migraphx::shape s{migraphx_shape_float_type, {4, 4, 4}};
     auto x              = m.add_parameter("x", s);
     auto stride_two_ins = m.add_instruction(migraphx::operation("stride_two"), {x});
     m.add_return({stride_two_ins});
     migraphx::compile_options options;
     options.set_offload_copy();
     p.compile(migraphx::target("gpu"), options);
     migraphx::program_parameters pp;
     std::vector<float> x_data(64);
     std::iota(x_data.begin(), x_data.end(), 0);
     pp.add("x", migraphx::argument(s, x_data.data()));
     auto results    = p.eval(pp);
     auto result     = results[0];
     auto result_vec = result.as_vector<float>();
     std::vector<float> expected_result = {0, 2, 8, 10, 32, 34, 40, 42};
     EXPECT(result_vec == expected_result);
 }
TEST_CASE(custom_op_with_pre_and_post_subgraph_test)
{
     half_copy_host hco;
     migraphx::register_experimental_custom_op(hco);
     stride_two st;
     migraphx::register_experimental_custom_op(st);
     migraphx::program p;
-    migraphx::shape s{migraphx_shape_int32_type, {4, 3}};
-    migraphx::shape trans_shape{migraphx_shape_int32_type, {3, 4}};
+    migraphx::shape s{migraphx_shape_float_type, {4, 6}};
     migraphx::module m = p.get_main_module();
     auto x = m.add_parameter("x", s);
-    auto neg       = m.add_instruction(migraphx::operation("neg"), x);
-    auto alloc     = m.add_allocation(trans_shape);
-    auto neg_trans =
-        m.add_instruction(migraphx::operation("transpose", "{permutation: [1, 0]}"), {neg});
-    auto neg_cont      = m.add_instruction(migraphx::operation("contiguous"), {neg_trans});
-    auto custom_kernel =
-        m.add_instruction(migraphx::operation("simple_custom_op"), {neg_cont, alloc});
-    auto relu = m.add_instruction(migraphx::operation("relu"), custom_kernel);
-    m.add_return({relu});
+    // pre-subgraph
+    auto neg_ins   = m.add_instruction(migraphx::operation("neg"), x);
+    auto trans_ins =
+        m.add_instruction(migraphx::operation("transpose", "{permutation: [1, 0]}"), {neg_ins});
+    auto cont_ins = m.add_instruction(migraphx::operation("contiguous"), {trans_ins});
+    // custom_op
+    migraphx::shape trans_shape{migraphx_shape_float_type, {6, 4}};
+    auto alloc         = m.add_allocation(trans_shape);
+    auto half_copy_ins =
+        m.add_instruction(migraphx::operation("half_copy_host"), {cont_ins, alloc});
+    // post-subgraph
+    auto abs_ins = m.add_instruction(migraphx::operation("abs"), {half_copy_ins});
+    // another custom_op
+    auto stride_two_ins = m.add_instruction(migraphx::operation("stride_two"), {abs_ins});
+    // post-subgraph
+    auto relu_ins = m.add_instruction(migraphx::operation("relu"), {stride_two_ins});
+    m.add_return({relu_ins});
     migraphx::compile_options options;
     options.set_offload_copy();
     p.compile(migraphx::target("gpu"), options);
     migraphx::program_parameters pp;
-    std::vector<int> x_data(12, -3);
+    std::vector<float> x_data(s.elements());
+    std::iota(x_data.begin(), x_data.end(), 0);
     pp.add("x", migraphx::argument(s, x_data.data()));
     auto results    = p.eval(pp);
     auto result     = results[0];
-    auto result_vec = result.as_vector<int>();
-    std::vector<int> expected_result(12, 0);
-    std::fill(expected_result.begin() + 6, expected_result.end(), 3);
-    EXPECT(bool{result == migraphx::argument(trans_shape, expected_result.data())});
+    auto result_vec = result.as_vector<float>();
+    std::vector<float> expected_result = {0, 0, 0, 0, 4, 16};
+    EXPECT(bool{result ==
+                migraphx::argument(migraphx::shape{migraphx_shape_float_type, {3, 2}},
                                    expected_result.data())});
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/api/test_gpu.cpp

@@ -25,6 +25,8 @@
 #include <hip/hip_runtime_api.h>
 #include <migraphx/migraphx.h>
 #include <migraphx/migraphx.hpp>
+#include <migraphx/manage_ptr.hpp>
 #include "test.hpp"

 TEST_CASE(load_and_run)

@@ -44,11 +46,67 @@ TEST_CASE(load_and_run)
     {
         pp.add(name, migraphx::argument::generate(param_shapes[name]));
     }
     auto outputs = p.eval(pp);
     CHECK(shapes_before.size() == outputs.size());
     CHECK(bool{shapes_before.front() == outputs.front().get_shape()});
 }

+using hip_ptr    = MIGRAPHX_MANAGE_PTR(void, hipFree);
+using stream_ptr = MIGRAPHX_MANAGE_PTR(hipStream_t, hipStreamDestroy);
+
+stream_ptr get_stream()
+{
+    hipStream_t stream;
+    auto err = hipStreamCreateWithFlags(&stream, 0);
+    EXPECT(err == hipSuccess);
+    return stream_ptr{stream};
+}
+
+hip_ptr get_hip_buffer(size_t size)
+{
+    void* ptr;
+    auto err = hipMalloc(&ptr, size);
+    EXPECT(err == hipSuccess);
+    return hip_ptr{ptr};
+}
+
+TEST_CASE(load_and_run_async)
+{
+    auto p             = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");
+    auto shapes_before = p.get_output_shapes();
+    migraphx::compile_options options;
+    options.set_offload_copy(false);
+    p.compile(migraphx::target("gpu"), options);
+    auto shapes_after = p.get_output_shapes();
+    CHECK(shapes_before.size() == 1);
+    CHECK(shapes_before.size() == shapes_after.size());
+    CHECK(bool{shapes_before.front() == shapes_after.front()});
+    migraphx::program_parameters pp;
+    auto param_shapes = p.get_parameter_shapes();
+    stream_ptr stream = get_stream();
+    std::vector<hip_ptr> buffs;
+    std::vector<migraphx::argument> args;
+    for(auto&& name : param_shapes.names())
+    {
+        args.push_back(migraphx::argument::generate(param_shapes[name]));
+        buffs.push_back(get_hip_buffer(args.rbegin()->get_shape().bytes()));
+        auto err = hipMemcpy(buffs.rbegin()->get(),
+                             args.rbegin()->data(),
+                             args.rbegin()->get_shape().bytes(),
+                             hipMemcpyHostToDevice);
+        EXPECT(err == hipSuccess);
+        pp.add(name, migraphx::argument(args.rbegin()->get_shape(), buffs.rbegin()->get()));
+    }
+    auto outputs = p.run_async(pp, stream.get());
+    CHECK(shapes_before.size() == outputs.size());
+    CHECK(bool{shapes_before.front() == outputs.front().get_shape()});
+}
+
 TEST_CASE(load_and_run_ctx)
 {
     auto p = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");

@@ -82,10 +140,10 @@ TEST_CASE(if_pl_test)
     migraphx::program_parameters pp;
     auto param_shapes = p.get_parameter_shapes();
     auto xs           = param_shapes["x"];
-    std::vector<float> xd(xs.bytes() / sizeof(float), 1.0);
+    std::vector<float> xd(xs.elements(), 1.0);
     pp.add("x", migraphx::argument(xs, xd.data()));
     auto ys = param_shapes["y"];
-    std::vector<float> yd(ys.bytes() / sizeof(float), 2.0);
+    std::vector<float> yd(ys.elements(), 2.0);
     pp.add("y", migraphx::argument(ys, yd.data()));
     char ccond = cond;
     pp.add("cond", migraphx::argument(param_shapes["cond"], &ccond));
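A note on the hip_ptr/stream_ptr aliases above: MIGRAPHX_MANAGE_PTR ties a raw C handle to scope-based cleanup, so the HIP buffer and stream are released even if a check fails early. Conceptually it expands to something like the following sketch (not the library's exact definition):

#include <memory>
#include <hip/hip_runtime_api.h>

// A deleter object that frees device memory through hipFree.
struct hip_free_deleter
{
    void operator()(void* p) const { (void)hipFree(p); }
};
// Roughly what MIGRAPHX_MANAGE_PTR(void, hipFree) provides.
using hip_ptr_sketch = std::unique_ptr<void, hip_free_deleter>;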
test/gpu/adjust_allocation.cpp

@@ -40,6 +40,10 @@
 #include <migraphx/make_op.hpp>
 #include <basic_ops.hpp>
 #include <test.hpp>
+#include "make_precompile_op.hpp"
+
+// Treat some operators as compilable to enable lowering
+MIGRAPHX_GPU_TEST_PRECOMPILE("add", "mul", "convert")

 void run_lowering(migraphx::program& p, bool offload_copy = false)
 {

@@ -118,7 +122,7 @@ TEST_CASE(no_copy_dead_param)
     auto xb  = mm->add_instruction(migraphx::make_op("hip::allocate", {{"shape", to_value(s)}}));
     auto gx  = mm->add_instruction(migraphx::make_op("hip::copy_to_gpu"), x, xb);
     auto ab  = mm->add_instruction(migraphx::make_op("hip::allocate", {{"shape", to_value(s)}}));
-    auto sum = mm->add_instruction(migraphx::make_op("gpu::add"), gx, gx, ab);
+    auto sum = mm->add_instruction(make_precompile_op("add"), gx, gx, ab);
     auto r   = mm->add_instruction(migraphx::make_op("hip::copy_from_gpu"), sum);
     mm->add_return({r});
test/gpu/jit.cpp

@@ -307,12 +307,14 @@ TEST_CASE(compile_math)
         "erf(x)",
         "exp(x)",
         "floor(x)",
+        "fmod(x, x)",
         "isnan(x)",
         "log(x)",
         "max(x, x)",
         "min(x, x)",
         "pow(x, 0)",
         "pow(x, x)",
+        "remainder(x,x)",
         "round(x)",
         "rsqrt(x)",
         "sin(x)",
src/targets/gpu/device/gelu.cpp → test/gpu/make_precompile_op.hpp

@@ -21,63 +21,46 @@
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

Removed (the old device GELU implementation):

#include <migraphx/gpu/device/gelu.hpp>
#include <migraphx/gpu/device/nary.hpp>
#include <migraphx/gpu/device/types.hpp>
#include <cmath>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

// x * 0.5 * (1.0 + erf(x / sqrt(2.0)))
template <class T>
auto gelu_fn(T x) __device__
{
    return x * 0.5 * (1 + ::erf(x * M_SQRT1_2));
}

// 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * pow(x, 3))))
template <class T>
auto gelu_fn_new(T x) __device__
{
    return 0.5 * x * (1 + tanh(sqrt(M_2_PI) * (x + 0.044715 * x * x * x)));
}

void gelu(hipStream_t stream, const argument& result, const argument& arg)
{
    nary(stream, result, arg)([](auto x) __device__ { return gelu_fn(to_hip_type(x)); });
}

void gelu_new(hipStream_t stream, const argument& result, const argument& arg)
{
    nary(stream, result, arg)([](auto x) __device__ { return gelu_fn_new(to_hip_type(x)); });
}

void add_gelu(hipStream_t stream,
              const argument& result,
              const argument& arg1,
              const argument& arg2)
{
    nary(stream, result, arg1, arg2)([](auto x, auto y) __device__ {
        auto sum = to_hip_type(x + y);
        return gelu_fn(sum);
    });
}

void add_gelu_new(hipStream_t stream,
                  const argument& result,
                  const argument& arg1,
                  const argument& arg2)
{
    nary(stream, result, arg1, arg2)([](auto x, auto y) __device__ {
        auto sum = to_hip_type(x + y);
        return gelu_fn(sum);
    });
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

Added (the new test header):

#ifndef MIGRAPHX_GUARD_TEST_GPU_MAKE_PRECOMPILE_OP_HPP
#define MIGRAPHX_GUARD_TEST_GPU_MAKE_PRECOMPILE_OP_HPP

#include <migraphx/operation.hpp>
#include <migraphx/gpu/compiler.hpp>
#include <migraphx/make_op.hpp>

// NOLINTNEXTLINE
#define MIGRAPHX_GPU_TEST_PRECOMPILE(...)                                 \
    struct test_compiler : migraphx::gpu::compiler<test_compiler>        \
    {                                                                     \
        std::vector<std::string> names() const { return {__VA_ARGS__}; } \
                                                                          \
        template <class... Ts>                                            \
        migraphx::operation compile_op(Ts&&...) const                     \
        {                                                                 \
            MIGRAPHX_THROW("Not compilable");                             \
        }                                                                 \
                                                                          \
        template <class... Ts>                                            \
        migraphx::gpu::compiler_replace compile(Ts&&...) const            \
        {                                                                 \
            MIGRAPHX_THROW("Not compilable");                             \
        }                                                                 \
    };

inline migraphx::operation make_precompile_op(migraphx::rank<0>, const migraphx::operation& op)
{
    return migraphx::make_op("gpu::precompile_op", {{"op", migraphx::to_value(op)}});
}

inline migraphx::operation make_precompile_op(migraphx::rank<1>, const std::string& name)
{
    return make_precompile_op(migraphx::rank<0>{}, migraphx::make_op(name));
}

template <class T>
auto make_precompile_op(const T& x)
{
    return make_precompile_op(migraphx::rank<1>{}, x);
}

#endif // MIGRAPHX_GUARD_TEST_GPU_MAKE_PRECOMPILE_OP_HPP
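make_precompile_op uses tag dispatch via migraphx::rank (rank<1> derives from rank<0>), so a string argument selects the by-name overload while a migraphx::operation falls through to the rank<0> overload; both wrap the op in gpu::precompile_op. The updated tests below use both forms, e.g.:

auto by_name = make_precompile_op("add");
auto by_op   = make_precompile_op(migraphx::make_op("add"));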
test/gpu/mlir.cpp

@@ -37,10 +37,6 @@
 #include <migraphx/functional.hpp>
 #include <test.hpp>

-using migraphx::trim;
-
-// m test_gpu_mlir && ./bin/test_gpu_mlir
-
 struct mlir_gpu_target : migraphx::gpu::target
 {
     std::string name() const { return "mlir"; }
test/gpu/pack_int8_args.cpp

@@ -30,6 +30,7 @@
 #include <migraphx/adjust_allocation.hpp>
 #include <migraphx/gpu/pack_int8_args.hpp>
 #include <migraphx/gpu/rocblas.hpp>
+#include <migraphx/gpu/device_name.hpp>
 #include <migraphx/auto_contiguous.hpp>
 #include <migraphx/dead_code_elimination.hpp>
 #include <migraphx/replace_allocate.hpp>

@@ -38,10 +39,13 @@
 #include <migraphx/pass_manager.hpp>
 #include <migraphx/make_op.hpp>
 #include <test.hpp>
+#include "make_precompile_op.hpp"

-void run_passes(migraphx::module& m)
+// Treat some operators as compilable to enable lowering
+MIGRAPHX_GPU_TEST_PRECOMPILE("add", "mul", "convert")
+
+void run_passes(migraphx::module& m, migraphx::gpu::context& ctx)
 {
-    auto ctx = migraphx::gpu::context{};
     migraphx::run_passes(
         m,
         {migraphx::auto_contiguous{},
          migraphx::gpu::lowering{&ctx, false},

@@ -52,18 +56,6 @@ void run_passes(migraphx::module& m)
          migraphx::dead_code_elimination{}});
 }

-bool get_int8_x4_format()
-{
-    bool int8_x4_format = true;
-#if ROCBLAS_VERSION_MAJOR >= 2 && ROCBLAS_VERSION_MINOR >= 38
-    auto ctx = migraphx::gpu::context{};
-    rocblas_gemm_flags flag;
-    rocblas_query_int8_layout_flag(ctx.get_stream().get_rocblas(), &flag);
-    int8_x4_format = (flag == rocblas_gemm_flags_pack_int8x4);
-#endif
-    return int8_x4_format;
-}
-
 TEST_CASE(quant_dot)
 {
     auto create_module = [] {
@@ -102,11 +94,13 @@ TEST_CASE(quant_dot)
migraphx
::
make_op
(
"hip::allocate"
,
{{
"shape"
,
migraphx
::
to_value
(
m2_shape
)}}));
packa
=
m
.
add_instruction
(
migraphx
::
make_op
(
"gpu::int8_gemm_pack_a"
),
l2
,
alloc
);
}
auto
gemm
=
m
.
add_instruction
(
migraphx
::
make_op
(
"gpu::quant_gemm"
,
{{
"int8_x4_format"
,
int8_x4
}}),
l1
,
packa
,
gemm_alloc
);
auto
gemm
=
m
.
add_instruction
(
migraphx
::
make_op
(
"gpu::quant_gemm"
,
{{
"int8_x4_format"
,
int8_x4
},
{
"compute_fp32"
,
migraphx
::
gpu
::
get_compute_fp32_flag
()}}),
l1
,
packa
,
gemm_alloc
);
auto
beta_broadcast
=
m
.
add_instruction
(
migraphx
::
make_op
(
"multibroadcast"
,
{{
"out_lens"
,
m3_shape
.
lens
()}}),
beta
);
...
...
@@ -116,19 +110,19 @@ TEST_CASE(quant_dot)
m
.
add_instruction
(
migraphx
::
make_op
(
"gpu::contiguous"
),
beta_broadcast
,
beta_alloc
);
auto
mul_alloc
=
m
.
add_instruction
(
migraphx
::
make_op
(
"hip::allocate"
,
{{
"shape"
,
migraphx
::
to_value
(
m3_shape
)}}));
auto
m3_beta
=
m
.
add_instruction
(
migraphx
::
make_op
(
"gpu::mul"
),
l3
,
beta_contiguous
,
mul_alloc
);
auto
gemm_add
=
m
.
add_instruction
(
migraphx
::
make_op
(
"gpu::add"
),
gemm
,
m3_beta
,
output
);
auto
m3_beta
=
m
.
add_instruction
(
make_precompile_op
(
"mul"
),
l3
,
beta_contiguous
,
mul_alloc
);
auto
gemm_add
=
m
.
add_instruction
(
make_precompile_op
(
"add"
),
gemm
,
m3_beta
,
output
);
m
.
add_return
({
gemm_add
});
return
m
;
};
auto
m1
=
create_module
();
run_passes
(
m1
);
auto
m1
=
create_module
();
auto
ctx
=
migraphx
::
gpu
::
context
{};
run_passes
(
m1
,
ctx
);
bool
flag
=
get_int8_x4_format
();
auto
m2
=
create_optimized_int8_x4
(
flag
);
bool
int8_x4
=
migraphx
::
gpu
::
get_int8_x4_format
(
ctx
);
auto
m2
=
create_optimized_int8_x4
(
int8_x4
);
EXPECT
(
m1
==
m2
);
}
...
...
@@ -187,21 +181,23 @@ TEST_CASE(quant_dot_trans)
     // back result to int8
     auto tl1_convert_alloc = m.add_instruction(migraphx::make_op(
         "hip::allocate", {{"shape", migraphx::to_value(alpha_contiguous->get_shape())}}));
-    auto tl1_convert = m.add_instruction(
-        migraphx::make_op("gpu::convert", {{"target_type", alpha->get_shape().type()}}),
-        conta, tl1_convert_alloc);
+    auto tl1_convert = m.add_instruction(
+        make_precompile_op(
+            migraphx::make_op("convert", {{"target_type", alpha->get_shape().type()}})),
+        conta, tl1_convert_alloc);
     auto mul_alloc = m.add_instruction(migraphx::make_op(
         "hip::allocate", {{"shape", migraphx::to_value(tl1_convert->get_shape())}}));
-    auto tl1_alpha_int32 = m.add_instruction(
-        migraphx::make_op("gpu::mul"), alpha_contiguous, tl1_convert, mul_alloc);
+    auto tl1_alpha_int32 = m.add_instruction(
+        make_precompile_op("mul"), alpha_contiguous, tl1_convert, mul_alloc);
     // convert mul_res to int8
     auto tl1_alpha_int8_alloc = m.add_instruction(migraphx::make_op(
         "hip::allocate", {{"shape", migraphx::to_value(conta->get_shape())}}));
-    auto tl1_alpha_int8 = m.add_instruction(
-        migraphx::make_op("gpu::convert", {{"target_type", conta->get_shape().type()}}),
-        tl1_alpha_int32, tl1_alpha_int8_alloc);
+    auto tl1_alpha_int8 = m.add_instruction(
+        make_precompile_op(
+            migraphx::make_op("convert", {{"target_type", conta->get_shape().type()}})),
+        tl1_alpha_int32, tl1_alpha_int8_alloc);
     auto packb = contb;
     if(int8_x4)

@@ -211,21 +207,24 @@ TEST_CASE(quant_dot_trans)
         packb = m.add_instruction(migraphx::make_op("gpu::int8_gemm_pack_a"), contb, allocpb);
     }
-    auto gemm = m.add_instruction(
-        migraphx::make_op("gpu::quant_gemm", {{"int8_x4_format", int8_x4}}),
-        tl1_alpha_int8, packb, output);
+    auto gemm = m.add_instruction(
+        migraphx::make_op("gpu::quant_gemm",
+                          {{"int8_x4_format", int8_x4},
+                           {"compute_fp32", migraphx::gpu::get_compute_fp32_flag()}}),
+        tl1_alpha_int8, packb, output);
     m.add_return({gemm});
     return m;
 };

-auto m1   = create_module();
-run_passes(m1);
-bool flag = get_int8_x4_format();
-auto m2   = create_optimized_int8_x4(flag);
+auto m1  = create_module();
+auto ctx = migraphx::gpu::context{};
+run_passes(m1, ctx);
+bool int8_x4 = migraphx::gpu::get_int8_x4_format(ctx);
+auto m2      = create_optimized_int8_x4(int8_x4);
 EXPECT(m1 == m2);
 }
@@ -292,11 +291,13 @@ TEST_CASE(quant_dot_pad)
         packa = m.add_instruction(migraphx::make_op("gpu::int8_gemm_pack_a"), pl2, alloc);
     }
-    auto gemm = m.add_instruction(
-        migraphx::make_op("gpu::quant_gemm", {{"int8_x4_format", int8_x4}}),
-        pl1, packa, gemm_alloc);
+    auto gemm = m.add_instruction(
+        migraphx::make_op("gpu::quant_gemm",
+                          {{"int8_x4_format", int8_x4},
+                           {"compute_fp32", migraphx::gpu::get_compute_fp32_flag()}}),
+        pl1, packa, gemm_alloc);
     auto beta_broadcast = m.add_instruction(
         migraphx::make_op("multibroadcast", {{"out_lens", s3.lens()}}), beta);

@@ -306,18 +307,18 @@ TEST_CASE(quant_dot_pad)
     m.add_instruction(migraphx::make_op("gpu::contiguous"), beta_broadcast, beta_alloc);
     auto mul_alloc = m.add_instruction(
         migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(s3)}}));
-    auto m3_beta  = m.add_instruction(migraphx::make_op("gpu::mul"), l3, beta_contiguous, mul_alloc);
-    auto gemm_add = m.add_instruction(migraphx::make_op("gpu::add"), gemm, m3_beta, output);
+    auto m3_beta  = m.add_instruction(make_precompile_op("mul"), l3, beta_contiguous, mul_alloc);
+    auto gemm_add = m.add_instruction(make_precompile_op("add"), gemm, m3_beta, output);
     m.add_return({gemm_add});
     return m;
 };

-auto m1   = create_module();
-run_passes(m1);
-bool flag = get_int8_x4_format();
-auto m2   = create_optimized_int8_x4(flag);
+auto m1  = create_module();
+auto ctx = migraphx::gpu::context{};
+run_passes(m1, ctx);
+bool int8_x4 = migraphx::gpu::get_int8_x4_format(ctx);
+auto m2      = create_optimized_int8_x4(int8_x4);
 EXPECT(m1 == m2);
 }
@@ -396,14 +397,15 @@ TEST_CASE(quant_dot_trans_pad)
     // back result to int8
     auto tl1_convert_alloc = m.add_instruction(migraphx::make_op(
         "hip::allocate", {{"shape", migraphx::to_value(alpha_contiguous->get_shape())}}));
-    auto tl1_convert = m.add_instruction(
-        migraphx::make_op("gpu::convert", {{"target_type", alpha->get_shape().type()}}),
-        conta, tl1_convert_alloc);
+    auto tl1_convert = m.add_instruction(
+        make_precompile_op(
+            migraphx::make_op("convert", {{"target_type", alpha->get_shape().type()}})),
+        conta, tl1_convert_alloc);
     auto mul_alloc = m.add_instruction(migraphx::make_op(
         "hip::allocate", {{"shape", migraphx::to_value(tl1_convert->get_shape())}}));
-    auto tl1_alpha_int32 = m.add_instruction(
-        migraphx::make_op("gpu::mul"), alpha_contiguous, tl1_convert, mul_alloc);
+    auto tl1_alpha_int32 = m.add_instruction(
+        make_precompile_op("mul"), alpha_contiguous, tl1_convert, mul_alloc);
     // convert mul_res to int8
     auto tl1_alpha_int8_alloc = m.add_instruction(migraphx::make_op(
         "hip::allocate", {{"shape", migraphx::to_value(conta->get_shape())}}));

@@ -415,10 +417,11 @@ TEST_CASE(quant_dot_trans_pad)
             migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(ps1)}}));
     }
-    auto tl1_alpha_int8 = m.add_instruction(
-        migraphx::make_op("gpu::convert", {{"target_type", conta->get_shape().type()}}),
-        tl1_alpha_int32, tl1_alpha_int8_alloc);
+    auto tl1_alpha_int8 = m.add_instruction(
+        make_precompile_op(
+            migraphx::make_op("convert", {{"target_type", conta->get_shape().type()}})),
+        tl1_alpha_int32, tl1_alpha_int8_alloc);
     auto pa = tl1_alpha_int8;
     if(int8_x4)

@@ -438,17 +441,23 @@ TEST_CASE(quant_dot_trans_pad)
     }
-    auto gemm = m.add_instruction(
-        migraphx::make_op("gpu::quant_gemm", {{"int8_x4_format", int8_x4}}),
-        pa, packb, output);
+    auto gemm = m.add_instruction(
+        migraphx::make_op("gpu::quant_gemm",
+                          {{"int8_x4_format", int8_x4},
+                           {"compute_fp32", migraphx::gpu::get_compute_fp32_flag()}}),
+        pa, packb, output);
     m.add_return({gemm});
     return m;
 };

-auto m1   = create_module();
-run_passes(m1);
-bool flag = get_int8_x4_format();
-auto m2   = create_optimized_int8_x4(flag);
+auto m1  = create_module();
+auto ctx = migraphx::gpu::context{};
+run_passes(m1, ctx);
+bool int8_x4 = migraphx::gpu::get_int8_x4_format(ctx);
+auto m2      = create_optimized_int8_x4(int8_x4);
 EXPECT(m1 == m2);
 }
test/gpu/stream_sync.cpp — new file (0 → 100644)

/* The MIT License (MIT) — Copyright (c) 2015-2022 Advanced Micro Devices, Inc.
 * (same license header as shown in full above) */
#include <iostream>
#include <vector>
#include <migraphx/gpu/context.hpp>
#include <migraphx/context.hpp>
#include <migraphx/gpu/compile_hip.hpp>
#include <migraphx/gpu/kernel.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/par_for.hpp>
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/module.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/gpu/target.hpp>
#include "test.hpp"

using hip_stream_ptr = MIGRAPHX_MANAGE_PTR(hipStream_t, hipStreamDestroy);

constexpr uint32_t stream_sync_test_val = 1337;

// NOLINTNEXTLINE
const std::string compare_numbers = R"__migraphx__(
#include <hip/hip_runtime.h>

extern "C" {
__global__ void compare(float* data)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (data[i] != 1337)
    {
        abort();
    }
}
}

int main() {}
)__migraphx__";

migraphx::src_file make_src_file(const std::string& name, const std::string& content)
{
    return {name, std::make_pair(content.data(), content.data() + content.size())};
}

hip_stream_ptr get_stream()
{
    hipStream_t stream;
    auto status = hipStreamCreate(&stream);
    if(status != hipSuccess)
    {
        MIGRAPHX_THROW("Failed to get stream");
    }
    return hip_stream_ptr{stream};
}

TEST_CASE(test_stream_sync_compare_kernel)
{
    auto binaries = migraphx::gpu::compile_hip_src(
        {make_src_file("check_stuff.cpp", compare_numbers)}, "", migraphx::gpu::get_device_name());
    EXPECT(binaries.size() == 1);
    migraphx::gpu::kernel k1{binaries.front(), "compare"};
    auto input =
        migraphx::fill_argument({migraphx::shape::float_type, {128}}, stream_sync_test_val);
    auto ginput            = migraphx::gpu::to_gpu(input);
    hip_stream_ptr pstream = get_stream();
    k1.launch(pstream.get(), input.get_shape().elements(), 1024)(ginput.cast<float>());
    auto output = migraphx::gpu::from_gpu(ginput);
    EXPECT(output == input);
}

TEST_CASE(test_stream_sync)
{
    auto binaries = migraphx::gpu::compile_hip_src(
        {make_src_file("check_stuff.cpp", compare_numbers)}, "", migraphx::gpu::get_device_name());
    EXPECT(binaries.size() == 1);
    migraphx::gpu::kernel k1{binaries.front(), "compare"};

    const unsigned int m = 128;
    const unsigned int k = 8192;

    // Setup empty GPU memory buffer
    migraphx::shape input_shape{migraphx::shape::float_type, {m, k}};
    migraphx::shape output_shape{migraphx::shape::float_type, {m, m}};
    auto input   = migraphx::fill_argument(input_shape, 0);
    auto ginput  = migraphx::gpu::to_gpu(input);
    auto output  = migraphx::fill_argument(output_shape, 0);
    auto goutput = migraphx::gpu::to_gpu(output);

    hip_stream_ptr pstream = get_stream();

    migraphx::program p;
    auto* mm = p.get_main_module();
    auto x   = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {m, k}});
    auto y   = mm->add_literal(
        migraphx::generate_literal(migraphx::shape{migraphx::shape::float_type, {k, m}}));
    std::vector<float> data(m * m, stream_sync_test_val);
    auto test_val = mm->add_literal(output_shape, data);
    auto mult_out = mm->add_instruction(migraphx::make_op("dot"), x, y);
    mm->add_instruction(migraphx::make_op("add"), mult_out, test_val);
    p.compile(migraphx::gpu::target{});

    // Run network and then verify with kernel
    auto args = p.eval({{"x", ginput}, {"output", goutput}}, {pstream.get(), true});
    k1.launch(pstream.get(), m * m, 1024)(goutput.cast<float>());
    output = migraphx::gpu::from_gpu(goutput);
    EXPECT(output != input);
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
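What test_stream_sync exercises: work enqueued on one HIP stream executes in submission order, so the verification kernel can be launched right after p.eval(...) on the same stream without an explicit hipStreamSynchronize in between. A minimal HIP-only sketch of that ordering property (independent of MIGraphX; kernel names are illustrative):

#include <hip/hip_runtime.h>

__global__ void fill(float* p) { p[threadIdx.x] = 1337.0f; }
__global__ void check(float* p)
{
    if(p[threadIdx.x] != 1337.0f)
        abort();
}

int main()
{
    hipStream_t s;
    (void)hipStreamCreate(&s);
    float* d = nullptr;
    (void)hipMalloc(&d, 64 * sizeof(float));
    fill<<<1, 64, 0, s>>>(d);  // producer
    check<<<1, 64, 0, s>>>(d); // consumer: same stream, so it observes the fill
    (void)hipStreamSynchronize(s);
    (void)hipFree(d);
    (void)hipStreamDestroy(s);
}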
test/memory_coloring_test.cpp

@@ -724,7 +724,7 @@ TEST_CASE(test39)
     auto sub_modules = p.get_modules();
     std::reverse(sub_modules.begin(), sub_modules.end());
-    for(auto& smod : sub_modules)
+    for(const auto& smod : sub_modules)
     {
         run_pass(*smod);
     }
test/onnx/batch_norm_1d_test.onnx — new file (binary ONNX: a BatchNormalization node with inputs x, scale, bias, mean, variance and output y)

test/onnx/batch_norm_2d_test.onnx — new file (binary ONNX: the same BatchNormalization layout for the 2-d case)

test/onnx/batch_norm_3d_test.onnx — new file (binary ONNX: BatchNormalization with an epsilon attribute, 3-d case)

test/onnx/batch_norm_flat_test.onnx — new file (binary ONNX: BatchNormalization with an epsilon attribute, flat input)

test/onnx/batch_norm_invalid_bias_rank_test.onnx — new file (binary ONNX: BatchNormalization with an invalid bias rank, for error-path testing)

test/onnx/batch_norm_invalid_rank_test.onnx — new file (binary ONNX: BatchNormalization with an invalid input rank, for error-path testing)