"git@developer.sourcefind.cn:orangecat/ollama.git" did not exist on "3a9f4471412cf132f0fa7b872cb4f616a1dd8b59"
Unverified commit a5dab12c, authored by turneram, committed by GitHub

Merge branch 'develop' into ck-flash-attn

parents 321367be c7f0fbc4
@@ -281,6 +281,7 @@ jobs:
         -DBUILD_DEV=On \
         -DCMAKE_CXX_COMPILER_LAUNCHER=/usr/local/bin/ccache \
         -DCMAKE_C_COMPILER_LAUNCHER=/usr/local/bin/ccache \
+        -DCMAKE_CXX_FLAGS="-Werror" \
         -DGPU_TARGETS=gfx908 \
         ..
       make -j$(nproc) tests driver
...
@@ -71,7 +71,7 @@ include(ROCMSetupVersion)
 option(BUILD_DEV "Build for development purpose only" OFF)
-rocm_setup_version(VERSION 2.7.0)
+rocm_setup_version(VERSION 2.8.0)
 set(MIGRAPHX_SO_VERSION ${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}.${PROJECT_VERSION_PATCH})
 option( BUILD_SHARED_LIBS "Build as a shared library" ON )
@@ -283,7 +283,6 @@ file(MAKE_DIRECTORY ${DEST_DIR}/lib/onnx_migraphx)
 foreach(py_file ${backend_files})
     configure_file(${py_file} ${DEST_DIR}/lib/onnx_migraphx/. COPYONLY)
 endforeach(py_file)
-configure_file(${CMAKE_SOURCE_DIR}/test/py/onnx_backend_test.py ${DEST_DIR}/onnx_backend_test.py COPYONLY)
 rocm_create_package(
     NAME MIGraphX
...
 def getgputargets() {
-    targets="gfx908;gfx90a;gfx1030;gfx1100;gfx1101;gfx1102"
+    targets="gfx906;gfx908;gfx90a;gfx1030;gfx1100;gfx1101;gfx1102"
     return targets
 }
...
@@ -213,13 +213,13 @@ cpp_generator::function cpp_generator::generate_module(const module& m,
     ins->get_literal().visit([&](auto v) {
         assert(v.size() == 1);
         auto x = v.front();
-        if(std::isinf(x))
+        if(std::isinf(static_cast<double>(x)))
         {
             string_literal = "__builtin_huge_val()";
             if(x < 0)
                 string_literal = "-__builtin_huge_val()";
         }
-        else if(std::isnan(x))
+        else if(std::isnan(static_cast<double>(x)))
             string_literal = "__builtin_nan()";
         else
             string_literal = ins->get_literal().to_string();
...
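The `static_cast<double>` changes in this and the following hunks share one motivation: these call sites are visited through generic lambdas or templates, so `x` may be `float`, `double`, an integer type, or one of MIGraphX's non-native element types, and the cast routes every case through the single `double` overload of the classification functions. This is presumably fallout from the new `-DCMAKE_CXX_FLAGS="-Werror"` CI flag added above. A minimal sketch of the pattern (the helper names are hypothetical, not MIGraphX API):

```cpp
// Casting to double first makes std::isnan/std::isinf resolve to exactly one
// well-defined overload for any arithmetic-convertible element type, instead
// of depending on per-type overload resolution at each call site.
#include <cmath>

template <class T>
bool literal_is_nan(T x) // hypothetical helper for illustration
{
    return std::isnan(static_cast<double>(x));
}

template <class T>
bool literal_is_inf(T x) // hypothetical helper for illustration
{
    return std::isinf(static_cast<double>(x));
}
```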
@@ -68,7 +68,7 @@ struct convert : unary<convert>
         auto y = x;
         shape::visit(type, [&](auto as) {
             // clamping value between target_type's max and min doesn't work for NaNs,
-            if(std::isnan(x))
+            if(std::isnan(static_cast<double>(x)))
             {
                 y = as.nan();
             }
...
@@ -35,7 +35,7 @@ struct isnan : unary<isnan>
 {
     auto apply() const
     {
-        return [](auto x) { return std::isnan(x); };
+        return [](auto x) { return std::isnan(static_cast<double>(x)); };
     }
     std::string name() const { return "isnan"; }
...
@@ -90,8 +90,7 @@ struct not_finite_fn
     template <class T>
     bool operator()(T x) const
     {
-        using std::isfinite;
-        return not isfinite(x);
+        return not std::isfinite(static_cast<double>(x));
     }
 };
 static constexpr not_finite_fn not_finite{};
@@ -101,8 +100,7 @@ struct compare_mag_fn
     template <class T, class U>
     bool operator()(T x, U y) const
     {
-        using std::fabs;
-        return fabs(x) < fabs(y);
+        return std::fabs(x) < std::fabs(y);
     }
 };
 static constexpr compare_mag_fn compare_mag{};
...
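Both functors now call the fully qualified `std::` functions instead of the ADL-enabled `using std::...;` form, matching the qualification style used elsewhere in the diff. A self-contained usage sketch of `compare_mag` (the surrounding tolerance-checking context is assumed):

```cpp
// compare_mag orders values by absolute magnitude, so it can, for example,
// drive std::max_element to find the largest-magnitude entry of a range.
#include <algorithm>
#include <cmath>
#include <vector>

struct compare_mag_fn
{
    template <class T, class U>
    bool operator()(T x, U y) const
    {
        return std::fabs(x) < std::fabs(y);
    }
};

int main()
{
    std::vector<double> v{-3.5, 2.0, 0.5};
    auto it = std::max_element(v.begin(), v.end(), compare_mag_fn{});
    return (*it == -3.5) ? 0 : 1; // -3.5 has the largest magnitude
}
```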
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
struct parse_castlike : op_parser<parse_castlike>
{
std::vector<op_desc> operators() const { return {{"CastLike"}}; }
instruction_ref parse(const op_desc& /*opd*/,
const onnx_parser& /*parser*/,
const onnx_parser::node_info& info,
const std::vector<instruction_ref>& args) const
{
if(not(args.size() == 2))
{
MIGRAPHX_THROW("PARSE_CASTLIKE: CastLike must have exactly 2 inputs!");
}
shape::type_t target_type = args[1]->get_shape().type();
return info.add_instruction(make_op("convert", {{"target_type", target_type}}), args[0]);
}
};
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
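The new parser's behavior in one sentence: `CastLike(x, target)` ignores the contents of `target` and lowers to a plain `convert` whose `target_type` is `target`'s element type. A sketch of the equivalent hand-built program, assuming MIGraphX's internal C++ program-building API:

```cpp
#include <migraphx/make_op.hpp>
#include <migraphx/program.hpp>
#include <migraphx/shape.hpp>

migraphx::program build_castlike_equivalent()
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    // x carries the data; t contributes only its element type (half here)
    auto x = mm->add_parameter("x", {migraphx::shape::float_type, {2, 3}});
    auto t = mm->add_parameter("t", {migraphx::shape::half_type, {1}});
    mm->add_instruction(
        migraphx::make_op("convert", {{"target_type", t->get_shape().type()}}),
        x);
    return p;
}
```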
@@ -49,6 +49,8 @@ struct parse_constant_of_shape : op_parser<parse_constant_of_shape>
         {
             MIGRAPHX_THROW("ConstantOfShape: attribute value can contain only 1 elements!");
         }
+        // convert to a scalar literal
+        l_val = literal(shape{l_val.get_shape().type(), {1}, {0}}, l_val.data());
     }
     else
     {
@@ -64,30 +66,37 @@ struct parse_constant_of_shape : op_parser<parse_constant_of_shape>
         migraphx::shape s;
         // input is empty, output is a scalar
         auto type = l_val.get_shape().type();
-        // empty input tensor, output is a scalar
-        if(args[0]->get_shape().elements() == 0)
+        migraphx::argument input = args[0]->eval();
+        if(not input.empty())
         {
-            s = migraphx::shape{type, {1}, {0}};
+            // empty input tensor, output is a scalar
+            if(args[0]->get_shape().elements() == 0)
+            {
+                s = migraphx::shape{type, {1}, {0}};
+            }
+            else
+            {
+                std::vector<std::size_t> dims;
+                input.visit([&](auto ia) { dims.assign(ia.begin(), ia.end()); });
+                s = migraphx::shape{type, dims};
+            }
+            literal l_out{};
+            l_val.visit([&](auto val) {
+                using val_type = std::remove_cv_t<typename decltype(val)::value_type>;
+                // l_val contains only one element
+                std::vector<val_type> out_vec(s.elements(), val.front());
+                l_out = literal(s, out_vec);
+            });
+            return info.add_literal(l_out);
         }
+        // has variable input (dynamic shape buffer)
         else
         {
-            migraphx::argument in = args[0]->eval();
-            check_arg_empty(in, "ConstantOfShape: dynamic shape is not supported");
-
-            std::vector<std::size_t> dims;
-            in.visit([&](auto input) { dims.assign(input.begin(), input.end()); });
-            s = migraphx::shape{type, dims};
+            auto dv_lit = info.add_literal(l_val);
+            auto alloc_ins =
+                info.add_instruction(make_op("allocate", {{"buf_type", type}}), args[0]);
+            return info.add_instruction(make_op("fill"), dv_lit, alloc_ins);
         }
-        literal l_out{};
-        l_val.visit([&](auto val) {
-            using val_type = std::remove_cv_t<typename decltype(val)::value_type>;
-            // l_val contains only one element
-            std::vector<val_type> out_vec(s.elements(), val.front());
-            l_out = literal(s, out_vec);
-        });
-        return info.add_literal(l_out);
     }
 }
...
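The rewritten parser splits ConstantOfShape into two paths: when the shape input can be evaluated at parse time, the output is precomputed as a literal; otherwise it emits an `allocate` sized at runtime from `args[0]` followed by a `fill` that broadcasts the scalar into the buffer, so dynamic shapes no longer throw. A reference sketch of what the static path computes (plain C++, not MIGraphX code):

```cpp
// The literal path builds a buffer of s.elements() copies of the single
// attribute value; the dynamic path defers exactly this work to a runtime
// "fill" over a runtime-sized allocation.
#include <cstddef>
#include <vector>

template <class T>
std::vector<T> constant_of_shape(T value, std::size_t elements)
{
    return std::vector<T>(elements, value); // every element == value
}
```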
@@ -320,7 +320,8 @@ struct parse_resize : op_parser<parse_resize>
         // get the number of dimensions
         std::size_t n_dim = out_lens.size();
-        auto vvv_ind = std::vector(n_dim, std::vector(2, std::vector<size_t>(out_elements)));
+        std::vector<std::vector<std::size_t>> vv_ind(2, std::vector<std::size_t>(out_elements));
+        std::vector<std::vector<std::vector<std::size_t>>> vvv_ind(n_dim, vv_ind);
         std::vector<std::vector<float>> delta(n_dim, std::vector<float>(out_elements));
         shape_for_each(out_s, [&](const auto& out_idx_v, size_t out_idx) {
...
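The replaced one-liner relied on class template argument deduction for the nested `std::vector` constructors; the new two-step form spells out the element types. Both build the same `n_dim` x `2` x `out_elements` index table, as this standalone sketch shows (the sizes are made up for illustration):

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    std::size_t n_dim = 4, out_elements = 16;
    // explicitly typed form introduced by the diff
    std::vector<std::vector<std::size_t>> vv_ind(
        2, std::vector<std::size_t>(out_elements));
    std::vector<std::vector<std::vector<std::size_t>>> vvv_ind(n_dim, vv_ind);
    assert(vvv_ind.size() == n_dim);
    assert(vvv_ind[0].size() == 2);
    assert(vvv_ind[0][0].size() == out_elements);
}
```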
@@ -347,7 +347,7 @@ void program::finalize()
 template <class T>
 std::string classify(T x)
 {
-    switch(std::fpclassify(x))
+    switch(std::fpclassify(static_cast<double>(x)))
     {
     case FP_INFINITE: return "inf";
     case FP_NAN: return "nan";
...
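The hunk truncates the switch after the first two labels; for reference, a self-contained version covering all `std::fpclassify` categories (the remaining case labels are assumed to mirror the standard ones):

```cpp
#include <cmath>
#include <string>

template <class T>
std::string classify(T x)
{
    // Any element type convertible to double is classified through the
    // single std::fpclassify(double) overload.
    switch(std::fpclassify(static_cast<double>(x)))
    {
    case FP_INFINITE: return "inf";
    case FP_NAN: return "nan";
    case FP_NORMAL: return "normal";
    case FP_SUBNORMAL: return "subnormal";
    case FP_ZERO: return "zero";
    default: return "unknown";
    }
}
```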
@@ -81,6 +81,14 @@ inline auto launch(hipStream_t stream, index_int global, index_int local)
         using f_type = decltype(f);
         dim3 nblocks(global / local);
         dim3 nthreads(local);
+        /*
+        hipGetLastError() returns the error from the first HIP call that failed previously.
+        MIGraphX calls into various backend libraries, and failed HIP calls can also happen
+        there. Calling hipGetLastError() here resets the error code to hipSuccess, so that a
+        failed call to hipLaunchKernelGGL() inside MIGraphX can be captured.
+        */
+        hipError_t flush_call = hipGetLastError();
+        (void)(flush_call);
         // cppcheck-suppress UseDeviceLaunch
         hipLaunchKernelGGL((launcher<f_type>), nblocks, nthreads, 0, stream, f);
         hipError_t kernel_launch_status = hipGetLastError();
...
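The comment added above describes HIP's sticky per-thread error state: `hipGetLastError()` both returns and clears it. A minimal standalone sketch of the flush-then-check pattern (the kernel and wrapper names are illustrative, not MIGraphX code):

```cpp
#include <hip/hip_runtime.h>
#include <stdexcept>

__global__ void noop_kernel() {}

inline void checked_launch(dim3 grid, dim3 block, hipStream_t stream)
{
    // Discard any stale error left over from earlier HIP or library calls...
    (void)hipGetLastError();
    hipLaunchKernelGGL(noop_kernel, grid, block, 0, stream);
    // ...so this call now reports only the launch we just issued.
    hipError_t status = hipGetLastError();
    if(status != hipSuccess)
        throw std::runtime_error(hipGetErrorString(status));
}
```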
[Binary ONNX test protobufs, not rendered as text: castlike_error_test, castlike_test, const_of_shape_default_test, and const_of_shape_dyn_int64_test added; const_of_shape_int64_test (previously constant_of_shape) updated.]