Commit 473881cf authored by Paul

Merge branch 'develop' into jit-layernorm

parents 85247d4d 7271ddbc
......@@ -39,6 +39,8 @@ function(generate_embed_source EMBED_NAME)
file(WRITE "${PARSE_HEADER}" "
#include <unordered_map>
#include <string>
#include <utility>
const std::unordered_map<std::string, std::pair<const char*,const char*>>& ${EMBED_NAME}();
")
......
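The generated header above declares a lookup from an embedded-file name to a pair of const char* values, which is why the commit adds #include <utility> (the header for std::pair). A minimal, self-contained sketch of code built against such a header follows; the accessor name embedded_files is an illustrative stand-in for whatever ${EMBED_NAME} expands to, and the meaning of the two pointers is not shown in this hunk, so placeholders are used.

#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>

// Declaration matching the generated header; "embedded_files" stands in for ${EMBED_NAME}.
const std::unordered_map<std::string, std::pair<const char*, const char*>>& embedded_files();

// Illustrative definition so this sketch links and runs on its own.
const std::unordered_map<std::string, std::pair<const char*, const char*>>& embedded_files()
{
    static const std::unordered_map<std::string, std::pair<const char*, const char*>> m = {
        {"example.hpp", {"placeholder", "placeholder"}}};
    return m;
}

int main()
{
    // List the names of the embedded files.
    for(const auto& f : embedded_files())
        std::cout << f.first << "\n";
    return 0;
}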
tensorflow==2.6.4
tensorflow==2.7.2
onnxruntime
tokenizers
\ No newline at end of file
......@@ -6,6 +6,7 @@
#include <migraphx/stringutils.hpp>
#include <migraphx/op/contiguous.hpp>
#include <migraphx/op/identity.hpp>
#include <migraphx/par_for.hpp>
#include <utility>
namespace migraphx {
......@@ -71,6 +72,8 @@ static bool try_compute_shape(instruction_ref ins,
void eliminate_contiguous::apply(module& m) const
{
std::vector<instruction_ref> const_instruction;
for(auto ins : iterator_for(m))
{
// return instruction should have inputs with standard shape
......@@ -81,6 +84,7 @@ void eliminate_contiguous::apply(module& m) const
auto args = ins->inputs();
auto new_args = args;
auto mod_args = ins->module_inputs();
for(auto arg : ins->inputs())
{
if(arg->name() == op_name)
......@@ -93,15 +97,25 @@ void eliminate_contiguous::apply(module& m) const
}
else if(prev->can_eval())
{
auto c = op::contiguous{};
auto r = c.compute(c.compute_shape({prev->get_shape()}), {prev->eval()});
auto l = m.add_literal(r.get_shape(), r.data());
m.replace_instruction(arg, l);
const_instruction.push_back(arg);
}
}
}
}
// Perform evaluations in parallel
std::vector<argument> literals(const_instruction.size());
par_for(const_instruction.size(), 1, [&](const auto i) {
auto c = op::contiguous{};
auto prev = const_instruction[i]->inputs().front();
literals[i] = c.compute(c.compute_shape({prev->get_shape()}), {prev->eval()});
});
for(size_t i = 0; i < const_instruction.size(); i++)
{
auto l = m.add_literal(literals[i].get_shape(), literals[i].data());
m.replace_instruction(const_instruction[i], l);
}
}
} // namespace MIGRAPHX_INLINE_NS
......
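The eliminate_contiguous change above splits the work into three phases: collect the constant-foldable contiguous arguments while walking the module, evaluate them in parallel with par_for, and only then mutate the module serially by adding the literals and replacing the instructions. A self-contained sketch of that collect / parallel-evaluate / serial-apply pattern follows, using a simple thread-per-index stand-in for MIGraphX's par_for; the real helper from <migraphx/par_for.hpp> also takes a second argument (passed as 1 in the diff above), which this sketch omits.

#include <cstddef>
#include <iostream>
#include <thread>
#include <vector>

// Minimal stand-in for migraphx::par_for: run f(0), ..., f(n - 1), one thread per index.
template <class F>
void par_for(std::size_t n, F f)
{
    std::vector<std::thread> threads;
    for(std::size_t i = 0; i < n; i++)
        threads.emplace_back([=] { f(i); });
    for(auto& t : threads)
        t.join();
}

int main()
{
    // 1. Collect the work single-threaded (ints standing in for instructions).
    std::vector<int> collected = {1, 2, 3, 4};

    // 2. Evaluate in parallel; each slot of `results` is written by exactly one thread.
    std::vector<int> results(collected.size());
    par_for(collected.size(), [&](std::size_t i) { results[i] = collected[i] * collected[i]; });

    // 3. Apply the results serially, since the structure being mutated
    //    (the module, in the pass above) is not safe to modify concurrently.
    for(std::size_t i = 0; i < results.size(); i++)
        std::cout << results[i] << "\n";
    return 0;
}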
......@@ -965,7 +965,7 @@ struct find_gemm_pointwise
inputs.pop_back();
inputs.push_back(c_ins);
inputs.push_back(gemm_ins->inputs().back());
inputs.push_back(ins->inputs().back());
gemm.beta = 1;
m.replace_instruction(ins, gemm, inputs);
......
......@@ -34,6 +34,10 @@ struct code_object_op
f(self.output, "output"));
}
value attributes() const { return {{"group", group()}}; }
std::string group() const { return "gpu::code_object::" + symbol_name; }
std::string name() const { return "gpu::code_object"; }
shape compute_shape(std::vector<shape> inputs) const;
argument
......
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/apply_alpha_beta.hpp>
struct gemm_add : verify_program<gemm_add>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape m1_shape{migraphx::shape::float_type, {1, 2, 3}};
migraphx::shape m2_shape{migraphx::shape::float_type, {1, 3, 4}};
migraphx::shape m3_shape{migraphx::shape::float_type, {1, 2, 4}};
auto l1 = mm->add_parameter("1", m1_shape);
auto l2 = mm->add_parameter("2", m2_shape);
auto l3 = mm->add_parameter("3", m3_shape);
auto dot = mm->add_instruction(migraphx::make_op("dot"), l1, l2);
mm->add_instruction(migraphx::make_op("add"), dot, l3);
return p;
}
};
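This new verify test builds a {1,2,3} x {1,3,4} dot followed by an element-wise add of a {1,2,4} operand, which is the dot-plus-add pattern that the find_gemm_pointwise change above appears to fold into a single GEMM with beta = 1. As a mental model of what the fused kernel has to produce, here is a self-contained sketch of the same computation with plain loops (the batch dimension of 1 dropped for brevity); the numeric values are illustrative.

#include <array>
#include <iostream>

int main()
{
    // A: 2x3, B: 3x4, C: 2x4 -> out = A * B + C (i.e. GEMM with alpha = 1, beta = 1).
    std::array<std::array<double, 3>, 2> a = {{{1, 2, 3}, {4, 5, 6}}};
    std::array<std::array<double, 4>, 3> b = {{{1, 0, 0, 1}, {0, 1, 0, 1}, {0, 0, 1, 1}}};
    std::array<std::array<double, 4>, 2> c = {{{10, 10, 10, 10}, {20, 20, 20, 20}}};

    for(int i = 0; i < 2; i++)
    {
        for(int j = 0; j < 4; j++)
        {
            double acc = c[i][j]; // beta = 1 contribution from the add operand
            for(int k = 0; k < 3; k++)
                acc += a[i][k] * b[k][j];
            std::cout << acc << (j == 3 ? "\n" : " ");
        }
    }
    return 0;
}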
......@@ -27,7 +27,7 @@ elif [ "$#" -eq 1 ]; then
PREFIX=$1
fi
echo "Dependencies are install at $PREFIX"
echo "Dependencies are installed at $PREFIX"
# Install deps with rbuild
rbuild prepare -d $PREFIX -s develop
......@@ -35,3 +35,5 @@ rbuild prepare -d $PREFIX -s develop
# install onnx package for unit tests
pip3 install onnx==1.8.1 numpy==1.18.5 typing==3.7.4 pytest==6.0.1 packaging==16.8
# pin version of protobuf in Python for onnx runtime unit tests
pip3 install protobuf==3.20.0