Commit dd653b52 authored by Scott Thornton

Added test for cpu ops; also refactored ops a bit

parent 8fb333ef
@@ -227,11 +227,11 @@ struct gemm
std::size_t ldc = 1;
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(2).same_type().same_dims().only_dims(2);
check_shapes{inputs}.has(2).same_type();
const shape& A = inputs.at(0);
const shape& B = inputs.at(1);
auto t = A.type();
if (A.lens()[1] != B.lens()[0])
RTG_THROW("Inner dimensions do not match");
return {t, {A.lens()[0], B.lens()[1]}};
@@ -247,81 +247,147 @@ struct gemm
}
};
struct identity_op
struct identity
{
std::string name() const {return "identity"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(1);
return inputs.at(0);
}
argument compute(shape, std::vector<argument>) const { RTG_THROW("not computable"); }
};
struct abs_op
struct abs
{
std::string name() const {return "abs"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(1);
return inputs.at(0);
}
argument compute(shape, std::vector<argument>) const { RTG_THROW("not computable"); }
};
struct exp_op
struct exp
{
std::string name() const {return "exp"; }
std::string name() const { return "exp"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(1);
return inputs.at(0);
}
argument compute(shape, std::vector<argument>) const { RTG_THROW("not computable"); }
};
struct sin_op
struct sin
{
std::string name() const {return "sin"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(1);
return inputs.at(0);
}
argument compute(shape, std::vector<argument>) const { RTG_THROW("not computable"); }
};
struct cos_op
struct cos
{
std::string name() const {return "cos"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(1);
return inputs.at(0);
}
argument compute(shape, std::vector<argument>) const { RTG_THROW("not computable"); }
};
struct tan_op
struct tan
{
std::string name() const {return "tan"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(1);
return inputs.at(0);
}
argument compute(shape, std::vector<argument>) const { RTG_THROW("not computable"); }
};
struct asin_op
struct asin
{
std::string name() const {return "asin"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(1);
return inputs.at(0);
}
argument compute(shape, std::vector<argument>) const { RTG_THROW("not computable"); }
};
struct acos_op
struct acos
{
std::string name() const {return "acos"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(1);
return inputs.at(0);
}
argument compute(shape, std::vector<argument>) const { RTG_THROW("not computable"); }
};
struct atan_op
struct atan
{
std::string name() const {return "atan"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(1);
return inputs.at(0);
}
argument compute(shape, std::vector<argument>) const { RTG_THROW("not computable"); }
};
struct softmax_op
struct softmax
{
std::string name() const {return "softmax"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(1);
return inputs.at(0);
}
argument compute(shape, std::vector<argument>) const { RTG_THROW("not computable"); }
};
struct tanh_op
struct tanh
{
std::string name() const {return "tanh"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(1);
return inputs.at(0);
}
argument compute(shape, std::vector<argument>) const { RTG_THROW("not computable"); }
};
struct sigmoid_op
struct sigmoid
{
std::string name() const {return "sigmoid"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(1);
return inputs.at(0);
}
argument compute(shape, std::vector<argument>) const { RTG_THROW("not computable"); }
};
struct neg_op
struct neg
{
std::string name() const {return "neg"; }
};
template <typename Op>
struct unaryop
{
Op op;
std::string name() const { return op.name(); }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(1);
return inputs.at(0);
}
argument compute(shape, std::vector<argument>) const { RTG_THROW("not computable"); }
};
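The unaryop template above builds a full operator out of a policy struct that supplies little more than a name (and, in the cpu backend further down, an fcn() lambda). A minimal standalone sketch of that pattern, using illustrative names (neg_policy, elementwise) that are not rtg APIs:

    #include <iostream>
    #include <string>
    #include <vector>

    // Policy struct: supplies only a name and an elementwise lambda,
    // mirroring the neg_op/fcn() style of the cpu backend below.
    struct neg_policy
    {
        std::string name() const { return "neg"; }
        auto fcn() const { return [](auto x) { return -x; }; }
    };

    // Generic wrapper: applies the policy's lambda over a buffer, the
    // job a cpu_unary-style operator would do for real arguments.
    template <class Op>
    std::vector<float> elementwise(const Op& op, std::vector<float> data)
    {
        auto f = op.fcn();
        for(auto& x : data)
            x = f(x);
        return data;
    }

    int main()
    {
        for(float x : elementwise(neg_policy{}, {1.f, -2.f, 3.f}))
            std::cout << x << ' '; // prints: -1 2 -3
    }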
struct flatten
@@ -329,31 +395,42 @@ struct flatten
std::string name() const { return "flatten"; }
};
struct add_op
struct add
{
std::string name() const { return "add"; }
shape compute_shape(std::vector<shape> inputs) const
{
// TODO(wsttiger@gmail.com) Check this for numpy-style broadcasting operations
check_shapes{inputs}.has(2).same_type().same_dims();
return inputs.at(0);
}
};
struct sub_op
struct sub
{
std::string name() const { return "sub"; }
shape compute_shape(std::vector<shape> inputs) const
{
// TODO(wsttiger@gmail.com) Check this for numpy-style broadcasting operations
check_shapes{inputs}.has(2).same_type().same_dims();
return inputs.at(0);
}
};
struct mul_op
struct mul
{
std::string name() const { return "mul"; }
shape compute_shape(std::vector<shape> inputs) const
{
// TODO(wsttiger@gmail.com) Check this for numpy-style broadcasting operations
check_shapes{inputs}.has(2).same_type().same_dims();
return inputs.at(0);
}
};
struct div_op
struct div
{
std::string name() const { return "div"; }
};
template <typename Op>
struct binaryop
{
Op op;
std::string name() const { return op.name(); }
shape compute_shape(std::vector<shape> inputs) const
{
// TODO(wsttiger@gmail.com) Check this for numpy-style broadcasting operations
......
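The TODO notes above all refer to numpy-style broadcasting: shapes are aligned from the trailing axis, and each pair of dimensions must either match or contain a 1. A minimal sketch of that rule, assuming a hypothetical broadcast_lens helper (not an rtg function):

    #include <algorithm>
    #include <cstddef>
    #include <stdexcept>
    #include <vector>

    // Align shapes from the trailing axis; each dimension pair must
    // match or one of them must be 1.
    std::vector<std::size_t> broadcast_lens(std::vector<std::size_t> a,
                                            std::vector<std::size_t> b)
    {
        if(a.size() < b.size())
            std::swap(a, b);
        // Left-pad the shorter shape with 1s.
        b.insert(b.begin(), a.size() - b.size(), std::size_t{1});
        std::vector<std::size_t> out(a.size());
        for(std::size_t i = 0; i < a.size(); i++)
        {
            if(a[i] != b[i] && a[i] != 1 && b[i] != 1)
                throw std::runtime_error("shapes are not broadcastable");
            out[i] = std::max(a[i], b[i]);
        }
        return out;
    }
    // e.g. broadcast_lens({4, 5}, {5}) == {4, 5};
    //      broadcast_lens({3, 1}, {1, 4}) == {3, 4}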
@@ -54,7 +54,7 @@ struct cpu_gemm
{
gemm op;
std::string name() const { return "cpu::gemm"; }
shape compute_shape(std::vector<shape> inputs)
shape compute_shape(std::vector<shape> inputs) const
{
return op.compute_shape(inputs);
}
@@ -93,73 +93,73 @@ struct cpu_gemm
struct identity_op
{
std::string name() const {return "cpu::identity"; }
auto fcn() { return [](auto x) { return x; }; }
auto fcn() const { return [](auto x) { return x; }; }
};
struct abs_op
{
std::string name() const {return "cpu::abs"; }
auto fcn() { return [](auto x) { return std::abs(x); }; }
auto fcn() const { return [](auto x) { return std::abs(x); }; }
};
struct exp_op
{
std::string name() const {return "cpu::exp"; }
auto fcn() { return [](auto x) { return std::exp(x); }; }
auto fcn() const { return [](auto x) { return std::exp(x); }; }
};
struct sin_op
{
std::string name() const {return "cpu::sin"; }
auto fcn() { return [](auto x) { return std::sin(x); }; }
auto fcn() const { return [](auto x) { return std::sin(x); }; }
};
struct cos_op
{
std::string name() const {return "cpu::cos"; }
auto fcn() { return [](auto x) { return std::cos(x); }; }
auto fcn() const { return [](auto x) { return std::cos(x); }; }
};
struct tan_op
{
std::string name() const {return "cpu::tan"; }
auto fcn() { return [](auto x) { return std::tan(x); }; }
auto fcn() const { return [](auto x) { return std::tan(x); }; }
};
struct asin_op
{
std::string name() const {return "cpu::asin"; }
auto fcn() { return [](auto x) { return std::asin(x); }; }
auto fcn() const { return [](auto x) { return std::asin(x); }; }
};
struct acos_op
{
std::string name() const {return "cpu::acos"; }
auto fcn() { return [](auto x) { return std::acos(x); }; }
auto fcn() const { return [](auto x) { return std::acos(x); }; }
};
struct atan_op
{
std::string name() const {return "cpu::atan"; }
auto fcn() { return [](auto x) { return std::atan(x); }; }
auto fcn() const { return [](auto x) { return std::atan(x); }; }
};
struct tanh_op
{
std::string name() const {return "cpu::tanh"; }
auto fcn() { return [](auto x) { return std::tanh(x); }; }
auto fcn() const { return [](auto x) { return std::tanh(x); }; }
};
struct sigmoid_op
{
std::string name() const {return "cpu::sigmoid"; }
auto fcn() { return [](auto x) { return 1.f/(1.f + std::exp(-x)); }; }
auto fcn() const { return [](auto x) { return 1.f/(1.f + std::exp(-x)); }; }
};
struct neg_op
{
std::string name() const {return "cpu::neg"; }
auto fcn() { return [](auto x) { return -x; }; }
auto fcn() const { return [](auto x) { return -x; }; }
};
struct relu_op
@@ -262,6 +262,46 @@ struct cpu_apply
{
apply_activation(it);
}
else if(it->op.name() == "identity")
{
apply_identity(it);
}
else if(it->op.name() == "softmax")
{
apply_softmax(it);
}
else if(it->op.name() == "tanh")
{
apply_tanh(it);
}
else if(it->op.name() == "sigmoid")
{
apply_sigmoid(it);
}
else if(it->op.name() == "exp")
{
apply_exp(it);
}
else if(it->op.name() == "neg")
{
apply_neg(it);
}
else if(it->op.name() == "sin")
{
apply_sin(it);
}
else if(it->op.name() == "cos")
{
apply_cos(it);
}
else if(it->op.name() == "tan")
{
apply_tan(it);
}
else if(it->op.name() == "gemm")
{
apply_gemm(it);
}
}
}
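The dispatch above grows by one branch per op name. A table-driven sketch of the same idea (not in this commit), with stand-in lambdas where the real code would bind the apply_* members to this:

    #include <functional>
    #include <iostream>
    #include <string>
    #include <unordered_map>

    int main()
    {
        // Handlers keyed by op name; lambdas stand in for apply_* members.
        std::unordered_map<std::string, std::function<void()>> apply_map = {
            {"exp", [] { std::cout << "apply_exp\n"; }},
            {"neg", [] { std::cout << "apply_neg\n"; }},
            {"gemm", [] { std::cout << "apply_gemm\n"; }},
        };

        std::string name = "exp"; // stand-in for it->op.name()
        auto it = apply_map.find(name);
        if(it != apply_map.end())
            it->second(); // prints: apply_exp
    }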
@@ -271,12 +311,72 @@ struct cpu_apply
prog->replace_instruction(ins, cpu_convolution{op}, ins->arguments);
}
void apply_gemm(instruction_ref ins)
{
auto&& op = any_cast<gemm>(ins->op);
prog->replace_instruction(ins, cpu_gemm{op}, ins->arguments);
}
void apply_activation(instruction_ref ins)
{
auto&& op = any_cast<activation>(ins->op);
if(op.mode == "relu")
prog->replace_instruction(ins, cpu_unary<relu_op>{}, ins->arguments);
}
void apply_identity(instruction_ref ins)
{
any_cast<identity>(ins->op); // cast validates the op type; the cpu op carries no state
prog->replace_instruction(ins, cpu_unary<identity_op>{}, ins->arguments);
}
void apply_softmax(instruction_ref ins)
{
any_cast<softmax>(ins->op);
// no cpu softmax kernel in this hunk; re-inserts the reference op
prog->replace_instruction(ins, softmax{}, ins->arguments);
}
void apply_tanh(instruction_ref ins)
{
any_cast<tanh>(ins->op);
prog->replace_instruction(ins, cpu_unary<tanh_op>{}, ins->arguments);
}
void apply_sigmoid(instruction_ref ins)
{
any_cast<sigmoid>(ins->op);
prog->replace_instruction(ins, cpu_unary<sigmoid_op>{}, ins->arguments);
}
void apply_exp(instruction_ref ins)
{
any_cast<exp>(ins->op);
prog->replace_instruction(ins, cpu_unary<exp_op>{}, ins->arguments);
}
void apply_neg(instruction_ref ins)
{
any_cast<neg>(ins->op);
prog->replace_instruction(ins, cpu_unary<neg_op>{}, ins->arguments);
}
void apply_sin(instruction_ref ins)
{
any_cast<sin>(ins->op);
prog->replace_instruction(ins, cpu_unary<sin_op>{}, ins->arguments);
}
void apply_cos(instruction_ref ins)
{
any_cast<cos>(ins->op);
prog->replace_instruction(ins, cpu_unary<cos_op>{}, ins->arguments);
}
void apply_tan(instruction_ref ins)
{
any_cast<tan>(ins->op);
prog->replace_instruction(ins, cpu_unary<tan_op>{}, ins->arguments);
}
};
std::string cpu_target::name() const { return "cpu"; }
......
@@ -60,6 +60,7 @@ function(add_test_executable TEST_NAME)
add_dependencies(check ${TEST_NAME})
set_tests_properties(${TEST_NAME} PROPERTIES FAIL_REGULAR_EXPRESSION "FAILED")
target_link_libraries(${TEST_NAME} rtg)
target_link_libraries(${TEST_NAME} rtg_cpu)
endfunction(add_test_executable)
file(GLOB TESTS *.cpp)
......
#include <cassert>
#include <cmath>
#include <cstring>
#include <iostream>
#include <vector>
#include <rtg/literal.hpp>
#include <rtg/operators.hpp>
#include <rtg/cpu/cpu_target.hpp>
void exp_test() {
rtg::program p;
rtg::shape s{rtg::shape::float_type, {3}};
auto l = p.add_literal(rtg::literal{s, {-1,0,1}});
p.add_instruction(rtg::exp{}, l);
p.compile(rtg::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
std::memcpy(results_vector.data(), result.data(), 3*sizeof(float));
std::vector<float> gold = {0.36787944f,1.f,2.71828183f};
float tol = 1e-6f; // 1e-8 is below single-precision resolution at these magnitudes
for (std::size_t i = 0; i < results_vector.size(); i++) {
assert(std::abs(results_vector[i]-gold[i]) < tol);
}
}
void gemm_test() {
rtg::program p;
std::vector<float> A = {-0.00925222, 0.56250403, 0.70107397, 0.75402161, -0.505885 ,
1.33628943, -0.11413 , -0.31270559, 1.59336732, -0.19361027,
-0.91620867, 0.40108416, -0.06969921, 0.68483471, -0.39906632,
-1.66423624, 0.69040076, -1.31490171, -0.11282616, -0.79391814};
std::vector<float> B = { 6.09568541e-01, -6.10527007e-01, 3.66646462e-01,
1.18951101e-01, 5.58777432e-01, -3.21296298e-01,
-5.95997198e-01, -5.01425721e-01, -2.84606807e-01,
-5.73673557e-01, -8.99430260e-01, -4.25103093e-01,
1.53027987e+00, -3.81407415e-04, -3.29650255e-01};
std::vector<float> C = {-1.56327541e+00, -7.09570140e-01, -5.37424982e-01,
-2.22994831e-01, -2.15586437e+00, 2.09177941e-03,
-1.47279677e+00, 2.02627040e-01, -6.04527691e-01,
-1.29885596e+00, 2.16294914e+00, -1.48101497e-01};
rtg::shape a_shape{rtg::shape::float_type, {4,5}};
auto a = p.add_literal(rtg::literal{a_shape, A});
rtg::shape b_shape{rtg::shape::float_type, {5,3}};
auto b = p.add_literal(rtg::literal{b_shape, B});
p.add_instruction(rtg::gemm{}, a, b);
p.compile(rtg::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(12);
std::memcpy(results_vector.data(), result.data(), 12*sizeof(float));
float tol = 1e-6;
for (std::size_t i = 0; i < results_vector.size(); i++) {
assert(std::abs(results_vector[i]-C[i]) < tol);
}
}
int main()
{
exp_test();
gemm_test();
}
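The other unary ops wired into the dispatch (sin, cos, tan, and so on) could be covered by the same literal/compile/eval pattern; a sketch of one such test, assuming gold values taken from std::sin of the inputs, called from main() like the tests above:

    void sin_test() {
        rtg::program p;
        rtg::shape s{rtg::shape::float_type, {3}};
        auto l = p.add_literal(rtg::literal{s, {-1, 0, 1}});
        p.add_instruction(rtg::sin{}, l);
        p.compile(rtg::cpu::cpu_target{});
        auto result = p.eval({});
        std::vector<float> results_vector(3);
        std::memcpy(results_vector.data(), result.data(), 3*sizeof(float));
        // std::sin(-1), std::sin(0), std::sin(1)
        std::vector<float> gold = {-0.84147098f, 0.f, 0.84147098f};
        for (std::size_t i = 0; i < results_vector.size(); i++)
            assert(std::abs(results_vector[i] - gold[i]) < 1e-6f);
    }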