Commit 12e9c7d5 authored by Scott Thornton's avatar Scott Thornton
Browse files

Formatting

parent 056c6fe2
...@@ -38,7 +38,7 @@ std::vector<float> read_mnist_images(std::string full_path, int& number_of_image ...@@ -38,7 +38,7 @@ std::vector<float> read_mnist_images(std::string full_path, int& number_of_image
file.read(reinterpret_cast<char*>(&number_of_images), sizeof(number_of_images)); file.read(reinterpret_cast<char*>(&number_of_images), sizeof(number_of_images));
number_of_images = reverse_int(number_of_images); number_of_images = reverse_int(number_of_images);
file.read(reinterpret_cast<char*>(&n_rows), sizeof(n_rows)); file.read(reinterpret_cast<char*>(&n_rows), sizeof(n_rows));
n_rows = reverse_int(n_rows); n_rows = reverse_int(n_rows);
file.read(reinterpret_cast<char*>(&n_cols), sizeof(n_cols)); file.read(reinterpret_cast<char*>(&n_cols), sizeof(n_cols));
n_cols = reverse_int(n_cols); n_cols = reverse_int(n_cols);
...@@ -96,12 +96,13 @@ std::vector<int32_t> read_mnist_labels(std::string full_path, int& number_of_lab ...@@ -96,12 +96,13 @@ std::vector<int32_t> read_mnist_labels(std::string full_path, int& number_of_lab
} }
} }
// Compute the softmax of a vector of logits.
//
// Uses the standard max-subtraction trick: softmax(p) == softmax(p - max(p)).
// The original code exponentiated the raw logits, and std::exp(float) overflows
// to +inf for inputs above ~88, which turns the final division into inf/inf = NaN.
// Shifting by the maximum makes the largest exponent exp(0) == 1, so the sum is
// always finite and at least 1.
//
// @param p  input logits (taken by value; interface kept identical to callers)
// @return   vector of the same size with non-negative entries summing to 1;
//           an empty input yields an empty vector.
std::vector<float> softmax(std::vector<float> p)
{
    size_t n = p.size();
    std::vector<float> result(n);
    if(n == 0)
        return result; // guard: max_element on an empty range is undefined
    // Shift by the maximum logit for numerical stability.
    float m = *std::max_element(p.begin(), p.end());
    std::transform(p.begin(), p.end(), result.begin(), [=](auto x) { return std::exp(x - m); });
    float s = std::accumulate(result.begin(), result.end(), 0.0f, std::plus<float>());
    std::transform(result.begin(), result.end(), result.begin(), [=](auto x) { return x / s; });
    return result;
}
...@@ -123,15 +124,16 @@ int main(int argc, char const* argv[]) ...@@ -123,15 +124,16 @@ int main(int argc, char const* argv[])
auto s = migraph::shape{migraph::shape::float_type, {1, 1, 28, 28}}; auto s = migraph::shape{migraph::shape::float_type, {1, 1, 28, 28}};
std::cout << s << std::endl; std::cout << s << std::endl;
auto ptr = input.data(); auto ptr = input.data();
for (int i = 0; i < 20; i++) for(int i = 0; i < 20; i++)
{ {
std::cout << "label: " << labels[i] << " ----> "; std::cout << "label: " << labels[i] << " ----> ";
auto input3 = migraph::argument{s, &ptr[784*i]}; auto input3 = migraph::argument{s, &ptr[784 * i]};
auto result = prog.eval({{"Input3", input3}}); auto result = prog.eval({{"Input3", input3}});
std::vector<float> logits; std::vector<float> logits;
result.visit([&](auto output) { logits.assign(output.begin(), output.end()); }); result.visit([&](auto output) { logits.assign(output.begin(), output.end()); });
std::vector<float> probs = softmax(logits); std::vector<float> probs = softmax(logits);
for (auto x : probs) std::cout << x << " "; for(auto x : probs)
std::cout << x << " ";
std::cout << std::endl; std::cout << std::endl;
} }
std::cout << std::endl; std::cout << std::endl;
......
...@@ -621,7 +621,7 @@ int main() ...@@ -621,7 +621,7 @@ int main()
transpose_test(); transpose_test();
contiguous_test(); contiguous_test();
softmax_test(); softmax_test();
//maxpool_test(); // maxpool_test();
conv2d_test(); conv2d_test();
conv2d_padding_test(); conv2d_padding_test();
conv2d_padding_stride_test(); conv2d_padding_stride_test();
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment