Unverified commit 65c5581f authored by Paul Fultz II, committed by GitHub

Merge branch 'master' into identity

parents 453fa37a f04a3ba6
@@ -17,7 +17,7 @@ else()
set(CMAKE_INSTALL_PREFIX "/opt/rocm" CACHE PATH "")
endif()
project(migraph)
project(migraphx)
find_package(ROCM REQUIRED)
include(ROCMSetupVersion)
......
def rocmtestnode(variant, name, body) {
def image = 'migraphlib'
def image = 'migraphxlib'
def cmake_build = { compiler, flags ->
def cmd = """
ulimit -c unlimited
......
# MIGraph
# AMD MIGraphX
AMD's library for graph optimizations.
AMD's graph optimization engine.
## Prerequisites
* [ROCm cmake modules](https://github.com/RadeonOpenCompute/rocm-cmake) **required**
@@ -20,7 +20,7 @@ cmake -P install_deps.cmake --prefix /some/local/dir
```
## Building MIGraph from source
## Building MIGraphX from source
## Configuring with cmake
@@ -105,8 +105,8 @@ Also, githooks can be installed to format the code per-commit:
The easiest way to set up the development environment is to use docker. You can build the top-level Dockerfile:
docker build -t migraph .
docker build -t migraphx .
Then, to enter the development environment, use `docker run`:
docker run --device='/dev/kfd' --device='/dev/dri' -v=`pwd`:/data -w /data --group-add video -it migraph
docker run --device='/dev/kfd' --device='/dev/dri' -v=`pwd`:/data -w /data --group-add video -it migraphx
@@ -19,7 +19,7 @@ add_doxygen_doc(
CALL_GRAPH YES
CALLER_GRAPH YES
BUILTIN_STL_SUPPORT YES
PROJECT_NAME MIGraph
PROJECT_NAME MIGraphX
SORT_MEMBERS_CTORS_1ST YES
SOURCE_BROWSER YES
GENERATE_TREEVIEW YES
......
# -*- coding: utf-8 -*-
#
# MIGraph documentation build configuration file, created by
# MIGraphX documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 19 11:38:13 2018.
#
# This file is execfile()d with the current directory set to its
@@ -45,7 +45,7 @@ source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
project = u'MIGraph'
project = u'MIGraphX'
copyright = u'2018, AMD'
author = u'AMD'
@@ -101,7 +101,7 @@ html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MIGraphdoc'
htmlhelp_basename = 'MIGraphXdoc'
# -- Options for LaTeX output ---------------------------------------------
@@ -128,7 +128,7 @@ latex_elements = {
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MIGraph.tex', u'MIGraph Documentation',
(master_doc, 'MIGraphX.tex', u'MIGraphX Documentation',
u'AMD', 'manual'),
]
@@ -138,7 +138,7 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'migraph', u'MIGraph Documentation',
(master_doc, 'migraphx', u'MIGraphX Documentation',
[author], 1)
]
@@ -149,13 +149,13 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MIGraph', u'MIGraph Documentation',
author, 'MIGraph', 'One line description of project.',
(master_doc, 'MIGraphX', u'MIGraphX Documentation',
author, 'MIGraphX', 'One line description of project.',
'Miscellaneous'),
]
breathe_default_members = ('members', 'undoc-members')
cpp_index_common_prefix = ['migraph::']
cpp_index_common_prefix = ['migraphx::']
default_role = 'any'
primary_domain = 'cpp'
......
.. MIGraph documentation master file, created by
.. MIGraphX documentation master file, created by
sphinx-quickstart on Thu Jul 19 11:38:13 2018.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to MIGraph's documentation!
===================================
Welcome to AMD MIGraphX's documentation!
========================================
.. toctree::
:maxdepth: 3
......
Overview
========
MIGraph provides an optimized execution engine for deep learning neural networks.
MIGraphX provides an optimized execution engine for deep learning neural networks.
Building a program
------------------
A program consists of a set of instructions to be executed when calling `eval <migraph::program::eval>`. Each instruction has an associated `operation <migraph::operation>` which represents the computation to be performed by the instruction.
A program consists of a set of instructions to be executed when calling `eval <migraphx::program::eval>`. Each instruction has an associated `operation <migraphx::operation>` which represents the computation to be performed by the instruction.
We can start by building a simple program to add two numbers together::
@@ -15,11 +15,11 @@ We can start by building a simple program to add two numbers together::
instruction_ref two = p.add_literal(2);
p.add_instruction(add{}, one, two);
The `add_literal <migraph::program::add_literal>` function will add an instruction to the program to store a literal number. The `instruction_ref <migraph::instruction_ref>` is a reference to the instruction in the program, which can be used to compose the output of the instruction with another instruction.
The `add_literal <migraphx::program::add_literal>` function will add an instruction to the program to store a literal number. The `instruction_ref <migraphx::instruction_ref>` is a reference to the instruction in the program, which can be used to compose the output of the instruction with another instruction.
After creating the literals, we then create the instruction to add the numbers together. This is done by using the `add{} <migraph::add>` operation class along with the `instruction_ref <migraph::instruction_ref>` for the input arguments of the instruction.
After creating the literals, we then create the instruction to add the numbers together. This is done by using the `add{} <migraphx::add>` operation class along with the `instruction_ref <migraphx::instruction_ref>` for the input arguments of the instruction.
Finally, we can run this `program <migraph::program>` by compiling it for the cpu and then running it with `eval <migraph::program::eval>`::
Finally, we can run this `program <migraphx::program>` by compiling it for the cpu and then running it with `eval <migraphx::program::eval>`::
p.compile(cpu::target{});
argument result = p.eval({});
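For reference, the snippets above can be assembled into one small self-contained program. This is only a minimal sketch: the ``cpu/target.hpp`` header path and the unqualified spelling of ``add{}`` are taken from this guide and may differ in the actual headers::

    #include <migraphx/program.hpp>
    #include <migraphx/operators.hpp>   // assumed to provide the add operation
    #include <migraphx/cpu/target.hpp>  // assumed location of cpu::target

    using namespace migraphx;

    int main()
    {
        program p;
        instruction_ref one = p.add_literal(1);
        instruction_ref two = p.add_literal(2);
        p.add_instruction(add{}, one, two);

        p.compile(cpu::target{});
        argument result = p.eval({}); // result now holds the value 3
    }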
@@ -43,7 +43,7 @@ Of course, this program will always produce the same value which is quite uninteresting
p.add_instruction(add{}, x, two);
p.compile(cpu::target{});
This adds a parameter of type ``int64``, and compiles it for the ``cpu``. To run the program, we need to pass the parameter to it when we call `eval <migraph::program::eval>`::
This adds a parameter of type ``int64``, and compiles it for the ``cpu``. To run the program, we need to pass the parameter to it when we call `eval <migraphx::program::eval>`::
argument result = p.eval({
{"x", literal{1}.get_argument()}
@@ -52,12 +52,12 @@ This adds a parameter of type ``int64``, and compiles it for the ``cpu``. To run
This will print ``3``.
A parameter is given as an `argument <migraph::argument>`. In this case, the simplest way of creating an `argument <migraph::argument>` is from a `literal <migraph::literal>`.
A parameter is given as an `argument <migraphx::argument>`. In this case, the simplest way of creating an `argument <migraphx::argument>` is from a `literal <migraphx::literal>`.
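As with the literal-only example, the parameterised program can be written out in full. A minimal sketch, assuming the scalar parameter's shape is spelled ``shape{shape::int64_type}`` (the exact enum name is an assumption based on the ``int64`` type mentioned above)::

    program p;
    instruction_ref x   = p.add_parameter("x", shape{shape::int64_type}); // assumed shape spelling
    instruction_ref two = p.add_literal(2);
    p.add_instruction(add{}, x, two);
    p.compile(cpu::target{});

    // The parameter is supplied as an argument built from a literal.
    argument result = p.eval({
        {"x", literal{1}.get_argument()}
    });
    // result now holds 3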
Tensor data
-----------
In this example we are just creating numbers, but the `shape <migraph::shape>` class can describe multi-dimensional tensors. For example, we can build a simple network with convolution and relu::
In this example we are just creating numbers, but the `shape <migraphx::shape>` class can describe multi-dimensional tensors. For example, we can build a simple network with convolution and relu::
program p;
instruction_ref input = p.add_parameter("x", shape{shape::float_type, {1, 3, 32, 32}});
@@ -65,7 +65,7 @@ In this example we are just creating numbers, but the `shape <migraph::shape>` class
instruction_ref conv = p.add_instruction(convolution{}, input, weights);
p.add_instruction(activation{"relu"}, conv);
Here we create two parameters, ``input`` and ``weights``. In the previous examples we just created simple literals; however, most programs will take data from already-allocated buffers (usually on the GPU). In this case, we can create `argument <migraph::argument>` objects directly from the pointers to the buffers::
Here we create two parameters, ``input`` and ``weights``. In the previous examples we just created simple literals; however, most programs will take data from already-allocated buffers (usually on the GPU). In this case, we can create `argument <migraphx::argument>` objects directly from the pointers to the buffers::
// Compile the program
p.compile(gpu::target{});
@@ -77,12 +77,12 @@ Here we create two parameters for both the ``input`` and ``weights``. In the previous
argument weights_arg{shape{shape::float_type, {1, 3, 32, 32}}, weights};
p.eval({{"x", input_arg}, {"w", weights_arg}})
An `argument <migraph::argument>` can handle memory buffers from either the GPU or the CPU, but when running the `program <migraph::program>`, buffers should be allocated for the corresponding target. That is, when compiling for the CPU, the buffers should be allocated on the CPU, and when compiling for the GPU the buffers should be allocated on the GPU.
An `argument <migraphx::argument>` can handle memory buffers from either the GPU or the CPU, but when running the `program <migraphx::program>`, buffers should be allocated for the corresponding target. That is, when compiling for the CPU, the buffers should be allocated on the CPU, and when compiling for the GPU the buffers should be allocated on the GPU.
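For illustration only, the device buffers above could come from plain HIP allocations. This sketch continues the example (``p`` was compiled for ``gpu::target``), uses only the standard ``hipMalloc``/``hipFree`` calls, and omits error checking and data upload::

    #include <hip/hip_runtime.h>

    std::size_t bytes = 1 * 3 * 32 * 32 * sizeof(float);
    float* input   = nullptr;
    float* weights = nullptr;

    // The program was compiled for the GPU, so eval expects device memory.
    hipMalloc(reinterpret_cast<void**>(&input), bytes);
    hipMalloc(reinterpret_cast<void**>(&weights), bytes);

    argument input_arg{shape{shape::float_type, {1, 3, 32, 32}}, input};
    argument weights_arg{shape{shape::float_type, {1, 3, 32, 32}}, weights};
    p.eval({{"x", input_arg}, {"w", weights_arg}});

    hipFree(input);
    hipFree(weights);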
Importing from onnx
-------------------
A `program <migraph::program>` can be built directly from an onnx file, which makes it easier to use neural networks from other frameworks. In this case, there is a ``parse_onnx`` function::
A `program <migraphx::program>` can be built directly from an onnx file, which makes it easier to use neural networks from other frameworks. In this case, there is a ``parse_onnx`` function::
program p = parse_onnx("model.onnx");
p.compile(gpu::target{});
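An imported program is then evaluated just like a hand-built one. In this hypothetical sketch the input name ``"0"`` and its shape are placeholders that depend on the network stored in ``model.onnx``::

    program p = parse_onnx("model.onnx");
    p.compile(gpu::target{});

    // input_data is a device buffer prepared as in the previous section.
    argument input_arg{shape{shape::float_type, {1, 3, 224, 224}}, input_data};
    argument result = p.eval({{"0", input_arg}});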
......
@@ -4,27 +4,27 @@ Data types
shape
-----
.. doxygenstruct:: migraph::shape
.. doxygenstruct:: migraphx::shape
literal
-------
.. doxygenstruct:: migraph::literal
.. doxygenstruct:: migraphx::literal
argument
--------
.. doxygenstruct:: migraph::argument
.. doxygenstruct:: migraphx::argument
raw_data
--------
.. doxygenstruct:: migraph::raw_data
.. doxygenstruct:: migraphx::raw_data
.. doxygenfunction:: migraph::MIGRAPH_INLINE_NS::visit_all
.. doxygenfunction:: migraphx::MIGRAPH_INLINE_NS::visit_all
tensor_view
-----------
.. doxygenstruct:: migraph::tensor_view
.. doxygenstruct:: migraphx::tensor_view
@@ -4,7 +4,7 @@ Operators
operation
---------
.. doxygenstruct:: migraph::operation
.. doxygenstruct:: migraphx::operation
operators
---------
......
@@ -4,44 +4,44 @@ Passes
pass
----
.. doxygenstruct:: migraph::pass
.. doxygenstruct:: migraphx::pass
dead_code_elimination
---------------------
.. doxygenstruct:: migraph::dead_code_elimination
.. doxygenstruct:: migraphx::dead_code_elimination
common_subexpression_elimination
--------------------------------
.. doxygenstruct:: migraph::common_subexpression_elimination
.. doxygenstruct:: migraphx::common_subexpression_elimination
constant_propagate
------------------
.. doxygenstruct:: migraph::constant_propagate
.. doxygenstruct:: migraphx::constant_propagate
eliminate_concat
----------------
.. doxygenstruct:: migraph::eliminate_concat
.. doxygenstruct:: migraphx::eliminate_concat
eliminate_contiguous
--------------------
.. doxygenstruct:: migraph::eliminate_contiguous
.. doxygenstruct:: migraphx::eliminate_contiguous
fwd_conv_batchnorm_rewrite
--------------------------
.. doxygenstruct:: migraph::fwd_conv_batchnorm_rewrite
.. doxygenstruct:: migraphx::fwd_conv_batchnorm_rewrite
simplify_algebra
----------------
.. doxygenstruct:: migraph::simplify_algebra
.. doxygenstruct:: migraphx::simplify_algebra
simplify_reshapes
-----------------
.. doxygenstruct:: migraph::simplify_reshapes
.. doxygenstruct:: migraphx::simplify_reshapes
@@ -4,21 +4,21 @@ Program
instruction
-----------
.. doxygenstruct:: migraph::instruction
.. doxygenstruct:: migraphx::instruction
instruction_ref
---------------
.. cpp:type:: migraph::instruction_ref
.. cpp:type:: migraphx::instruction_ref
References an instruction in the program.
program
-------
.. doxygenstruct:: migraph::program
.. doxygenstruct:: migraphx::program
parse_onnx
----------
.. doxygenfunction:: migraph::MIGRAPH_INLINE_NS::parse_onnx
.. doxygenfunction:: migraphx::MIGRAPH_INLINE_NS::parse_onnx
@@ -4,15 +4,15 @@ Targets
target
------
.. doxygenstruct:: migraph::target
.. doxygenstruct:: migraphx::target
gpu::target
-----------
.. doxygenstruct:: migraph::gpu::target
.. doxygenstruct:: migraphx::gpu::target
cpu::target
-----------
.. doxygenstruct:: migraph::cpu::target
.. doxygenstruct:: migraphx::cpu::target
@@ -2,7 +2,7 @@
include(ROCMInstallTargets)
include(ROCMPackageConfigHelpers)
add_library(migraph
add_library(migraphx
auto_contiguous.cpp
common_subexpression_elimination.cpp
constant_propagate.cpp
@@ -21,16 +21,16 @@ add_library(migraph
opt/memory_coloring.cpp
opt/memory_coloring_impl.cpp
)
rocm_clang_tidy_check(migraph)
rocm_clang_tidy_check(migraphx)
rocm_install_targets(
TARGETS migraph
TARGETS migraphx
INCLUDE
${CMAKE_CURRENT_SOURCE_DIR}/include
)
find_path(HALF_INCLUDE_DIR half.hpp)
# TODO: Fix the incorrect path
target_include_directories(migraph SYSTEM PUBLIC $<BUILD_INTERFACE:${HALF_INCLUDE_DIR}>)
target_include_directories(migraphx SYSTEM PUBLIC $<BUILD_INTERFACE:${HALF_INCLUDE_DIR}>)
set(PACKAGE_DEPENDS)
@@ -42,8 +42,8 @@ add_subdirectory(targets/gpu)
endif()
rocm_export_targets(
TARGETS migraph::migraph
NAMESPACE migraph::
TARGETS migraphx::migraphx
NAMESPACE migraphx::
DEPENDS
${PACKAGE_DEPENDS}
)
......
#include <migraph/auto_contiguous.hpp>
#include <migraph/program.hpp>
#include <migraph/instruction.hpp>
#include <migraph/operators.hpp>
#include <migraph/iterator_for.hpp>
#include <migraphx/auto_contiguous.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/iterator_for.hpp>
namespace migraph {
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
void auto_contiguous::apply(program& p) const
@@ -21,4 +21,4 @@ void auto_contiguous::apply(program& p) const
}
} // namespace MIGRAPH_INLINE_NS
} // namespace migraph
} // namespace migraphx
#include <migraph/common_subexpression_elimination.hpp>
#include <migraph/program.hpp>
#include <migraph/instruction.hpp>
#include <migraph/iterator_for.hpp>
#include <migraph/ranges.hpp>
#include <migraph/functional.hpp>
#include <migraphx/common_subexpression_elimination.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/functional.hpp>
#include <unordered_set>
namespace migraph {
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
template <class Range>
@@ -36,4 +36,4 @@ void cse_range(program& p, Range&& r)
void common_subexpression_elimination::apply(program& p) const { cse_range(p, iterator_for(p)); }
} // namespace MIGRAPH_INLINE_NS
} // namespace migraph
} // namespace migraphx
#include <migraph/constant_propagate.hpp>
#include <migraph/program.hpp>
#include <migraph/matcher.hpp>
#include <migraph/literal.hpp>
#include <migraphx/constant_propagate.hpp>
#include <migraphx/program.hpp>
#include <migraphx/matcher.hpp>
#include <migraphx/literal.hpp>
namespace migraph {
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
struct match_const_add
@@ -27,4 +27,4 @@ struct match_const_add
void constant_propagate::apply(program& p) const { match::find_matches(p, match_const_add{}); }
} // namespace MIGRAPH_INLINE_NS
} // namespace migraph
} // namespace migraphx
#include <migraph/dead_code_elimination.hpp>
#include <migraph/program.hpp>
#include <migraph/instruction.hpp>
#include <migraph/iterator_for.hpp>
#include <migraph/functional.hpp>
#include <migraph/ranges.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/functional.hpp>
#include <migraphx/ranges.hpp>
namespace migraph {
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
template <class Range, class Iterator>
@@ -63,4 +63,4 @@ void dead_code_elimination::apply(program& p) const
}
} // namespace MIGRAPH_INLINE_NS
} // namespace migraph
} // namespace migraphx
#include <migraph/eliminate_allocation.hpp>
#include <migraph/program.hpp>
#include <migraph/instruction.hpp>
#include <migraph/operators.hpp>
#include <migraph/iterator_for.hpp>
#include <migraph/ranges.hpp>
#include <migraph/stringutils.hpp>
#include <migraph/pass_config.hpp>
#include <migraphx/eliminate_allocation.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/pass_config.hpp>
namespace migraph {
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
void eliminate_allocation::apply(program& p) const
@@ -38,4 +38,4 @@ void eliminate_allocation::apply(program& p) const
}
} // namespace MIGRAPH_INLINE_NS
} // namespace migraph
} // namespace migraphx
#include <iterator>
#include <migraph/eliminate_concat.hpp>
#include <migraph/program.hpp>
#include <migraph/instruction.hpp>
#include <migraph/operators.hpp>
#include <migraph/iterator_for.hpp>
#include <migraph/dfor.hpp>
#include <migraphx/eliminate_concat.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/dfor.hpp>
namespace migraph {
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
void eliminate_concat::apply(program& p) const
{
@@ -56,16 +56,16 @@ void eliminate_concat::apply(program& p) const
std::size_t offset = 0;
for(auto x : allocations)
{
migraph::op::load op{x->get_shape(), offset};
// migraph::op::load op{x->get_shape(), 0};
migraphx::op::load op{x->get_shape(), offset};
// migraphx::op::load op{x->get_shape(), 0};
p.replace_instruction(x, op, {super});
offset += x->get_shape().bytes();
}
std::vector<instruction_ref> args = {super};
std::copy(ins->inputs().begin(), ins->inputs().end() - 1, std::back_inserter(args));
p.replace_instruction(ins, migraph::op::identity{}, args);
p.replace_instruction(ins, migraphx::op::identity{}, args);
}
}
}
} // namespace MIGRAPH_INLINE_NS
} // namespace migraph
} // namespace migraphx
#include <migraph/eliminate_contiguous.hpp>
#include <migraph/program.hpp>
#include <migraph/instruction.hpp>
#include <migraph/operators.hpp>
#include <migraph/iterator_for.hpp>
#include <migraph/ranges.hpp>
#include <migraph/stringutils.hpp>
#include <migraphx/eliminate_contiguous.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/stringutils.hpp>
#include <utility>
namespace migraph {
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
bool try_compute_shape(const operation& op, const std::vector<instruction_ref>& args)
@@ -48,4 +48,4 @@ void eliminate_contiguous::apply(program& p) const
}
} // namespace MIGRAPH_INLINE_NS
} // namespace migraph
} // namespace migraphx