Commit 63387253 authored by charlie

Merge branch 'develop' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into select_module_op

parents dd74a89a 962329f3
CheckOptions:
- key: bugprone-reserved-identifier.AllowedIdentifiers
value: '__HIP_PLATFORM_HCC__;__HIP_ROCclr__'
value: '__HIP_PLATFORM_AMD__;__HIP_ROCclr__'
- key: bugprone-unused-return-value.CheckedFunctions
value: '::std::async;::std::launder;::std::remove;::std::remove_if;::std::unique;::std::unique_ptr::release;::std::basic_string::empty;::std::vector::empty;::std::find;::std::find_if;::std::find_if_not;::std::all_of;::std::any_of;::std::none_of;::std::count;::std::count_if;::std::mismatch;::std::find_end;::std::find_first_of;::std::adjacent_find;::std::search;::std::search_n;::std::nth_element;::std::lower_bound;::std::upper_bound;::std::binary_search;::std::equal_range;::std::max;::std::max_element;::std::min;::std::min_element;::std::minmax;::std::minmax_element;::std::equal;::std::lexicographical_compare;::std::accumulate;::std::inner_product'
- key: cppcoreguidelines-macro-usage.AllowedRegexp
......
......@@ -95,7 +95,7 @@ endif()
list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
include(EnableCompilerWarnings)
include(ROCMClangTidy)
if(CMAKE_CXX_COMPILER MATCHES ".*hcc" OR CMAKE_CXX_COMPILER MATCHES ".*clang\\+\\+")
if(CMAKE_CXX_COMPILER MATCHES ".*clang\\+\\+")
set(MIGRAPHX_TIDY_ERRORS ERRORS * -readability-inconsistent-declaration-parameter-name)
# Enable tidy on hip
elseif(MIGRAPHX_ENABLE_GPU)
......
......@@ -54,8 +54,9 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# add this for roctracer dependencies
RUN pip3 install CppHeaderParser packaging==22.0
RUN pip3 install CppHeaderParser
# Workaround broken rocm packages
RUN ln -s /opt/rocm-* /opt/rocm
......
......@@ -33,11 +33,9 @@ def rocmtestnode(Map conf) {
"""
echo cmd
sh cmd
if (compiler != "hcc") {
// Only archive from master or develop
if (env.BRANCH_NAME == "develop" || env.BRANCH_NAME == "master") {
archiveArtifacts artifacts: "build/*.deb", allowEmptyArchive: true, fingerprint: true
}
// Only archive from master or develop
if (env.BRANCH_NAME == "develop" || env.BRANCH_NAME == "master") {
archiveArtifacts artifacts: "build/*.deb", allowEmptyArchive: true, fingerprint: true
}
}
node(name) {
......
......@@ -76,7 +76,7 @@ function(py_add_module NAME)
)
endfunction()
set(PYTHON_SEARCH_VERSIONS 2.7 3.5 3.6 3.7 3.8 3.9)
set(PYTHON_SEARCH_VERSIONS 2.7 3.5 3.6 3.7 3.8 3.9 3.10)
set(PYTHON_DISABLE_VERSIONS "" CACHE STRING "")
foreach(PYTHON_DISABLE_VERSION ${PYTHON_DISABLE_VERSIONS})
list(REMOVE_ITEM PYTHON_SEARCH_VERSIONS ${PYTHON_DISABLE_VERSION})
......
......@@ -137,7 +137,7 @@ An `argument <migraphx::argument>` can handle memory buffers from either the GPU
By default when running the `program <migraphx::program>`, buffers are allocated on the corresponding target.
When compiling for the CPU, the buffers by default will be allocated on the CPU.
When compiling for the GPU, the buffers by default will be allocated on the GPU.
With the option ``offloaf_copy=true`` set while compiling for the GPU, the buffers will be located on the CPU.
With the option ``offload_copy=true`` set while compiling for the GPU, the buffers will be located on the CPU.
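To make the buffer placement concrete, a minimal Python sketch is shown below (an illustration only; it assumes the Python bindings, a ``model.onnx`` file, and the ``parse_onnx``/``get_target`` helpers documented in the Python API):

.. code-block:: python

    import migraphx

    p = migraphx.parse_onnx("model.onnx")  # illustrative model file

    # Default GPU compile: parameter and result buffers are allocated on the GPU.
    # p.compile(migraphx.get_target("gpu"))

    # With offload_copy=True the buffers stay on the host and the compiler
    # inserts the host<->device copies automatically.
    p.compile(migraphx.get_target("gpu"), offload_copy=True)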
Importing From ONNX
......
......@@ -28,6 +28,10 @@ Enable implicit offload copying
Disable fast math optimization
.. option:: --exhaustive-tune
Perform an exhaustive search to find the fastest version of generated kernels for the selected backend
.. option:: --fp16
Quantize for fp16
......
......@@ -215,13 +215,14 @@ program
:rtype: list[shape]
.. py:method:: compile(t, offload_copy=True, fast_math=True)
.. py:method:: compile(t, offload_copy=True, fast_math=True, exhaustive_tune=False)
Compiles the program for the target and optimizes it.
:param target t: This is the target to compile the program for.
:param bool offload_copy: For targets with offloaded memory (such as the gpu), this will insert instructions during compilation to copy the input parameters to the offloaded memory and to copy the final result from the offloaded memory back to main memory.
:param bool fast_math: Optimize math functions to use faster approximate versions. There may be slight accuracy degradation when enabled.
:param bool exhaustive_tune: Flag to enable exhaustive search to find the fastest version of generated kernels for the selected backend.
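A brief usage sketch of the new flag (an illustration only; it assumes a ``model.onnx`` file and the ``parse_onnx``/``get_target`` helpers):

.. code-block:: python

    import migraphx

    p = migraphx.parse_onnx("model.onnx")  # illustrative model file
    t = migraphx.get_target("gpu")

    # exhaustive_tune trades longer compile times for a search over kernel
    # configurations; the default (False) keeps compilation fast.
    p.compile(t, offload_copy=True, fast_math=True, exhaustive_tune=True)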
.. py:method:: get_main_module()
......
......@@ -10,52 +10,53 @@ The MIGraphX driver is installed with MIGraphX and can be found in `/opt/rocm/bi
See below for a comprehensive list of commands and option arguments, as well as some usage examples.
### Commands
| Command | Description |
| --- | ---|
| op | When followed by the option --list or -l, prints all operators of MIGraphX |
| params | Prints the input and output parameter shapes |
| run | Compiles, allocates parameters, evaluates, and prints input graph |
| read | Loads and prints input graph |
| compile | Compiles and prints input graph |
| verify | Runs reference and GPU implementations and checks outputs for consistency |
| perf | Compiles and runs input graph then prints performance report |
| Command | Description |
| ------- | -------------------------------------------------------------------------- |
| op | When followed by the option --list or -l, prints all operators of MIGraphX |
| params | Prints the input and output parameter shapes |
| run | Compiles, allocates parameters, evaluates, and prints input graph |
| read | Loads and prints input graph |
| compile | Compiles and prints input graph |
| verify | Runs reference and GPU implementations and checks outputs for consistency |
| perf | Compiles and runs input graph then prints performance report |
### Options
| Option | Description |
| --- | --- |
| --help \| -h | Show help |
| --model <resnet50\|inceptionv3\|alexnet> | Loads one of the three default models |
| --onnx | Load file as onnx graph |
| --tf | Load file as a tensorflow graph |
| --migraphx | Load file as a migraphx graph |
| --migraphx-json | Load file as a migraphx JSON graph |
| --batch | Set batch size for the model |
| --nhwc | Treat tensorflow format as nhwc |
| --nchw | Treat tensorflow format as nchw |
| --skip-unknown-operators | Skip unknown operators when parsing and continue to parse |
| --trim \| -t | Trim instructions from the end |
| --optimize \| -O | Optimize when reading |
| --graphviz \| -g | Print out a graphviz representation |
| --brief | Make the output brief |
| --cpp | Print out the program as cpp program |
| --json | Print out program as json |
| --text | Print out program in text format |
| --binary | Print out program in binary format |
| --output \| -o | Output to file |
| --fill0 | Fill parameter with 0s |
| --fill1 | Fill parameter with 1s |
| --gpu | Compile on the gpu |
| --cpu | Compile on the cpu |
| --ref | Compile on the reference implementation |
| --enable-offload-copy | Enable implicit offload copying |
| --disable-fast-math | Disable fast math optimization |
| --fp16 | Quantize for fp16 |
| --int8 | Quantize for int8 |
| --tolerance | Tolerance for errors |
| --per-instruction \| -i | Verify each instruction |
| --reduce \| -r | Reduce program and verify |
| --iterations \| -n | Number of iterations to run for perf report |
| --list \| -l | List all the operators of MIGraphX |
| Option | Description |
| ---------------------------------------- | --------------------------------------------------------- |
| --help \| -h | Show help |
| --model <resnet50\|inceptionv3\|alexnet> | Loads one of the three default models |
| --onnx | Load file as onnx graph |
| --tf | Load file as a tensorflow graph |
| --migraphx | Load file as a migraphx graph |
| --migraphx-json | Load file as a migraphx JSON graph |
| --batch | Set batch size for the model |
| --nhwc | Treat tensorflow format as nhwc |
| --nchw | Treat tensorflow format as nchw |
| --skip-unknown-operators | Skip unknown operators when parsing and continue to parse |
| --trim \| -t | Trim instructions from the end |
| --optimize \| -O | Optimize when reading |
| --graphviz \| -g | Print out a graphviz representation |
| --brief | Make the output brief |
| --cpp | Print out the program as cpp program |
| --json | Print out program as json |
| --text | Print out program in text format |
| --binary | Print out program in binary format |
| --output \| -o | Output to file |
| --fill0 | Fill parameter with 0s |
| --fill1 | Fill parameter with 1s |
| --gpu | Compile on the gpu |
| --cpu | Compile on the cpu |
| --ref | Compile on the reference implementation |
| --enable-offload-copy | Enable implicit offload copying |
| --disable-fast-math | Disable fast math optimization |
| --exhaustive-tune | Enable exhaustive search to find fastest kernel |
| --fp16 | Quantize for fp16 |
| --int8 | Quantize for int8 |
| --tolerance | Tolerance for errors |
| --per-instruction \| -i | Verify each instruction |
| --reduce \| -r | Reduce program and verify |
| --iterations \| -n | Number of iterations to run for perf report |
| --list \| -l | List all the operators of MIGraphX |
## Usage Examples
The examples below supply a simple MNIST ConvNet as the input graph. Models of higher complexity will have considerably larger outputs in most cases.
......
#!/usr/bin/cmake -P
#####################################################################################
# ####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
......@@ -22,9 +22,10 @@
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
# ####################################################################################
set(ARGS)
foreach(i RANGE 3 ${CMAKE_ARGC})
list(APPEND ARGS ${CMAKE_ARGV${i}})
endforeach()
......@@ -38,18 +39,19 @@ set(multiValueArgs)
cmake_parse_arguments(PARSE "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGS})
if(PARSE_help)
message("Usage: install_deps.cmake [options] [cmake-args]")
message("")
message("Options:")
message(" --prefix Set the prefix to install the dependencies.")
message("")
message("Commands:")
message(" help Show this message and exit.")
message("")
return()
message("Usage: install_deps.cmake [options] [cmake-args]")
message("")
message("Options:")
message(" --prefix Set the prefix to install the dependencies.")
message("")
message("Commands:")
message(" help Show this message and exit.")
message("")
return()
endif()
set(_PREFIX /usr/local)
if(PARSE_--prefix)
set(_PREFIX ${PARSE_--prefix})
endif()
......@@ -63,21 +65,23 @@ if(NOT CMakeGet_FOUND)
file(DOWNLOAD https://raw.githubusercontent.com/pfultz2/cmake-get/master/install.cmake ${FILENAME} STATUS RESULT_LIST)
list(GET RESULT_LIST 0 RESULT)
list(GET RESULT_LIST 1 RESULT_MESSAGE)
if(NOT RESULT EQUAL 0)
message(FATAL_ERROR "Download for install.cmake failed: ${RESULT_MESSAGE}")
endif()
execute_process(COMMAND ${CMAKE_COMMAND} -P ${FILENAME} ${PREFIX})
file(REMOVE ${FILENAME})
find_package(CMakeGet REQUIRED PATHS ${PREFIX})
endif()
# Set compiler to hcc if not set
# Set compiler to clang++ if not set
if(NOT DEFINED ENV{CXX} AND NOT DEFINED CMAKE_CXX_COMPILER AND NOT DEFINED CMAKE_TOOLCHAIN_FILE)
find_program(HCC hcc PATHS /opt/rocm PATH_SUFFIXES bin)
if(HCC)
set(ENV{CXX} ${HCC})
find_program(CLANG clang++ PATHS /opt/rocm /opt/rocm/llvm PATH_SUFFIXES bin)
if(CLANG)
set(ENV{CXX} ${CLANG})
else()
message(FATAL_ERROR "Cannot find hcc")
message(FATAL_ERROR "Cannot find clang++")
endif()
endif()
......
......@@ -134,6 +134,11 @@ void set_offload_copy(compile_options& options, bool value) { options.offload_co
void set_fast_math(compile_options& options, bool value) { options.fast_math = value; }
void set_exhaustive_tune_flag(compile_options& options, bool value)
{
options.exhaustive_tune = value;
}
void set_file_format(file_options& options, const char* format) { options.format = format; }
void set_default_dim_value(onnx_options& options, size_t value)
......@@ -1690,6 +1695,19 @@ migraphx_compile_options_set_fast_math(migraphx_compile_options_t compile_option
return api_error_result;
}
extern "C" migraphx_status
migraphx_compile_options_set_exhaustive_tune_flag(migraphx_compile_options_t compile_options,
bool value)
{
auto api_error_result = migraphx::try_([&] {
if(compile_options == nullptr)
MIGRAPHX_THROW(migraphx_status_bad_param,
"Bad parameter compile_options: Null pointer");
migraphx::set_exhaustive_tune_flag((compile_options->object), (value));
});
return api_error_result;
}
extern "C" migraphx_status
migraphx_parse_onnx(migraphx_program_t* out, const char* name, migraphx_onnx_options_t options)
{
......
......@@ -427,6 +427,10 @@ migraphx_compile_options_set_offload_copy(migraphx_compile_options_t compile_opt
migraphx_status migraphx_compile_options_set_fast_math(migraphx_compile_options_t compile_options,
bool value);
migraphx_status
migraphx_compile_options_set_exhaustive_tune_flag(migraphx_compile_options_t compile_options,
bool value);
migraphx_status
migraphx_parse_onnx(migraphx_program_t* out, const char* name, migraphx_onnx_options_t options);
......
......@@ -1015,6 +1015,12 @@ struct compile_options : MIGRAPHX_HANDLE_BASE(compile_options)
{
call(&migraphx_compile_options_set_fast_math, this->get_handle_ptr(), value);
}
/// Set or unset exhaustive search to find the fastest kernel
void set_exhaustive_tune_flag(bool value = true)
{
call(&migraphx_compile_options_set_exhaustive_tune_flag, this->get_handle_ptr(), value);
}
};
/// A program represents all the computation graphs to be compiled and executed
......
......@@ -354,6 +354,9 @@ def compile_options(h):
h.method('set_fast_math',
api.params(value='bool'),
invoke='migraphx::set_fast_math($@)')
h.method('set_exhaustive_tune_flag',
api.params(value='bool'),
invoke='migraphx::set_exhaustive_tune_flag($@)')
api.add_function('migraphx_parse_onnx',
......
......@@ -70,9 +70,6 @@ std::vector<char> src_compiler::compile(const std::vector<src_file>& srcs) const
if(not fs::exists(out_path))
MIGRAPHX_THROW("Output file missing: " + out);
if(process)
out_path = process(out_path);
return read_buffer(out_path.string());
}
......
......@@ -326,8 +326,7 @@ struct compiler
loader l;
program_params parameters;
compiler_target ct;
bool offload_copy = false;
bool fast_math = true;
compile_options co;
precision quantize = precision::fp32;
std::vector<std::string> fill0;
......@@ -337,19 +336,26 @@ struct compiler
l.parse(ap);
parameters.parse(ap);
ct.parse(ap);
ap(offload_copy,
ap(co.offload_copy,
{"--enable-offload-copy"},
ap.help("Enable implicit offload copying"),
ap.set_value(true));
ap(fast_math,
ap(co.fast_math,
{"--disable-fast-math"},
ap.help("Disable fast math optimization"),
ap.set_value(false));
ap(co.exhaustive_tune,
{"--exhaustive-tune"},
ap.help("Exhastively search for best tuning parameters for kernels"),
ap.set_value(true));
ap(quantize, {"--fp16"}, ap.help("Quantize for fp16"), ap.set_value(precision::fp16));
ap(quantize, {"--int8"}, ap.help("Quantize for int8"), ap.set_value(precision::int8));
}
auto params(const program& p) { return parameters.generate(p, ct.get_target(), offload_copy); }
auto params(const program& p)
{
return parameters.generate(p, ct.get_target(), co.offload_copy);
}
program compile()
{
......@@ -366,10 +372,7 @@ struct compiler
{
quantize_int8(p, t, {params(p)});
}
compile_options options;
options.offload_copy = offload_copy;
options.fast_math = fast_math;
p.compile(t, options);
p.compile(t, co);
l.save(p);
return p;
}
......@@ -402,60 +405,41 @@ struct params : command<params>
struct verify : command<verify>
{
loader l;
program_params parameters;
compiler_target ct;
compiler c;
double tolerance = 80;
bool per_instruction = false;
bool reduce = false;
bool offload_copy = false;
bool fast_math = true;
precision quantize = precision::fp32;
void parse(argument_parser& ap)
{
l.parse(ap);
parameters.parse(ap);
ct.parse(ap);
ap(offload_copy,
{"--enable-offload-copy"},
ap.help("Enable implicit offload copying"),
ap.set_value(true));
ap(fast_math,
{"--disable-fast-math"},
ap.help("Disable fast math optimization"),
ap.set_value(false));
c.parse(ap);
ap(tolerance, {"--tolerance"}, ap.help("Tolerance for errors"));
ap(per_instruction,
{"-i", "--per-instruction"},
ap.help("Verify each instruction"),
ap.set_value(true));
ap(reduce, {"-r", "--reduce"}, ap.help("Reduce program and verify"), ap.set_value(true));
ap(quantize, {"--fp16"}, ap.help("Quantize for fp16"), ap.set_value(precision::fp16));
}
void run()
{
auto p = l.load();
l.save(p);
auto p = c.l.load();
c.l.save(p);
std::cout << p << std::endl;
compile_options options;
options.offload_copy = offload_copy;
options.fast_math = fast_math;
auto t = ct.get_target();
auto m = parameters.generate(p, t, true);
auto t = c.ct.get_target();
auto m = c.parameters.generate(p, t, true);
if(per_instruction)
{
verify_instructions(p, t, options, quantize, tolerance);
verify_instructions(p, t, c.co, c.quantize, tolerance);
}
else if(reduce)
{
verify_reduced_program(p, t, options, quantize, m, tolerance);
verify_reduced_program(p, t, c.co, c.quantize, m, tolerance);
}
else
{
verify_program(l.file, p, t, options, quantize, m, tolerance);
verify_program(c.l.file, p, t, c.co, c.quantize, m, tolerance);
}
}
};
......
......@@ -108,8 +108,6 @@ target get_target(bool gpu)
return make_target("cpu");
}
void compile_program(program& p, bool gpu) { p.compile(get_target(gpu)); }
} // namespace MIGRAPHX_INLINE_NS
} // namespace driver
} // namespace migraphx
......@@ -37,7 +37,6 @@ parameter_map create_param_map(const program& p, const target& t, bool offload =
parameter_map fill_param_map(parameter_map& m, const program& p, bool gpu);
parameter_map create_param_map(const program& p, bool gpu = true);
target get_target(bool gpu);
void compile_program(program& p, bool gpu = true);
} // namespace MIGRAPHX_INLINE_NS
} // namespace driver
......
......@@ -32,8 +32,9 @@ inline namespace MIGRAPHX_INLINE_NS {
struct compile_options
{
bool offload_copy = false;
bool fast_math = true;
bool offload_copy = false;
bool fast_math = true;
bool exhaustive_tune = false;
tracer trace{};
};
......
......@@ -66,6 +66,7 @@ any_ptr get_queue_context(T&)
{
return {};
}
template <class T>
void wait_for_context(T&, any_ptr)
{
......@@ -412,6 +413,7 @@ inline const ValueType& any_cast(const context& x)
#endif
inline void migraphx_to_value(value& v, const context& ctx) { v = ctx.to_value(); }
inline void migraphx_from_value(const value& v, context& ctx) { ctx.from_value(v); }
#endif
......