"docs/vscode:/vscode.git/clone" did not exist on "8267c7844504b55366525169187767ef92d1f499"
Commit 11e155c2 authored by Paul

Merge

parents 8a9c5bce aa7ff911
......@@ -4,7 +4,7 @@ CheckOptions:
- key: bugprone-unused-return-value.CheckedFunctions
value: '::std::async;::std::launder;::std::remove;::std::remove_if;::std::unique;::std::unique_ptr::release;::std::basic_string::empty;::std::vector::empty;::std::find;::std::find_if;::std::find_if_not;::std::all_of;::std::any_of;::std::none_of;::std::count;::std::count_if;::std::mismatch;::std::find_end;::std::find_first_of;::std::adjacent_find;::std::search;::std::search_n;::std::nth_element;::std::lower_bound;::std::upper_bound;::std::binary_search;::std::equal_range;::std::max;::std::max_element;::std::min;::std::min_element;::std::minmax;::std::minmax_element;::std::equal;::std::lexicographical_compare;::std::accumulate;::std::inner_product'
- key: cppcoreguidelines-macro-usage.AllowedRegexp
value: 'DEBUG|FALLTHROUGH|STRINGIZE|_HAS_|_THROW|_REQUIRES|_DECLARE_|_VISIT_|_REGISTER_|_GENERATE_|_DETAIL_|_TIDY_|_MANAGE_PTR|_MATCHER|DEVICE_SHARED|_WORKAROUND_'
value: 'DEBUG|ASSERT|ASSUME|UNREACHABLE|FALLTHROUGH|DEPRECATED|STRINGIZE|_HAS_|_THROW|_REQUIRES|_DECLARE_|_VISIT_|_REGISTER_|_GENERATE_|_DETAIL_|_TIDY_|_MANAGE_PTR|_MATCHER|DEVICE_SHARED|_WORKAROUND_'
- key: modernize-loop-convert.MinConfidence
value: risky
- key: modernize-loop-convert.NamingStyle
......
......@@ -4,7 +4,7 @@
# are installed, and if so, uses the installed version to format
# the staged changes.
base=clang-format-5.0
base=clang-format-10
format=""
yapf_base=yapf
yapf_format=""
......
......@@ -14,6 +14,9 @@ jobs:
runs-on: ubuntu-18.04
steps:
- name: Free space
run: sudo rm -rf /usr/local/android /usr/share/dotnet /usr/local/share/boost /opt/ghc /usr/local/share/chrom* /usr/share/swift /usr/local/julia* /usr/local/lib/android
- uses: actions/checkout@v2
# In this step, this action saves a list of existing images,
......@@ -60,6 +63,8 @@ jobs:
runs-on: ubuntu-18.04
steps:
- name: Free space
run: sudo rm -rf /usr/local/android /usr/share/dotnet /usr/local/share/boost /opt/ghc /usr/local/share/chrom* /usr/share/swift /usr/local/julia* /usr/local/lib/android
- uses: actions/checkout@v2
# In this step, this action saves a list of existing images,
......@@ -103,6 +108,8 @@ jobs:
runs-on: ubuntu-18.04
steps:
- name: Free space
run: sudo rm -rf /usr/local/android /usr/share/dotnet /usr/local/share/boost /opt/ghc /usr/local/share/chrom* /usr/share/swift /usr/local/julia* /usr/local/lib/android
- uses: actions/checkout@v2
# In this step, this action saves a list of existing images,
......@@ -127,7 +134,7 @@ jobs:
-o -iname '*.cpp.in' \
-o -iname '*.cl' \
| grep -v 'build/' \
| xargs -n 1 -P 1 -I{} -t sh -c 'clang-format-5.0 -style=file {} | diff - {}'
| xargs -n 1 -P 1 -I{} -t sh -c 'clang-format-10 -style=file {} | diff - {}'
find . -iname '*.py' \
| grep -v 'build/' \
| xargs -n 1 -P 1 -I{} -t sh -c 'yapf {} | diff - {}'
......@@ -136,17 +143,21 @@ jobs:
runs-on: ubuntu-18.04
steps:
- name: Free space
run: sudo rm -rf /usr/local/android /usr/share/dotnet /usr/local/share/boost /opt/ghc /usr/local/share/chrom* /usr/share/swift /usr/local/julia* /usr/local/lib/android
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.6
python-version: 3.8
- name: Install pyflakes
run: pip install pyflakes==2.3.1 mypy==0.931
run: pip install pyflakes==2.4.0 mypy==0.931
- name: Run pyflakes
run: |
pyflakes --version
pyflakes examples/ tools/ src/ test/ doc/
mypy --version
mypy tools/api.py
linux:
......@@ -171,6 +182,8 @@ jobs:
- codecov
steps:
- name: Free space
run: sudo rm -rf /usr/local/android /usr/share/dotnet /usr/local/share/boost /opt/ghc /usr/local/share/chrom* /usr/share/swift /usr/local/julia* /usr/local/lib/android
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
......
*.swp
#==============================================================================#
# File extensions to be ignored anywhere in the tree.
#==============================================================================#
# Temp files created by most text editors
*~
# Merge files created by git
*.orig
# Byte compiled python modules
*.pyc
*.pyd
# Vim swap files
.*.sw?
.sw?
# Visual Studio
.vs
/.vscode/*
# Sublime Text settings
*.sublime-workspace
*.sublime-project
# Eclipse Project settings
*.*project
.settings
# OS X specific files
.DS_store
#==============================================================================#
# Explicit files to ignore (only matches one).
#==============================================================================#
# Various tags
/tags
/TAGS
/GPATH
/GRTAGS
/GSYMS
/GTAGS
/ID
.gitusers
/compile_commands.json
/CMakeSettings.json
#==============================================================================#
# Directories to ignore (do not add trailing '/'s, they skip symlinks).
#==============================================================================#
# Nested build directory
/build*
# Downloaded models
test/onnx/models
# VS2017 and VSCode config files.
.vscode
.vs
......@@ -10,6 +10,12 @@ if( NOT MSVC_IDE AND NOT CMAKE_BUILD_TYPE )
set( CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel." )
endif()
# Setup valid strings for build type
if (NOT CMAKE_CONFIGURATION_TYPES)
set(CMAKE_CONFIGURATION_TYPES "Debug;Release;RelWithDebInfo;MinSizeRel" CACHE STRING "Configs")
endif()
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${CMAKE_CONFIGURATION_TYPES})
# Default installation path
if(WIN32)
set(CMAKE_INSTALL_PREFIX "/opt/rocm/x86_64-w64-mingw32" CACHE PATH "")
......@@ -36,21 +42,15 @@ find_package(nlohmann_json 3.8.0 REQUIRED)
include(ROCMSetupVersion)
rocm_setup_version(VERSION 2.0)
rocm_setup_version(VERSION 2.3)
set(MIGRAPHX_SO_VERSION ${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR})
option( BUILD_SHARED_LIBS "Build as a shared library" ON )
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "5.4")
message(FATAL_ERROR "MIGraph requires at least gcc 5.4")
endif()
endif()
include(CheckCXXCompilerFlag)
check_cxx_compiler_flag("--cuda-host-only -x hip" HAS_HIP)
if(HAS_HIP)
message(STATUS "Enable miopen backend")
message(STATUS "Enable gpu backend")
set(MIGRAPHX_ENABLE_GPU On CACHE BOOL "")
else()
set(MIGRAPHX_ENABLE_GPU Off CACHE BOOL "")
......@@ -60,10 +60,12 @@ endif()
set(MIGRAPHX_ENABLE_CPU Off CACHE BOOL "")
set(CMAKE_CXX_STANDARD_DEFAULT "")
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
add_compile_options(-std=c++17)
add_compile_options(-std=c++17)
if(${CMAKE_VERSION} VERSION_LESS "3.12.0")
set(CONFIGURE_DEPENDS)
else()
add_compile_options(-std=c++14)
set(CONFIGURE_DEPENDS CONFIGURE_DEPENDS)
endif()
list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
......@@ -91,11 +93,14 @@ rocm_enable_clang_tidy(
modernize-*
performance-*
readability-*
-bugprone-signed-char-misuse
-bugprone-easily-swappable-parameters
-bugprone-implicit-widening-of-multiplication-result
-bugprone-macro-parentheses
-bugprone-signed-char-misuse
# Disable the aliased reserved identifiers
-cert-dcl37-c
-cert-dcl51-cpp
-cert-err33-c
-cert-str34-c
# Disable all alpha checks by default
-clang-analyzer-alpha*
......@@ -125,6 +130,7 @@ rocm_enable_clang_tidy(
-cppcoreguidelines-pro-type-union-access
-cppcoreguidelines-pro-type-vararg
-cppcoreguidelines-special-member-functions
-cppcoreguidelines-virtual-class-destructor
-google-readability-*
-google-runtime-int
-google-runtime-references
......@@ -142,8 +148,10 @@ rocm_enable_clang_tidy(
-readability-convert-member-functions-to-static
-readability-else-after-return
-readability-function-cognitive-complexity
-readability-identifier-length
-readability-named-parameter
-readability-redundant-string-init
-readability-suspicious-call-argument
-readability-uppercase-literal-suffix
-*-avoid-c-arrays
-*-explicit-constructor
......@@ -176,7 +184,7 @@ rocm_enable_cppcheck(
style
performance
portability
SUPPRESS
SUPPRESS
ConfigurationNotChecked
unmatchedSuppression
unusedFunction
......@@ -195,6 +203,8 @@ rocm_enable_cppcheck(
useSmartPointer:*make_shared_array.hpp
constParameter:*src/targets/gpu/*.cpp
constParameter:*src/targets/gpu/*.hpp
# Suppress mlir_conv.cpp since this file will be deleted
*:*src/targets/gpu/mlir_conv.cpp
FORCE
INCONCLUSIVE
RULE_FILE
......@@ -214,6 +224,7 @@ rocm_enable_cppcheck(
CPPCHECK=1
__device__=
__host__=
__global__=
)
enable_testing()
......
FROM ubuntu:18.04
FROM ubuntu:20.04
ARG PREFIX=/usr/local
......@@ -6,26 +6,22 @@ ARG PREFIX=/usr/local
RUN dpkg --add-architecture i386
# Add rocm repository
RUN sh -c 'echo deb [arch=amd64 trusted=yes] http://repo.radeon.com/rocm/apt/4.5/ ubuntu main > /etc/apt/sources.list.d/rocm.list'
RUN sh -c 'echo deb [arch=amd64 trusted=yes] http://repo.radeon.com/rocm/apt/5.0.2/ ubuntu main > /etc/apt/sources.list.d/rocm.list'
# Install dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \
apt-utils \
build-essential \
clang-format-5.0 \
clang-format-10 \
cmake \
curl \
doxygen \
g++-5 \
g++-7 \
gdb \
git \
lcov \
locales \
pkg-config \
python \
python-dev \
python-pip \
python3 \
python3-dev \
python3-pip \
......
......@@ -20,7 +20,7 @@ def rocmtestnode(Map conf) {
rm -rf build
mkdir build
cd build
CXX=${compiler} CXXFLAGS='-Werror' cmake -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache ${flags} ..
CXX=${compiler} CXXFLAGS='-Werror' cmake -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache ${flags} ..
make -j\$(nproc) generate all doc package check VERBOSE=1
"""
echo cmd
......@@ -112,17 +112,17 @@ rocmtest clang_debug: rocmnode('vega') { cmake_build ->
def debug_flags = "-g -O2 -fno-omit-frame-pointer -fsanitize=${sanitizers} -fno-sanitize-recover=${sanitizers}"
cmake_build("/opt/rocm/llvm/bin/clang++", "-DCMAKE_BUILD_TYPE=debug -DMIGRAPHX_ENABLE_PYTHON=Off -DMIGRAPHX_ENABLE_GPU=Off -DMIGRAPHX_ENABLE_CPU=On -DCMAKE_CXX_FLAGS_DEBUG='${debug_flags}'")
}
}, clang_release_navi: rocmnode('navi21') { cmake_build ->
stage('HIP Clang Release Navi') {
cmake_build("/opt/rocm/llvm/bin/clang++", "-DCMAKE_BUILD_TYPE=release")
}
}
}//, clang_release_navi: rocmnode('navi21') { cmake_build ->
// stage('HIP Clang Release Navi') {
// cmake_build("/opt/rocm/llvm/bin/clang++", "-DCMAKE_BUILD_TYPE=release")
// }
//}
def onnxnode(name, body) {
return { label ->
rocmtestnode(variant: label, node: rocmnodename(name), docker_args: '-u root', body: body, pre: {
sh 'rm -rf ./build/*.deb'
unstash 'migraphx-package'
unstash 'migraphx-package'
})
}
}
......
......@@ -205,7 +205,7 @@ Depending on your setup `sudo` may be required for the pip install.
All the code is formatted using clang-format. To format a file, use:
```
clang-format-5.0 -style=file -i <path-to-source-file>
clang-format-10 -style=file -i <path-to-source-file>
```
Also, githooks can be installed to format the code per-commit:
......
################################################################################
#
# MIT License
#
# Copyright (c) 2017 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
################################################################################
include(CMakeParseArguments)
include(MainDoc)
find_program(DOXYGEN_EXECUTABLE NAMES doxygen
PATH_SUFFIXES bin
DOC "Doxygen documentation generator"
)
mark_as_advanced(DOXYGEN_EXECUTABLE)
find_path(DOT_EXECUTABLE NAMES dot
PATH_SUFFIXES bin
DOC "Graphviz"
)
mark_as_advanced(DOT_EXECUTABLE)
set(DOXYGEN_ARGS
ABBREVIATE_BRIEF
ALIASES
ALLEXTERNALS
ALLOW_UNICODE_NAMES
ALPHABETICAL_INDEX
ALWAYS_DETAILED_SEC
AUTOLINK_SUPPORT
BINARY_TOC
BRIEF_MEMBER_DESC
BUILTIN_STL_SUPPORT
CALLER_GRAPH
CALL_GRAPH
CASE_SENSE_NAMES
CHM_FILE
CHM_INDEX_ENCODING
CITE_BIB_FILES
CLANG_ASSISTED_PARSING
CLANG_OPTIONS
CLASS_DIAGRAMS
CLASS_GRAPH
COLLABORATION_GRAPH
COLS_IN_ALPHA_INDEX
COMPACT_LATEX
COMPACT_RTF
CPP_CLI_SUPPORT
CREATE_SUBDIRS
DIAFILE_DIRS
DIA_PATH
DIRECTORY_GRAPH
DISABLE_INDEX
DISTRIBUTE_GROUP_DOC
DOCBOOK_OUTPUT
DOCBOOK_PROGRAMLISTING
DOCSET_BUNDLE_ID
DOCSET_FEEDNAME
DOCSET_PUBLISHER_ID
DOCSET_PUBLISHER_NAME
DOTFILE_DIRS
DOT_CLEANUP
DOT_FONTNAME
DOT_FONTPATH
DOT_FONTSIZE
DOT_GRAPH_MAX_NODES
DOT_IMAGE_FORMAT
DOT_MULTI_TARGETS
DOT_NUM_THREADS
# DOT_PATH
DOT_TRANSPARENT
DOXYFILE_ENCODING
ECLIPSE_DOC_ID
ENABLED_SECTIONS
ENABLE_PREPROCESSING
ENUM_VALUES_PER_LINE
EXAMPLE_PATH
EXAMPLE_PATTERNS
EXAMPLE_RECURSIVE
EXCLUDE
EXCLUDE_PATTERNS
EXCLUDE_SYMBOLS
EXCLUDE_SYMLINKS
EXPAND_AS_DEFINED
EXPAND_ONLY_PREDEF
EXTENSION_MAPPING
EXTERNAL_GROUPS
EXTERNAL_PAGES
EXTERNAL_SEARCH
EXTERNAL_SEARCH_ID
EXTRACT_ALL
EXTRACT_ANON_NSPACES
EXTRACT_LOCAL_CLASSES
EXTRACT_LOCAL_METHODS
EXTRACT_PACKAGE
EXTRACT_PRIVATE
EXTRACT_STATIC
EXTRA_PACKAGES
EXTRA_SEARCH_MAPPINGS
EXT_LINKS_IN_WINDOW
FILE_PATTERNS
FILE_VERSION_FILTER
FILTER_PATTERNS
FILTER_SOURCE_FILES
FILTER_SOURCE_PATTERNS
FORCE_LOCAL_INCLUDES
FORMULA_FONTSIZE
FORMULA_TRANSPARENT
FULL_PATH_NAMES
GENERATE_AUTOGEN_DEF
GENERATE_BUGLIST
GENERATE_CHI
GENERATE_DEPRECATEDLIST
GENERATE_DOCBOOK
GENERATE_DOCSET
GENERATE_ECLIPSEHELP
GENERATE_HTML
GENERATE_HTMLHELP
GENERATE_LATEX
GENERATE_LEGEND
GENERATE_MAN
GENERATE_PERLMOD
GENERATE_QHP
GENERATE_RTF
GENERATE_TAGFILE
GENERATE_TESTLIST
GENERATE_TODOLIST
GENERATE_TREEVIEW
GENERATE_XML
GRAPHICAL_HIERARCHY
GROUP_GRAPHS
GROUP_NESTED_COMPOUNDS
# HAVE_DOT
HHC_LOCATION
HIDE_COMPOUND_REFERENCE
HIDE_FRIEND_COMPOUNDS
HIDE_IN_BODY_DOCS
HIDE_SCOPE_NAMES
HIDE_UNDOC_CLASSES
HIDE_UNDOC_MEMBERS
HIDE_UNDOC_RELATIONS
HTML_COLORSTYLE_GAMMA
HTML_COLORSTYLE_HUE
HTML_COLORSTYLE_SAT
HTML_DYNAMIC_SECTIONS
HTML_EXTRA_FILES
HTML_EXTRA_STYLESHEET
HTML_FILE_EXTENSION
HTML_FOOTER
HTML_HEADER
HTML_INDEX_NUM_ENTRIES
HTML_OUTPUT
HTML_STYLESHEET
HTML_TIMESTAMP
IDL_PROPERTY_SUPPORT
IGNORE_PREFIX
IMAGE_PATH
INCLUDED_BY_GRAPH
INCLUDE_FILE_PATTERNS
INCLUDE_GRAPH
INCLUDE_PATH
INHERIT_DOCS
INLINE_GROUPED_CLASSES
INLINE_INFO
INLINE_INHERITED_MEMB
INLINE_SIMPLE_STRUCTS
INLINE_SOURCES
INPUT
INPUT_ENCODING
INPUT_FILTER
INTERACTIVE_SVG
INTERNAL_DOCS
JAVADOC_AUTOBRIEF
LATEX_BATCHMODE
LATEX_BIB_STYLE
LATEX_CMD_NAME
LATEX_EXTRA_FILES
LATEX_EXTRA_STYLESHEET
LATEX_FOOTER
LATEX_HEADER
LATEX_HIDE_INDICES
LATEX_OUTPUT
LATEX_SOURCE_CODE
LATEX_TIMESTAMP
LAYOUT_FILE
LOOKUP_CACHE_SIZE
MACRO_EXPANSION
MAKEINDEX_CMD_NAME
MAN_EXTENSION
MAN_LINKS
MAN_OUTPUT
MAN_SUBDIR
MARKDOWN_SUPPORT
MATHJAX_CODEFILE
MATHJAX_EXTENSIONS
MATHJAX_FORMAT
MATHJAX_RELPATH
MAX_DOT_GRAPH_DEPTH
MAX_INITIALIZER_LINES
MSCFILE_DIRS
MSCGEN_PATH
MULTILINE_CPP_IS_BRIEF
OPTIMIZE_FOR_FORTRAN
OPTIMIZE_OUTPUT_FOR_C
OPTIMIZE_OUTPUT_JAVA
OPTIMIZE_OUTPUT_VHDL
OUTPUT_DIRECTORY
OUTPUT_LANGUAGE
PAPER_TYPE
PDF_HYPERLINKS
PERLMOD_LATEX
PERLMOD_MAKEVAR_PREFIX
PERLMOD_PRETTY
PERL_PATH
PLANTUML_CFG_FILE
PLANTUML_INCLUDE_PATH
PLANTUML_JAR_PATH
PREDEFINED
PROJECT_BRIEF
PROJECT_LOGO
PROJECT_NAME
PROJECT_NUMBER
QCH_FILE
QHG_LOCATION
QHP_CUST_FILTER_ATTRS
QHP_CUST_FILTER_NAME
QHP_NAMESPACE
QHP_SECT_FILTER_ATTRS
QHP_VIRTUAL_FOLDER
QT_AUTOBRIEF
QUIET
RECURSIVE
REFERENCED_BY_RELATION
REFERENCES_LINK_SOURCE
REFERENCES_RELATION
REPEAT_BRIEF
RTF_EXTENSIONS_FILE
RTF_HYPERLINKS
RTF_OUTPUT
RTF_SOURCE_CODE
RTF_STYLESHEET_FILE
SEARCHDATA_FILE
SEARCHENGINE
SEARCHENGINE_URL
SEARCH_INCLUDES
SEPARATE_MEMBER_PAGES
SERVER_BASED_SEARCH
SHORT_NAMES
SHOW_FILES
SHOW_GROUPED_MEMB_INC
SHOW_INCLUDE_FILES
SHOW_NAMESPACES
SHOW_USED_FILES
SIP_SUPPORT
SKIP_FUNCTION_MACROS
SORT_BRIEF_DOCS
SORT_BY_SCOPE_NAME
SORT_GROUP_NAMES
SORT_MEMBERS_CTORS_1ST
SORT_MEMBER_DOCS
SOURCE_BROWSER
SOURCE_TOOLTIPS
STRICT_PROTO_MATCHING
STRIP_CODE_COMMENTS
STRIP_FROM_INC_PATH
STRIP_FROM_PATH
SUBGROUPING
TAB_SIZE
TAGFILES
TCL_SUBST
TEMPLATE_RELATIONS
TOC_EXPAND
TOC_INCLUDE_HEADINGS
TREEVIEW_WIDTH
TYPEDEF_HIDES_STRUCT
UML_LIMIT_NUM_FIELDS
UML_LOOK
USE_HTAGS
USE_MATHJAX
USE_MDFILE_AS_MAINPAGE
USE_PDFLATEX
VERBATIM_HEADERS
WARNINGS
WARN_AS_ERROR
WARN_FORMAT
WARN_IF_DOC_ERROR
WARN_IF_UNDOCUMENTED
WARN_LOGFILE
WARN_NO_PARAMDOC
XML_OUTPUT
XML_PROGRAMLISTING
)
set(DOXYGEN_CONFIG_FILE "${CMAKE_CURRENT_BINARY_DIR}/doxygen/doxygen.conf" CACHE PATH "Path to generated doxygen configuration file")
function(add_doxygen_doc)
set(options)
set(oneValueArgs)
set(multiValueArgs DEPENDS ${DOXYGEN_ARGS})
cmake_parse_arguments(PARSE "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
file(WRITE ${DOXYGEN_CONFIG_FILE} "# Auto-generated doxygen configuration file\n")
if(NOT PARSE_STRIP_FROM_PATH)
set(PARSE_STRIP_FROM_PATH ${CMAKE_SOURCE_DIR})
endif()
foreach(ARG ${DOXYGEN_ARGS})
if(PARSE_${ARG})
string(REPLACE ";" " " ARG_VALUE "${PARSE_${ARG}}")
file(APPEND ${DOXYGEN_CONFIG_FILE} "\n${ARG} = ${ARG_VALUE}\n")
endif()
endforeach()
if(PARSE_OUTPUT_DIRECTORY)
if(NOT EXISTS ${PARSE_OUTPUT_DIRECTORY})
file(MAKE_DIRECTORY ${PARSE_OUTPUT_DIRECTORY})
endif()
endif()
if(DOT_EXECUTABLE)
file(APPEND ${DOXYGEN_CONFIG_FILE} "\nDOT_PATH = \"${DOT_EXECUTABLE}\"\n")
file(APPEND ${DOXYGEN_CONFIG_FILE} "\nHAVE_DOT = YES\n")
else()
file(APPEND ${DOXYGEN_CONFIG_FILE} "\nHAVE_DOT = NO\n")
endif()
add_custom_target(doxygen
${DOXYGEN_EXECUTABLE} ${DOXYGEN_CONFIG_FILE}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
COMMENT "Building documentation with doxygen"
)
if(PARSE_OUTPUT_DIRECTORY)
clean_doc_output(${PARSE_OUTPUT_DIRECTORY})
endif()
mark_as_doc(doxygen)
if(PARSE_DEPENDS)
add_dependencies(doxygen ${PARSE_DEPENDS})
endif()
endfunction()
......@@ -39,6 +39,8 @@ function(generate_embed_source EMBED_NAME)
file(WRITE "${PARSE_HEADER}" "
#include <unordered_map>
#include <string>
#include <utility>
const std::unordered_map<std::string, std::pair<const char*,const char*>>& ${EMBED_NAME}();
")
......@@ -94,5 +96,6 @@ function(add_embed_library EMBED_NAME)
generate_embed_source(${EMBED_NAME} SRC ${SRC_FILE} HEADER ${HEADER_FILE} OBJECTS ${OUTPUT_FILES} SYMBOLS ${SYMBOLS})
add_library(${EMBED_NAME} STATIC ${OUTPUT_FILES} "${SRC_FILE}")
target_include_directories(${EMBED_NAME} PUBLIC "${EMBED_DIR}/include")
target_compile_options(${EMBED_NAME} PRIVATE -Wno-reserved-identifier)
set_target_properties(${EMBED_NAME} PROPERTIES POSITION_INDEPENDENT_CODE On)
endfunction()
......@@ -96,6 +96,7 @@ else()
-Wno-gnu-zero-variadic-macro-arguments
-Wno-missing-prototypes
-Wno-nested-anon-types
-Wno-option-ignored
-Wno-padded
-Wno-shorten-64-to-32
-Wno-sign-conversion
......
################################################################################
#
# MIT License
#
# Copyright (c) 2017 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
################################################################################
if(NOT TARGET doc)
add_custom_target(doc)
endif()
function(mark_as_doc)
add_dependencies(doc ${ARGN})
endfunction()
function(clean_doc_output DIR)
set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES ${DIR})
endfunction()
......@@ -54,6 +54,10 @@ function(py_add_module NAME)
endfunction()
set(PYTHON_SEARCH_VERSIONS 2.7 3.5 3.6 3.7 3.8 3.9)
set(PYTHON_DISABLE_VERSIONS "" CACHE STRING "")
foreach(PYTHON_DISABLE_VERSION ${PYTHON_DISABLE_VERSIONS})
list(REMOVE_ITEM PYTHON_SEARCH_VERSIONS ${PYTHON_DISABLE_VERSION})
endforeach()
set(_PYTHON_VERSIONS)
foreach(PYTHON_VERSION ${PYTHON_SEARCH_VERSIONS})
......
################################################################################
#
# MIT License
#
# Copyright (c) 2017 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
################################################################################
include(CMakeParseArguments)
include(MainDoc)
include(DoxygenDoc)
find_program(SPHINX_EXECUTABLE NAMES sphinx-build
HINTS
$ENV{SPHINX_DIR}
PATH_SUFFIXES bin
DOC "Sphinx documentation generator"
)
mark_as_advanced(SPHINX_EXECUTABLE)
set(BINARY_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/sphinx/_build")
# Sphinx cache with pickled ReST documents
set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/sphinx/_doctrees")
# HTML output directory
set(SPHINX_DEFAULT_HTML_DIR "${CMAKE_CURRENT_BINARY_DIR}/sphinx/html")
function(add_sphinx_doc SRC_DIR)
set(options)
set(oneValueArgs BUILDER OUTPUT_DIR)
set(multiValueArgs DEPENDS VARS TEMPLATE_VARS)
cmake_parse_arguments(PARSE "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
string(TOUPPER ${PARSE_BUILDER} BUILDER)
set(ADDITIONAL_ARGS)
foreach(VAR ${PARSE_VARS})
list(APPEND ADDITIONAL_ARGS -D ${VAR})
endforeach()
foreach(VAR ${PARSE_TEMPLATE_VARS})
list(APPEND ADDITIONAL_ARGS -A ${VAR})
endforeach()
if(PARSE_OUTPUT_DIR)
get_filename_component(OUTPUT_DIR ${PARSE_OUTPUT_DIR} ABSOLUTE)
set(SPHINX_${BUILDER}_DIR ${OUTPUT_DIR} CACHE PATH "Path to ${PARSE_BUILDER} output")
else()
set(SPHINX_${BUILDER}_DIR "${CMAKE_CURRENT_BINARY_DIR}/sphinx/${PARSE_BUILDER}" CACHE PATH "Path to ${PARSE_BUILDER} output")
endif()
add_custom_target(sphinx-${BUILDER}
${SPHINX_EXECUTABLE}
-b ${PARSE_BUILDER}
-d "${SPHINX_CACHE_DIR}"
${ADDITIONAL_ARGS}
"${SRC_DIR}"
"${SPHINX_${BUILDER}_DIR}"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "Building ${PARSE_BUILDER} documentation with Sphinx"
)
clean_doc_output(${SPHINX_${BUILDER}_DIR})
clean_doc_output(${SPHINX_CACHE_DIR})
clean_doc_output(${BINARY_BUILD_DIR})
mark_as_doc(sphinx-${BUILDER})
if(PARSE_DEPENDS)
add_dependencies(sphinx-${BUILDER} ${PARSE_DEPENDS})
endif()
endfunction()
......@@ -2,6 +2,6 @@ pfultz2/rocm-recipes
facebook/zstd@v1.4.5 -X subdir -DCMAKE_DIR=build/cmake
ccache@v4.1
pcre,pfultz2/pcre@8.45 -H sha256:d6f7182602a775a7d500a0cedca6449af0400c6493951513046d17615ed0bf11
danmar/cppcheck@2.6 -DHAVE_RULES=1
RadeonOpenCompute/rocm-cmake@ececd2eccae4d01e7ec154efe90ac43ebf4df317 --build
danmar/cppcheck@2.8 -DHAVE_RULES=1
RadeonOpenCompute/rocm-cmake@1ebf7e7bc61bb5e949c171562b421264065230a7 --build
-f requirements.txt
project(migraphx-doc)
find_package(ROCM REQUIRED)
include(DoxygenDoc)
include(ROCMDoxygenDoc)
set(DOXYGEN_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/doxygen/)
add_doxygen_doc(
set(DOXYGEN_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/doxygen)
rocm_add_doxygen_doc(
OUTPUT_DIRECTORY ${DOXYGEN_OUTPUT}
INPUT
${PROJECT_SOURCE_DIR}/src
${CMAKE_SOURCE_DIR}/src
INCLUDE_PATH
${PROJECT_SOURCE_DIR}/src/include
${PROJECT_SOURCE_DIR}/src/targets/cpu/include
${PROJECT_SOURCE_DIR}/src/targets/gpu/include
${CMAKE_SOURCE_DIR}/src/include
${CMAKE_SOURCE_DIR}/src/targets/cpu/include
${CMAKE_SOURCE_DIR}/src/targets/gpu/include
STRIP_FROM_INC_PATH
${PROJECT_SOURCE_DIR}/src/include
${PROJECT_SOURCE_DIR}/src/targets/cpu/include
${PROJECT_SOURCE_DIR}/src/targets/gpu/include
${CMAKE_SOURCE_DIR}/src/include
${CMAKE_SOURCE_DIR}/src/targets/cpu/include
${CMAKE_SOURCE_DIR}/src/targets/gpu/include
EXCLUDE_PATTERNS
${PROJECT_SOURCE_DIR}/src/targets/gpu/kernels
${PROJECT_SOURCE_DIR}/src/targets/gpu/device
${CMAKE_SOURCE_DIR}/src/targets/gpu/kernels
${CMAKE_SOURCE_DIR}/src/targets/gpu/device
SEARCH_INCLUDES YES
MACRO_EXPANSION YES
RECURSIVE YES
......@@ -36,13 +38,14 @@ add_doxygen_doc(
EXTRACT_ALL YES
ENUM_VALUES_PER_LINE 1
FULL_PATH_NAMES YES
WARN_LOGFILE "${DOXYGEN_OUTPUT}/DoxygenWarningLog.txt"
PREDEFINED DOXYGEN
)
include(SphinxDoc)
add_sphinx_doc(src
include(ROCMSphinxDoc)
rocm_add_sphinx_doc(src
BUILDER html
OUTPUT_DIR html
OUTPUT_DIR html
VARS
breathe_projects.proj=${DOXYGEN_OUTPUT}/xml
breathe_default_project=proj
......@@ -51,7 +54,7 @@ add_sphinx_doc(src
find_package(LATEX)
if(LATEX_FOUND)
add_sphinx_doc(src
rocm_add_sphinx_doc(src
BUILDER latex
OUTPUT_DIR pdf
VARS
......@@ -60,6 +63,6 @@ if(LATEX_FOUND)
DEPENDS doxygen
)
else()
message("Latex builder not found. Latex builder is required only for building the PDF documentation for MIGraph and is not necessary for building the library, or any other components. To build PDF documentation run make in ${CMAKE_CURRENT_SOURCE_DIR}/pdf, once a latex builder is installed.")
message("Latex builder not found. Latex builder is required only for building the PDF documentation for MIGraphX and is not necessary for building the library, or any other components. To build PDF documentation run make in ${CMAKE_CURRENT_SOURCE_DIR}/pdf, once a latex builder is installed.")
endif()
Developer Guide
Contributor Guide
===============
.. toctree::
:maxdepth: 2
:caption: Contents:
overview
dev_intro
dev/data
dev/operators
dev/program
......
MIGraphX Fundamentals
======================
MIGraphX provides an optimized execution engine for deep learning neural networks.
We will cover some simple operations in the MIGraphX framework here.
For a quick start guide to using MIGraphX, look in the examples directory: ``https://github.com/ROCmSoftwarePlatform/AMDMIGraphX/tree/develop/examples/migraphx``.
Location of the Examples
-------------------------
The file ``ref_dev_examples.cpp`` can be found in the test directory (``/test``).
The executable ``test_ref_dev_examples``, built from this file, will be created in the ``bin/`` directory of the build directory after running ``make -j$(nproc) test_ref_dev_examples``.
The executable will also be created when running ``make -j$(nproc) check``, alongside all the other tests.
Directions for building MIGraphX from source can be found in the main README file: ``https://github.com/ROCmSoftwarePlatform/AMDMIGraphX#readme``.
Adding Two Literals
--------------------
A program is a collection of modules, which are collections of instructions to be executed when calling `eval <migraphx::program::eval>`.
Each instruction has an associated `operation <migraphx::operation>` which represents the computation to be performed by the instruction.
We start with a snippet of the simple ``add_two_literals()`` function::
// create the program and get a pointer to the main module
migraphx::program p;
auto* mm = p.get_main_module();
// add two literals to the program
auto one = mm->add_literal(1);
auto two = mm->add_literal(2);
// make the add operation between the two literals and add it to the program
mm->add_instruction(migraphx::make_op("add"), one, two);
// compile the program on the reference device
p.compile(migraphx::ref::target{});
// evaluate the program and retrieve the result
auto result = p.eval({}).back();
std::cout << "add_two_literals: 1 + 2 = " << result << "\n";
We start by creating a simple ``migraphx::program`` object and then getting a pointer to its main module.
A program is a collection of ``modules``; execution starts from the main module, so instructions are added to modules rather than directly to the program object.
We then use the `add_literal <migraphx::program::add_literal>` function to add an instruction that stores the literal number ``1`` while returning an `instruction_ref <migraphx::instruction_ref>`.
The returned `instruction_ref <migraphx::instruction_ref>` can be used in another instruction as an input.
We use the same `add_literal <migraphx::program::add_literal>` function to add a ``2`` to the program.
After creating the literals, we then create the instruction to add the numbers together.
This is done by using the `add_instruction <migraphx::program::add_instruction>` function with the ``"add"`` `operation <migraphx::operation>` created by `make_op <migraphx::make_op>`, along with the previous `add_literal` `instruction_ref <migraphx::instruction_ref>` objects as the input arguments of the instruction.
Finally, we can run this `program <migraphx::program>` by compiling it for the reference target (CPU) and then running it with `eval <migraphx::program::eval>`.
The result is then retrieved and printed to the console.
We can compile the program for the GPU as well, but the file will have to be moved to the ``test/gpu/`` directory and the correct target must be included::
#include <migraphx/gpu/target.hpp>
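With the GPU target included, compiling for the GPU is then a one-line change (a minimal sketch; the same target type is used in the ONNX example at the end of this guide)::

// compile the program for the GPU instead of the reference target
p.compile(migraphx::gpu::target{});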
Using Parameters
-----------------
The previous program will always produce the same value: the sum of ``1`` and ``2``.
In the next program, we want to pass an input to the program and compute a value based on that input.
We can modify the program to take an input parameter ``x``, as seen in the ``add_parameter()`` function::
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::int32_type, {1}};
// add a "x" parameter with the shape s
auto x = mm->add_parameter("x", s);
auto two = mm->add_literal(2);
// add the "add" instruction between the "x" parameter and "two" to the module
mm->add_instruction(migraphx::make_op("add"), x, two);
p.compile(migraphx::ref::target{});
This adds a parameter of type ``int32`` and compiles the program for the reference target (CPU).
To run the program, we need to pass the parameter as a ``parameter_map`` when we call `eval <migraphx::program::eval>`.
We create the ``parameter_map`` by setting the ``x`` key to an `argument <migraphx::argument>` object with an ``int`` data type::
// create a parameter_map object for passing a value to the "x" parameter
std::vector<int> data = {4};
migraphx::parameter_map params;
params["x"] = migraphx::argument(s, data.data());
auto result = p.eval(params).back();
std::cout << "add_parameters: 4 + 2 = " << result << "\n";
EXPECT(result.at<int>() == 6);
Handling Tensor Data
---------------------
In the previous examples we have only been dealing with scalars, but the `shape <migraphx::shape>` class can describe multi-dimensional tensors.
For example, we can compute a simple convolution::
migraphx::program p;
auto* mm = p.get_main_module();
// create shape objects for the input tensor and weights
migraphx::shape input_shape{migraphx::shape::float_type, {2, 3, 4, 4}};
migraphx::shape weights_shape{migraphx::shape::float_type, {3, 3, 3, 3}};
// create the parameters and add the "convolution" operation to the module
auto input = mm->add_parameter("X", input_shape);
auto weights = mm->add_parameter("W", weights_shape);
mm->add_instruction(migraphx::make_op("convolution", {{"padding", {1, 1}}, {"stride", {2, 2}}}), input, weights);
Here we create two parameters for both the ``input`` and ``weights``.
In the previous examples, we created simple literals; however, most programs will take data from allocated buffers (usually on the GPU).
In this case, we can create `argument <migraphx::argument>` objects directly from the pointers to the buffers::
// Compile the program
p.compile(migraphx::ref::target{});
// Allocated buffers by the user
std::vector<float> a = ...;
std::vector<float> c = ...;
// Solution vector
std::vector<float> sol = ...;
// Create the arguments in a parameter_map
migraphx::parameter_map params;
params["X"] = migraphx::argument(input_shape, a.data());
params["W"] = migraphx::argument(weights_shape, c.data());
// Evaluate and confirm the result
auto result = p.eval(params).back();
std::vector<float> results_vector(64);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, sol));
An `argument <migraphx::argument>` can handle memory buffers from either the GPU or the CPU.
By default when running the `program <migraphx::program>`, buffers are allocated on the corresponding target.
When compiling for the CPU, the buffers by default will be allocated on the CPU.
When compiling for the GPU, the buffers by default will be allocated on the GPU.
With the option ``offload_copy=true`` set while compiling for the GPU, the buffers will be located on the CPU.
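As a rough sketch of how this looks (assuming the ``migraphx::compile_options`` struct exposes an ``offload_copy`` member; verify against the current headers), the option is passed alongside the target when compiling::

// hypothetical sketch: keep the input and output buffers on the host
migraphx::compile_options options;
options.offload_copy = true;
p.compile(migraphx::gpu::target{}, options);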
Importing From ONNX
--------------------
A `program <migraphx::program>` can be built directly from an ONNX file using the MIGraphX ONNX parser.
This makes it easier to use neural networks directly from other frameworks.
In this case, there is a ``parse_onnx`` function::
program p = migraphx::parse_onnx("model.onnx");
p.compile(migraphx::gpu::target{});
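As a follow-up sketch (assuming the ``program::get_parameter_shapes()`` accessor; check the current API before relying on it), the parameters declared by the imported model can be inspected before evaluating it::

// list the name and shape of each parameter the ONNX model expects
for(auto&& param : p.get_parameter_shapes())
{
    std::cout << param.first << ": " << param.second << "\n";
}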
......@@ -13,7 +13,7 @@ Welcome to AMD MIGraphX's documentation!
py_user_guide
cpp_user_guide
driver
developer_guide
contributor_guide
Indices and tables
......
Overview
========
MIGraphX provides an optimized execution engine for deep learning neural networks.
Building a program
------------------
A program consists of a set of instructions to be executed when calling `eval <migraphx::program::eval>`. Each instruction has an associated `operation <migraphx::operation>` which represents the computation to be performed by the instruction.
We can start by building a simple program to add two numbers together::
program p;
instruction_ref one = p.add_literal(1);
instruction_ref two = p.add_literal(2);
p.add_instruction(add{}, one, two);
The `add_literal <migraphx::program::add_literal>` function will add an instruction to the program to store a literal number. The `instruction_ref <migraphx::instruction_ref>` is a reference to the instruction in the program, which can be used to compose the output of the instruction with another instruction.
After creating the literals, we then create the instruction to add the numbers together. This is done by using the `add{} <migraphx::add>` operation class along with the `instruction_ref <migraphx::instruction_ref>` for the input arguments of the instruction.
Finally, we can run this `program <migraphx::program>` by compiling it for the cpu and then running it with `eval <migraphx::program::eval>`::
p.compile(cpu::target{});
argument result = p.eval({});
The easiest way to see the result is to print it::
std::cout << result;
Which will print ``3``.
We can also compile the program for the gpu.
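For example, a minimal sketch using the same ``gpu::target`` that appears in the onnx example below::

p.compile(gpu::target{});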
Adding parameters
-----------------
Of course, this program will always produce the same value, which is quite uninteresting. Instead, we want to pass an input to a program and compute a value based on the input. This can be done with a parameter. For example, we can modify the program to take an input ``x``::
program p;
instruction_ref x = p.add_parameter("x", {shape::int64_type});
instruction_ref two = p.add_literal(2);
p.add_instruction(add{}, x, two);
p.compile(cpu::target{});
This adds a parameter of type ``int64``, and compiles it for the ``cpu``. To run the program, we need to pass the parameter to it when we call `eval <migraphx::program::eval>`::
argument result = p.eval({
{"x", literal{1}.get_argument()}
});
std::cout << result;
This will print ``3``.
A parameter is given as an `argument <migraphx::argument>`. In this case, the simplest way of creating an `argument <migraphx::argument>` is from a `literal <migraphx::literal>`.
Tensor data
-----------
In this example we are just creating numbers, but the `shape <migraphx::shape>` class can describe multi-dimensional tensors. For example, we can build a simple network with convolution and relu::
program p;
instruction_ref input = p.add_parameter("x", shape{shape::float_type, {1, 3, 32, 32}});
instruction_ref weights = p.add_parameter("w", shape{shape::float_type, {1, 3, 5, 5}});
instruction_ref conv = p.add_instruction(convolution{}, input, weights);
p.add_instruction(activation{"relu"}, conv);
Here we create two parameters for both the ``input`` and ``weights``. In the previous examples, we just created simple literals; however, most programs will take data from already allocated buffers (usually on the GPU). In this case, we can create `argument <migraphx::argument>` objects directly from the pointers to the buffers::
// Compile the program
p.compile(gpu::target{});
// Allocated buffers by the user
float* input = ...;
float* weights = ...;
// Create the arguments
argument input_arg{shape{shape::float_type, {1, 3, 32, 32}}, input};
argument weights_arg{shape{shape::float_type, {1, 3, 5, 5}}, weights};
p.eval({{"x", input_arg}, {"w", weights_arg}});
An `argument <migraphx::argument>` can handle memory buffers from either the GPU or the CPU, but when running the `program <migraphx::program>`, buffers should be allocated for the corresponding target. That is, when compiling for the CPU, the buffers should be allocated on the CPU, and when compiling for the GPU the buffers should be allocated on the GPU.
Importing from onnx
-------------------
A `program <migraphx::program>` can be built directly from an ONNX file, which makes it easier to use neural networks directly from other frameworks. In this case, there is a ``parse_onnx`` function::
program p = parse_onnx("model.onnx");
p.compile(gpu::target{});