// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
namespace Microsoft.ML.OnnxRuntime
{
/// <summary>
/// Graph optimization level to use with SessionOptions
/// [https://github.com/microsoft/onnxruntime/blob/main/docs/ONNX_Runtime_Graph_Optimizations.md]
/// </summary>
public enum GraphOptimizationLevel
{
ORT_DISABLE_ALL = 0,
ORT_ENABLE_BASIC = 1,
ORT_ENABLE_EXTENDED = 2,
ORT_ENABLE_ALL = 99
}
/// <summary>
/// Controls whether you want to execute operators in the graph sequentially or in parallel.
/// Usually when the model has many branches, setting this option to ExecutionMode.ORT_PARALLEL
/// will give you better performance.
/// See [ONNX_Runtime_Perf_Tuning.md] for more details.
/// </summary>
public enum ExecutionMode
{
ORT_SEQUENTIAL = 0,
ORT_PARALLEL = 1,
}
/// <summary>
/// Holds the options for creating an InferenceSession.
/// Wraps a native OrtSessionOptions instance via SafeHandle; disposed automatically.
/// </summary>
public class SessionOptions : SafeHandle
{
    // Delay-loaded CUDA or cuDNN DLLs. Currently, delayload is disabled. See cmake/CMakeLists.txt for more information.
    private static string[] cudaDelayLoadedLibs = { };
    private static string[] trtDelayLoadedLibs = { };

    #region Constructor and Factory methods

    /// <summary>
    /// Constructs an empty SessionOptions
    /// </summary>
    public SessionOptions()
        : base(IntPtr.Zero, true)
    {
        NativeApiStatus.VerifySuccess(NativeMethods.OrtCreateSessionOptions(out handle));
    }

    /// <summary>
    /// A helper method to construct a SessionOptions object for CUDA execution.
    /// Use only if CUDA is installed and you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="deviceId">Device Id</param>
    /// <returns>A SessionsOptions() object configured for execution on deviceId</returns>
    public static SessionOptions MakeSessionOptionWithCudaProvider(int deviceId = 0)
    {
        CheckCudaExecutionProviderDLLs();
        SessionOptions options = new SessionOptions();
        try
        {
            options.AppendExecutionProvider_CUDA(deviceId);
            return options;
        }
        catch (Exception)
        {
            // Dispose the partially configured options so the native handle is not leaked.
            options.Dispose();
            throw;
        }
    }

    /// <summary>
    /// A helper method to construct a SessionOptions object for CUDA execution provider.
    /// Use only if CUDA is installed and you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="cudaProviderOptions">CUDA EP provider options</param>
    /// <returns>A SessionsOptions() object configured for execution on provider options</returns>
    public static SessionOptions MakeSessionOptionWithCudaProvider(OrtCUDAProviderOptions cudaProviderOptions)
    {
        CheckCudaExecutionProviderDLLs();
        SessionOptions options = new SessionOptions();
        try
        {
            options.AppendExecutionProvider_CUDA(cudaProviderOptions);
            return options;
        }
        catch (Exception)
        {
            options.Dispose();
            throw;
        }
    }

    /// <summary>
    /// A helper method to construct a SessionOptions object for TensorRT execution.
    /// Use only if CUDA/TensorRT are installed and you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="deviceId">Device Id</param>
    /// <returns>A SessionsOptions() object configured for execution on deviceId</returns>
    public static SessionOptions MakeSessionOptionWithTensorrtProvider(int deviceId = 0)
    {
        CheckTensorrtExecutionProviderDLLs();
        SessionOptions options = new SessionOptions();
        try
        {
            options.AppendExecutionProvider_Tensorrt(deviceId);
            options.AppendExecutionProvider_CUDA(deviceId);
            return options;
        }
        catch (Exception)
        {
            options.Dispose();
            throw;
        }
    }

    /// <summary>
    /// A helper method to construct a SessionOptions object for TensorRT execution provider.
    /// Use only if CUDA/TensorRT are installed and you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="trtProviderOptions">TensorRT EP provider options</param>
    /// <returns>A SessionsOptions() object configured for execution on provider options</returns>
    public static SessionOptions MakeSessionOptionWithTensorrtProvider(OrtTensorRTProviderOptions trtProviderOptions)
    {
        CheckTensorrtExecutionProviderDLLs();
        SessionOptions options = new SessionOptions();
        try
        {
            // Make sure that CUDA EP uses the same device id as TensorRT EP.
            options.AppendExecutionProvider_Tensorrt(trtProviderOptions);
            options.AppendExecutionProvider_CUDA(trtProviderOptions.GetDeviceId());
            return options;
        }
        catch (Exception)
        {
            options.Dispose();
            throw;
        }
    }

    /// <summary>
    /// A helper method to construct a SessionOptions object for TVM execution.
    /// Use only if you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="settings">settings string, comprises of comma separated key:value pairs. default is empty</param>
    /// <returns>A SessionsOptions() object configured for execution with TVM</returns>
    public static SessionOptions MakeSessionOptionWithTvmProvider(String settings = "")
    {
        SessionOptions options = new SessionOptions();
        try
        {
            options.AppendExecutionProvider_Tvm(settings);
            return options;
        }
        catch (Exception)
        {
            // Dispose the partially configured options so the native handle is not leaked.
            options.Dispose();
            throw;
        }
    }

    /// <summary>
    /// A helper method to construct a SessionOptions object for ROCM execution.
    /// Use only if ROCM is installed and you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="deviceId">Device Id</param>
    /// <returns>A SessionsOptions() object configured for execution on deviceId</returns>
    public static SessionOptions MakeSessionOptionWithRocmProvider(int deviceId = 0)
    {
        SessionOptions options = new SessionOptions();
        try
        {
            options.AppendExecutionProvider_ROCM(deviceId);
            return options;
        }
        catch (Exception)
        {
            // Dispose the partially configured options so the native handle is not leaked.
            options.Dispose();
            throw;
        }
    }

    #endregion

    #region ExecutionProviderAppends

    /// <summary>
    /// Appends CPU EP to a list of available execution providers for the session.
    /// </summary>
    /// <param name="useArena">1 - use arena, 0 - do not use arena</param>
    public void AppendExecutionProvider_CPU(int useArena = 1)
    {
        NativeApiStatus.VerifySuccess(NativeMethods.OrtSessionOptionsAppendExecutionProvider_CPU(handle, useArena));
    }

    /// <summary>
    /// Appends the DNNL execution provider.
    /// Use only if you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="useArena">1 - use allocation arena, 0 - otherwise</param>
    public void AppendExecutionProvider_Dnnl(int useArena = 1)
    {
#if __MOBILE__
        throw new NotSupportedException("The DNNL Execution Provider is not supported in this build");
#else
        NativeApiStatus.VerifySuccess(NativeMethods.OrtSessionOptionsAppendExecutionProvider_Dnnl(handle, useArena));
#endif
    }

    /// <summary>
    /// Appends the CUDA execution provider.
    /// Use only if you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="deviceId">integer device ID</param>
    public void AppendExecutionProvider_CUDA(int deviceId = 0)
    {
#if __MOBILE__
        throw new NotSupportedException("The CUDA Execution Provider is not supported in this build");
#else
        NativeApiStatus.VerifySuccess(NativeMethods.OrtSessionOptionsAppendExecutionProvider_CUDA(handle, deviceId));
#endif
    }

    /// <summary>
    /// Append a CUDA EP instance (based on specified configuration) to the SessionOptions instance.
    /// Use only if you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="cudaProviderOptions">CUDA EP provider options</param>
    public void AppendExecutionProvider_CUDA(OrtCUDAProviderOptions cudaProviderOptions)
    {
#if __MOBILE__
        throw new NotSupportedException("The CUDA Execution Provider is not supported in this build");
#else
        NativeApiStatus.VerifySuccess(NativeMethods.SessionOptionsAppendExecutionProvider_CUDA_V2(handle, cudaProviderOptions.Handle));
#endif
    }

    /// <summary>
    /// Appends the DirectML execution provider.
    /// Use only if you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="deviceId">device identification</param>
    public void AppendExecutionProvider_DML(int deviceId = 0)
    {
#if __MOBILE__
        throw new NotSupportedException("The DML Execution Provider is not supported in this build");
#else
        NativeApiStatus.VerifySuccess(NativeMethods.OrtSessionOptionsAppendExecutionProvider_DML(handle, deviceId));
#endif
    }

    /// <summary>
    /// Appends the OpenVINO execution provider.
    /// Use only if you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="deviceId">device identification, default empty string</param>
    public void AppendExecutionProvider_OpenVINO(string deviceId = "")
    {
#if __MOBILE__
        throw new NotSupportedException("The OpenVINO Execution Provider is not supported in this build");
#else
        // Pin the UTF-8 copy of the string so the native call sees a stable pointer.
        var deviceIdPinned = GCHandle.Alloc(NativeOnnxValueHelper.StringToZeroTerminatedUtf8(deviceId), GCHandleType.Pinned);
        using (var pinnedDeviceIdName = new PinnedGCHandle(deviceIdPinned))
        {
            NativeApiStatus.VerifySuccess(NativeMethods.OrtSessionOptionsAppendExecutionProvider_OpenVINO(handle, pinnedDeviceIdName.Pointer));
        }
#endif
    }

    /// <summary>
    /// Appends the TensorRT execution provider.
    /// Use only if you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="deviceId">device identification</param>
    public void AppendExecutionProvider_Tensorrt(int deviceId = 0)
    {
#if __MOBILE__
        throw new NotSupportedException("The TensorRT Execution Provider is not supported in this build");
#else
        NativeApiStatus.VerifySuccess(NativeMethods.OrtSessionOptionsAppendExecutionProvider_Tensorrt(handle, deviceId));
#endif
    }

    /// <summary>
    /// Append a TensorRT EP instance (based on specified configuration) to the SessionOptions instance.
    /// Use only if you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="trtProviderOptions">TensorRT EP provider options</param>
    public void AppendExecutionProvider_Tensorrt(OrtTensorRTProviderOptions trtProviderOptions)
    {
#if __MOBILE__
        throw new NotSupportedException("The TensorRT Execution Provider is not supported in this build");
#else
        NativeApiStatus.VerifySuccess(NativeMethods.SessionOptionsAppendExecutionProvider_TensorRT_V2(handle, trtProviderOptions.Handle));
#endif
    }

    /// <summary>
    /// Appends the ROCM execution provider.
    /// Use only if you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="deviceId">Device Id</param>
    public void AppendExecutionProvider_ROCM(int deviceId = 0)
    {
#if __MOBILE__
        throw new NotSupportedException("The ROCM Execution Provider is not supported in this build");
#else
        NativeApiStatus.VerifySuccess(
            NativeMethods.OrtSessionOptionsAppendExecutionProvider_ROCM(handle, deviceId));
#endif
    }

    /// <summary>
    /// Appends the MIGraphX execution provider.
    /// Use only if you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="deviceId">device identification</param>
    public void AppendExecutionProvider_MIGraphX(int deviceId = 0)
    {
#if __MOBILE__
        throw new NotSupportedException($"The MIGraphX Execution Provider is not supported in this build");
#else
        NativeApiStatus.VerifySuccess(NativeMethods.OrtSessionOptionsAppendExecutionProvider_MIGraphX(handle, deviceId));
#endif
    }

    /// <summary>
    /// Appends the NNAPI execution provider (Android only).
    /// Use only if you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="nnapiFlags">NNAPI specific flag mask</param>
    public void AppendExecutionProvider_Nnapi(NnapiFlags nnapiFlags = NnapiFlags.NNAPI_FLAG_USE_NONE)
    {
#if __ANDROID__
        NativeApiStatus.VerifySuccess(
            NativeMethods.OrtSessionOptionsAppendExecutionProvider_Nnapi(handle, (uint)nnapiFlags));
#else
        throw new NotSupportedException("The NNAPI Execution Provider is not supported in this build");
#endif
    }

    /// <summary>
    /// Appends the CoreML execution provider (iOS/macOS only).
    /// Use only if you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="coremlFlags">CoreML specific flags</param>
    public void AppendExecutionProvider_CoreML(CoreMLFlags coremlFlags = CoreMLFlags.COREML_FLAG_USE_NONE)
    {
#if __IOS__
        NativeApiStatus.VerifySuccess(
            NativeMethods.OrtSessionOptionsAppendExecutionProvider_CoreML(handle, (uint)coremlFlags));
#else
#if __ENABLE_COREML__
        // only attempt if this is OSX
        if (RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
        {
            NativeApiStatus.VerifySuccess(
                NativeMethods.OrtSessionOptionsAppendExecutionProvider_CoreML(handle, (uint)coremlFlags));
        }
        else
#endif
        {
            throw new NotSupportedException("The CoreML Execution Provider is not supported in this build");
        }
#endif
    }

    /// <summary>
    /// Appends the TVM execution provider.
    /// Use only if you have the onnxruntime package specific to this Execution Provider.
    /// </summary>
    /// <param name="settings">string with TVM specific settings</param>
    public void AppendExecutionProvider_Tvm(string settings = "")
    {
#if __MOBILE__
        throw new NotSupportedException("The TVM Execution Provider is not supported in this build");
#else
        var settingsPinned = GCHandle.Alloc(NativeOnnxValueHelper.StringToZeroTerminatedUtf8(settings), GCHandleType.Pinned);
        using (var pinnedSettingsName = new PinnedGCHandle(settingsPinned))
        {
            NativeApiStatus.VerifySuccess(NativeMethods.OrtSessionOptionsAppendExecutionProvider_Tvm(handle, pinnedSettingsName.Pointer));
        }
#endif
    }

    /// <summary>
    /// Append SNPE or XNNPACK execution provider
    /// </summary>
    /// <param name="providerName">Execution provider to add. 'SNPE' or 'XNNPACK' are currently supported.</param>
    /// <param name="providerOptions">Optional key/value pairs to specify execution provider options.</param>
    public void AppendExecutionProvider(string providerName, Dictionary<string, string> providerOptions = null)
    {
        if (providerName != "SNPE" && providerName != "XNNPACK")
        {
            throw new NotSupportedException(
                "Only SNPE and XNNPACK execution providers can be enabled by this method.");
        }

        using (var cleanupList = new DisposableList<IDisposable>())
        {
            string[] ep = { providerName }; // put in array so we can use ConvertNamesToUtf8 for everything
            var epArray = NativeOnnxValueHelper.ConvertNamesToUtf8(ep, n => n, cleanupList);

            if (providerOptions == null)
            {
                providerOptions = new Dictionary<string, string>();
            }

            var keysArray = NativeOnnxValueHelper.ConvertNamesToUtf8(
                providerOptions.Keys.ToArray(), n => n, cleanupList);
            var valuesArray = NativeOnnxValueHelper.ConvertNamesToUtf8(
                providerOptions.Values.ToArray(), n => n, cleanupList);

            NativeApiStatus.VerifySuccess(NativeMethods.SessionOptionsAppendExecutionProvider(
                handle, epArray[0], keysArray, valuesArray, (UIntPtr)providerOptions.Count));
        }
    }

    #endregion //ExecutionProviderAppends

    #region Public Methods

    /// <summary>
    /// (Deprecated) Loads a DLL named 'libraryPath' and looks for this entry point:
    /// OrtStatus* RegisterCustomOps(OrtSessionOptions* options, const OrtApiBase* api);
    /// It then passes in the provided session options to this function along with the api base.
    /// Deprecated in favor of RegisterCustomOpLibraryV2() because it provides users with the library handle
    /// to release when all sessions relying on it are destroyed
    /// </summary>
    /// <param name="libraryPath">path to the custom op library</param>
    [ObsoleteAttribute("RegisterCustomOpLibrary(...) is obsolete. Use RegisterCustomOpLibraryV2(...) instead.", false)]
    public void RegisterCustomOpLibrary(string libraryPath)
    {
        // The handle is intentionally discarded here; V2 surfaces it to the caller.
        IntPtr libraryHandle = IntPtr.Zero;
        var libraryPathPinned = GCHandle.Alloc(NativeOnnxValueHelper.StringToZeroTerminatedUtf8(libraryPath), GCHandleType.Pinned);
        using (var pinnedlibraryPath = new PinnedGCHandle(libraryPathPinned))
        {
            NativeApiStatus.VerifySuccess(NativeMethods.OrtRegisterCustomOpsLibrary(handle, pinnedlibraryPath.Pointer, out libraryHandle));
        }
    }

    /// <summary>
    /// Loads a DLL named 'libraryPath' and looks for this entry point:
    /// OrtStatus* RegisterCustomOps(OrtSessionOptions* options, const OrtApiBase* api);
    /// It then passes in the provided session options to this function along with the api base.
    /// The handle to the loaded library is returned in 'libraryHandle'.
    /// It can be unloaded by the caller after all sessions using the passed in
    /// session options are destroyed, or if an error occurs and it is non null.
    /// Hint: .NET Core 3.1 has a 'NativeLibrary' class that can be used to free the library handle
    /// </summary>
    /// <param name="libraryPath">Custom op library path</param>
    /// <param name="libraryHandle">out parameter, library handle</param>
    public void RegisterCustomOpLibraryV2(string libraryPath, out IntPtr libraryHandle)
    {
        var libraryPathPinned = GCHandle.Alloc(NativeOnnxValueHelper.StringToZeroTerminatedUtf8(libraryPath), GCHandleType.Pinned);
        using (var pinnedlibraryPath = new PinnedGCHandle(libraryPathPinned))
        {
            NativeApiStatus.VerifySuccess(NativeMethods.OrtRegisterCustomOpsLibrary(handle, pinnedlibraryPath.Pointer, out libraryHandle));
        }
    }

    /// <summary>
    /// Add a pre-allocated initializer to a session. If a model contains an initializer with a name
    /// that is same as the name passed to this API call, ORT will use this initializer instance
    /// instead of deserializing one from the model file. This is useful when you want to share
    /// the same initializer across sessions.
    /// </summary>
    /// <param name="name">name of the initializer</param>
    /// <param name="ortValue">OrtValue containing the initializer. Lifetime of 'val' and the underlying initializer buffer must be
    /// managed by the user (created using the CreateTensorWithDataAsOrtValue API) and it must outlive the session object</param>
    public void AddInitializer(string name, OrtValue ortValue)
    {
        var utf8NamePinned = GCHandle.Alloc(NativeOnnxValueHelper.StringToZeroTerminatedUtf8(name), GCHandleType.Pinned);
        using (var pinnedName = new PinnedGCHandle(utf8NamePinned))
        {
            NativeApiStatus.VerifySuccess(NativeMethods.OrtAddInitializer(handle, pinnedName.Pointer, ortValue.Handle));
        }
    }

    /// <summary>
    /// Set a single session configuration entry as a pair of strings
    /// If a configuration with same key exists, this will overwrite the configuration with the given configValue
    /// </summary>
    /// <param name="configKey">config key name</param>
    /// <param name="configValue">config key value</param>
    public void AddSessionConfigEntry(string configKey, string configValue)
    {
        using (var pinnedConfigKeyName = new PinnedGCHandle(GCHandle.Alloc(NativeOnnxValueHelper.StringToZeroTerminatedUtf8(configKey), GCHandleType.Pinned)))
        using (var pinnedConfigValueName = new PinnedGCHandle(GCHandle.Alloc(NativeOnnxValueHelper.StringToZeroTerminatedUtf8(configValue), GCHandleType.Pinned)))
        {
            NativeApiStatus.VerifySuccess(NativeMethods.OrtAddSessionConfigEntry(handle,
                pinnedConfigKeyName.Pointer, pinnedConfigValueName.Pointer));
        }
    }

    /// <summary>
    /// Override symbolic dimensions (by specific denotation strings) with actual values if known at session initialization time to enable
    /// optimizations that can take advantage of fixed values (such as memory planning, etc)
    /// </summary>
    /// <param name="dimDenotation">denotation name</param>
    /// <param name="dimValue">denotation value</param>
    public void AddFreeDimensionOverride(string dimDenotation, long dimValue)
    {
        var utf8DimDenotationPinned = GCHandle.Alloc(NativeOnnxValueHelper.StringToZeroTerminatedUtf8(dimDenotation), GCHandleType.Pinned);
        using (var pinnedDimDenotation = new PinnedGCHandle(utf8DimDenotationPinned))
        {
            NativeApiStatus.VerifySuccess(NativeMethods.OrtAddFreeDimensionOverride(handle, pinnedDimDenotation.Pointer, dimValue));
        }
    }

    /// <summary>
    /// Override symbolic dimensions (by specific name strings) with actual values if known at session initialization time to enable
    /// optimizations that can take advantage of fixed values (such as memory planning, etc)
    /// </summary>
    /// <param name="dimName">dimension name</param>
    /// <param name="dimValue">dimension value</param>
    public void AddFreeDimensionOverrideByName(string dimName, long dimValue)
    {
        var utf8DimNamePinned = GCHandle.Alloc(NativeOnnxValueHelper.StringToZeroTerminatedUtf8(dimName), GCHandleType.Pinned);
        using (var pinnedDimName = new PinnedGCHandle(utf8DimNamePinned))
        {
            NativeApiStatus.VerifySuccess(NativeMethods.OrtAddFreeDimensionOverrideByName(handle, pinnedDimName.Pointer, dimValue));
        }
    }

    #endregion

    /// <summary>
    /// Native handle to the underlying OrtSessionOptions instance.
    /// </summary>
    internal IntPtr Handle
    {
        get
        {
            return handle;
        }
    }

    #region Public Properties

    /// <summary>
    /// Overrides SafeHandle.IsInvalid
    /// </summary>
    /// <value>returns true if handle is equal to Zero</value>
    public override bool IsInvalid { get { return handle == IntPtr.Zero; } }

    /// <summary>
    /// Enables the use of the memory allocation patterns in the first Run() call for subsequent runs. Default = true.
    /// </summary>
    /// <value>returns enableMemoryPattern flag value</value>
    public bool EnableMemoryPattern
    {
        get
        {
            return _enableMemoryPattern;
        }
        set
        {
            // Only call into native code when the value actually changes.
            if (!_enableMemoryPattern && value)
            {
                NativeApiStatus.VerifySuccess(NativeMethods.OrtEnableMemPattern(handle));
                _enableMemoryPattern = true;
            }
            else if (_enableMemoryPattern && !value)
            {
                NativeApiStatus.VerifySuccess(NativeMethods.OrtDisableMemPattern(handle));
                _enableMemoryPattern = false;
            }
        }
    }
    private bool _enableMemoryPattern = true;

    /// <summary>
    /// Path prefix to use for output of profiling data
    /// </summary>
    public string ProfileOutputPathPrefix
    {
        get; set;
    } = "onnxruntime_profile_"; // this is the same default in C++ implementation

    /// <summary>
    /// Enables profiling of InferenceSession.Run() calls. Default is false
    /// </summary>
    /// <value>returns _enableProfiling flag value</value>
    public bool EnableProfiling
    {
        get
        {
            return _enableProfiling;
        }
        set
        {
            if (!_enableProfiling && value)
            {
                NativeApiStatus.VerifySuccess(NativeMethods.OrtEnableProfiling(handle, NativeOnnxValueHelper.GetPlatformSerializedString(ProfileOutputPathPrefix)));
                _enableProfiling = true;
            }
            else if (_enableProfiling && !value)
            {
                NativeApiStatus.VerifySuccess(NativeMethods.OrtDisableProfiling(handle));
                _enableProfiling = false;
            }
        }
    }
    private bool _enableProfiling = false;

    /// <summary>
    /// Set filepath to save optimized model after graph level transformations. Default is empty, which implies saving is disabled.
    /// </summary>
    /// <value>returns _optimizedModelFilePath flag value</value>
    public string OptimizedModelFilePath
    {
        get
        {
            return _optimizedModelFilePath;
        }
        set
        {
            if (value != _optimizedModelFilePath)
            {
                NativeApiStatus.VerifySuccess(NativeMethods.OrtSetOptimizedModelFilePath(handle, NativeOnnxValueHelper.GetPlatformSerializedString(value)));
                _optimizedModelFilePath = value;
            }
        }
    }
    private string _optimizedModelFilePath = "";

    /// <summary>
    /// Enables Arena allocator for the CPU memory allocations. Default is true.
    /// </summary>
    /// <value>returns _enableCpuMemArena flag value</value>
    public bool EnableCpuMemArena
    {
        get
        {
            return _enableCpuMemArena;
        }
        set
        {
            if (!_enableCpuMemArena && value)
            {
                NativeApiStatus.VerifySuccess(NativeMethods.OrtEnableCpuMemArena(handle));
                _enableCpuMemArena = true;
            }
            else if (_enableCpuMemArena && !value)
            {
                NativeApiStatus.VerifySuccess(NativeMethods.OrtDisableCpuMemArena(handle));
                _enableCpuMemArena = false;
            }
        }
    }
    private bool _enableCpuMemArena = true;

    /// <summary>
    /// Log Id to be used for the session. Default is empty string.
    /// </summary>
    /// <value>returns _logId value</value>
    public string LogId
    {
        get
        {
            return _logId;
        }
        set
        {
            var logIdPinned = GCHandle.Alloc(NativeOnnxValueHelper.StringToZeroTerminatedUtf8(value), GCHandleType.Pinned);
            using (var pinnedlogIdName = new PinnedGCHandle(logIdPinned))
            {
                NativeApiStatus.VerifySuccess(NativeMethods.OrtSetSessionLogId(handle, pinnedlogIdName.Pointer));
            }
            _logId = value;
        }
    }
    private string _logId = "";

    /// <summary>
    /// Log Severity Level for the session logs. Default = ORT_LOGGING_LEVEL_WARNING
    /// </summary>
    /// <value>returns _logSeverityLevel value</value>
    public OrtLoggingLevel LogSeverityLevel
    {
        get
        {
            return _logSeverityLevel;
        }
        set
        {
            NativeApiStatus.VerifySuccess(NativeMethods.OrtSetSessionLogSeverityLevel(handle, value));
            _logSeverityLevel = value;
        }
    }
    private OrtLoggingLevel _logSeverityLevel = OrtLoggingLevel.ORT_LOGGING_LEVEL_WARNING;

    /// <summary>
    /// Log Verbosity Level for the session logs. Default = 0. Valid values are >=0.
    /// This takes into effect only when the LogSeverityLevel is set to ORT_LOGGING_LEVEL_VERBOSE.
    /// </summary>
    /// <value>returns _logVerbosityLevel value</value>
    public int LogVerbosityLevel
    {
        get
        {
            return _logVerbosityLevel;
        }
        set
        {
            NativeApiStatus.VerifySuccess(NativeMethods.OrtSetSessionLogVerbosityLevel(handle, value));
            _logVerbosityLevel = value;
        }
    }
    private int _logVerbosityLevel = 0;

    /// <summary>
    /// Sets the number of threads used to parallelize the execution within nodes
    /// A value of 0 means ORT will pick a default
    /// </summary>
    /// <value>returns _intraOpNumThreads value</value>
    public int IntraOpNumThreads
    {
        get
        {
            return _intraOpNumThreads;
        }
        set
        {
            NativeApiStatus.VerifySuccess(NativeMethods.OrtSetIntraOpNumThreads(handle, value));
            _intraOpNumThreads = value;
        }
    }
    private int _intraOpNumThreads = 0; // set to what is set in C++ SessionOptions by default;

    /// <summary>
    /// Sets the number of threads used to parallelize the execution of the graph (across nodes)
    /// If sequential execution is enabled this value is ignored
    /// A value of 0 means ORT will pick a default
    /// </summary>
    /// <value>returns _interOpNumThreads value</value>
    public int InterOpNumThreads
    {
        get
        {
            return _interOpNumThreads;
        }
        set
        {
            NativeApiStatus.VerifySuccess(NativeMethods.OrtSetInterOpNumThreads(handle, value));
            _interOpNumThreads = value;
        }
    }
    private int _interOpNumThreads = 0; // set to what is set in C++ SessionOptions by default;

    /// <summary>
    /// Sets the graph optimization level for the session. Default is set to ORT_ENABLE_ALL.
    /// </summary>
    /// <value>returns _graphOptimizationLevel value</value>
    public GraphOptimizationLevel GraphOptimizationLevel
    {
        get
        {
            return _graphOptimizationLevel;
        }
        set
        {
            NativeApiStatus.VerifySuccess(NativeMethods.OrtSetSessionGraphOptimizationLevel(handle, value));
            _graphOptimizationLevel = value;
        }
    }
    private GraphOptimizationLevel _graphOptimizationLevel = GraphOptimizationLevel.ORT_ENABLE_ALL;

    /// <summary>
    /// Sets the execution mode for the session. Default is set to ORT_SEQUENTIAL.
    /// See [ONNX_Runtime_Perf_Tuning.md] for more details.
    /// </summary>
    /// <value>returns _executionMode value</value>
    public ExecutionMode ExecutionMode
    {
        get
        {
            return _executionMode;
        }
        set
        {
            NativeApiStatus.VerifySuccess(NativeMethods.OrtSetSessionExecutionMode(handle, value));
            _executionMode = value;
        }
    }
    private ExecutionMode _executionMode = ExecutionMode.ORT_SEQUENTIAL;

    #endregion

    #region Private Methods

#if !__MOBILE__
    // Declared, but called only if OS = Windows.
    [DllImport("kernel32.dll")]
    private static extern IntPtr LoadLibrary(string dllToLoad);

    [DllImport("kernel32.dll")]
    static extern uint GetSystemDirectory([Out] StringBuilder lpBuffer, uint uSize);
#else
    private static IntPtr LoadLibrary(string dllToLoad)
    {
        throw new NotSupportedException();
    }

    static uint GetSystemDirectory([Out] StringBuilder lpBuffer, uint uSize)
    {
        throw new NotSupportedException();
    }
#endif

    /// <summary>
    /// Verifies on Windows that the delay-loaded CUDA DLLs can be located; throws if any is missing.
    /// </summary>
    private static bool CheckCudaExecutionProviderDLLs()
    {
        if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
        {
            foreach (var dll in cudaDelayLoadedLibs)
            {
                IntPtr handle = LoadLibrary(dll);
                if (handle != IntPtr.Zero)
                    continue;
                var sysdir = new StringBuilder(String.Empty, 2048);
                GetSystemDirectory(sysdir, (uint)sysdir.Capacity);
                throw new OnnxRuntimeException(
                    ErrorCode.NoSuchFile,
                    $"kernel32.LoadLibrary():'{dll}' not found. CUDA is required for GPU execution. " +
                    $". Verify it is available in the system directory={sysdir}. Else copy it to the output folder."
                );
            }
        }
        return true;
    }

    /// <summary>
    /// Verifies on Windows that the delay-loaded TensorRT DLLs can be located; throws if any is missing.
    /// </summary>
    private static bool CheckTensorrtExecutionProviderDLLs()
    {
        if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
        {
            foreach (var dll in trtDelayLoadedLibs)
            {
                IntPtr handle = LoadLibrary(dll);
                if (handle != IntPtr.Zero)
                    continue;
                var sysdir = new StringBuilder(String.Empty, 2048);
                GetSystemDirectory(sysdir, (uint)sysdir.Capacity);
                throw new OnnxRuntimeException(
                    ErrorCode.NoSuchFile,
                    $"kernel32.LoadLibrary():'{dll}' not found. TensorRT/CUDA are required for GPU execution. " +
                    $". Verify it is available in the system directory={sysdir}. Else copy it to the output folder."
                );
            }
        }
        return true;
    }

    #endregion

    #region SafeHandle

    /// <summary>
    /// Overrides SafeHandle.ReleaseHandle() to properly dispose of
    /// the native instance of SessionOptions
    /// </summary>
    /// <returns>always returns true</returns>
    protected override bool ReleaseHandle()
    {
        NativeMethods.OrtReleaseSessionOptions(handle);
        handle = IntPtr.Zero;
        return true;
    }

    #endregion
}
}