Commit 546b4279 authored by limm's avatar limm
Browse files

add csrc and mmdeploy module

parent 502f4fb9
Pipeline #2810 canceled with stages
# Copyright (c) OpenMMLab. All rights reserved.

# Backend custom ops are always built; the SDK modules below are optional.
add_subdirectory(backend_ops)

if (MMDEPLOY_BUILD_SDK)
    # include OpenCV for SDK modules since many of them depends on it
    include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake)
    # NOTE(review): the order below appears to follow the module dependency
    # chain (core first, apis last); keep additions in dependency order.
    add_subdirectory(core)
    add_subdirectory(execution)
    add_subdirectory(utils)
    add_subdirectory(archive)
    add_subdirectory(device)
    add_subdirectory(graph)
    add_subdirectory(model)
    add_subdirectory(operation)
    add_subdirectory(preprocess)
    add_subdirectory(net)
    add_subdirectory(codebase)
    add_subdirectory(apis)
endif ()
# Copyright (c) OpenMMLab. All rights reserved.

# Language bindings for the SDK; C, C++ and Java are built unconditionally.
add_subdirectory(c)
add_subdirectory(cxx)
add_subdirectory(java)
# add python subdir conditionally since it's designed to work as
# a standalone project also
if (MMDEPLOY_BUILD_SDK_PYTHON_API)
    add_subdirectory(python)
endif ()
# Copyright (c) OpenMMLab. All rights reserved.
project(capis)

include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake)

# Object-library targets created by add_object(); consumed by the monolithic
# `mmdeploy` shared library at the bottom of this file.
set(CAPI_OBJS)

# add_object(<name> <sources>...)
# Creates a position-independent OBJECT library for one C-API translation unit
# and records it in CAPI_OBJS. Deliberately a macro (not a function) so that
# the CAPI_OBJS accumulation happens in the caller's scope.
macro(add_object name)
    add_library(${name} OBJECT ${ARGN})
    set_target_properties(${name} PROPERTIES POSITION_INDEPENDENT_CODE 1)
    # Plain NAME=VALUE form: CMake adds the -D prefix itself and strips a
    # leading -D anyway, so spelling it out is redundant.
    target_compile_definitions(${name} PRIVATE MMDEPLOY_API_EXPORTS=1)
    if (NOT MSVC)
        # Hide all symbols by default; MMDEPLOY_API marks the exported ones.
        target_compile_options(${name} PRIVATE $<$<COMPILE_LANGUAGE:CXX>:-fvisibility=hidden>)
    endif ()
    target_link_libraries(${name} PRIVATE mmdeploy::core)
    target_include_directories(${name} PUBLIC
            $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
            $<INSTALL_INTERFACE:include>)
    # idiomatic list append instead of set(v ${v} x)
    list(APPEND CAPI_OBJS ${name})
    mmdeploy_export(${name})
endmacro()
# Modules shared by every task API.
set(COMMON_LIST
        common
        model
        executor
        pipeline)
# Task-specific modules (classifier, detector, ...) provided by the enclosing
# build via MMDEPLOY_TASKS.
set(TASK_LIST ${MMDEPLOY_TASKS})

# For each common module: build an object library (mmdeploy_<name>_obj), wrap
# it in a linkable library (mmdeploy_<name>) and install its public header.
foreach (TASK ${COMMON_LIST})
    set(TARGET_NAME mmdeploy_${TASK})
    set(OBJECT_NAME mmdeploy_${TASK}_obj)
    add_object(${OBJECT_NAME} ${CMAKE_CURRENT_SOURCE_DIR}/mmdeploy/${TASK}.cpp)
    mmdeploy_add_library(${TARGET_NAME})
    target_link_libraries(${TARGET_NAME} PRIVATE ${OBJECT_NAME})
    target_include_directories(${TARGET_NAME} PUBLIC
            $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
            $<INSTALL_INTERFACE:include>)
    install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/mmdeploy/${TASK}.h
            DESTINATION include/mmdeploy)
endforeach ()

# Usage requirements between the common libraries themselves.
target_link_libraries(mmdeploy_executor PUBLIC
        mmdeploy_execution mmdeploy_common)
target_link_libraries(mmdeploy_pipeline PUBLIC
        mmdeploy_executor mmdeploy_model mmdeploy_common)
# Same pattern for each task API; task libraries additionally link the common
# pipeline library they are built on.
foreach (TASK ${TASK_LIST})
    set(TARGET_NAME mmdeploy_${TASK})
    set(OBJECT_NAME mmdeploy_${TASK}_obj)
    add_object(${OBJECT_NAME} ${CMAKE_CURRENT_SOURCE_DIR}/mmdeploy/${TASK}.cpp)
    mmdeploy_add_library(${TARGET_NAME})
    target_link_libraries(${TARGET_NAME} PRIVATE ${OBJECT_NAME}
            mmdeploy_pipeline)
    target_include_directories(${TARGET_NAME} PUBLIC
            $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
            $<INSTALL_INTERFACE:include>)
    install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/mmdeploy/${TASK}.h
            DESTINATION include/mmdeploy)
endforeach ()

# Ship the C/C++ demo sources (with their CMakeLists) as installed examples.
install(DIRECTORY ${CMAKE_SOURCE_DIR}/demo/csrc/ DESTINATION example/cpp
        FILES_MATCHING
        PATTERN "*.cpp"
        PATTERN "CMakeLists.txt"
        )
if (MMDEPLOY_BUILD_SDK_CSHARP_API OR MMDEPLOY_BUILD_SDK_MONOLITHIC)
    # Monolithic shared library: bundles all static/dynamic SDK modules plus
    # the C-API object libraries collected in CAPI_OBJS.
    add_library(mmdeploy SHARED)
    mmdeploy_load_static(mmdeploy MMDeployStaticModules)
    mmdeploy_load_dynamic(mmdeploy MMDeployDynamicModules)
    target_link_libraries(mmdeploy PRIVATE ${CAPI_OBJS} mmdeploy_execution)
    target_include_directories(mmdeploy PUBLIC
            $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
            $<INSTALL_INTERFACE:include>)
    # Compose the dotted version string directly; the previous multi-argument
    # set() created a CMake list that then needed its semicolons stripped.
    set(MMDEPLOY_VERSION
        "${MMDEPLOY_VERSION_MAJOR}.${MMDEPLOY_VERSION_MINOR}.${MMDEPLOY_VERSION_PATCH}")
    set_target_properties(mmdeploy PROPERTIES
            VERSION ${MMDEPLOY_VERSION}
            SOVERSION ${MMDEPLOY_VERSION_MAJOR})
    mmdeploy_add_rpath(mmdeploy)
    mmdeploy_export_impl(mmdeploy)
endif ()
// Copyright (c) OpenMMLab. All rights reserved.
#include "mmdeploy/classifier.h"
#include <numeric>
#include "mmdeploy/archive/value_archive.h"
#include "mmdeploy/codebase/mmcls/mmcls.h"
#include "mmdeploy/common_internal.h"
#include "mmdeploy/core/device.h"
#include "mmdeploy/core/graph.h"
#include "mmdeploy/core/utils/formatter.h"
#include "mmdeploy/handle.h"
#include "mmdeploy/pipeline.h"
using namespace mmdeploy;
using namespace std;
// Convenience overload: builds a temporary device-bound context, delegates to
// the v2 creator, and releases the context before returning.
int mmdeploy_classifier_create(mmdeploy_model_t model, const char* device_name, int device_id,
                               mmdeploy_classifier_t* classifier) {
  mmdeploy_context_t ctx{};
  if (auto status = mmdeploy_context_create_by_device(device_name, device_id, &ctx)) {
    return status;  // non-zero status codes are errors
  }
  auto status = mmdeploy_classifier_create_v2(model, ctx, classifier);
  mmdeploy_context_destroy(ctx);
  return status;
}
int mmdeploy_classifier_create_by_path(const char* model_path, const char* device_name,
int device_id, mmdeploy_classifier_t* classifier) {
mmdeploy_model_t model{};
if (auto ec = mmdeploy_model_create_by_path(model_path, &model)) {
return ec;
}
auto ec = mmdeploy_classifier_create(model, device_name, device_id, classifier);
mmdeploy_model_destroy(model);
return ec;
}
// Creates a classifier on an existing context; the classifier handle is a
// type-punned pipeline handle (see the casts in apply/destroy below).
int mmdeploy_classifier_create_v2(mmdeploy_model_t model, mmdeploy_context_t context,
                                  mmdeploy_classifier_t* classifier) {
  return mmdeploy_pipeline_create_from_model(model, context, (mmdeploy_pipeline_t*)classifier);
}

// Packs a batch of images into a pipeline input value (thin wrapper over the
// shared helper mmdeploy_common_create_input).
int mmdeploy_classifier_create_input(const mmdeploy_mat_t* mats, int mat_count,
                                     mmdeploy_value_t* value) {
  return mmdeploy_common_create_input(mats, mat_count, value);
}
// Synchronous one-shot inference: pack the images, run the pipeline, unpack
// the labels. The wrapped<> guards free the intermediate value handles on
// every exit path.
int mmdeploy_classifier_apply(mmdeploy_classifier_t classifier, const mmdeploy_mat_t* mats,
                              int mat_count, mmdeploy_classification_t** results,
                              int** result_count) {
  wrapped<mmdeploy_value_t> input;
  wrapped<mmdeploy_value_t> output;
  int ec = mmdeploy_classifier_create_input(mats, mat_count, input.ptr());
  if (ec != MMDEPLOY_SUCCESS) {
    return ec;
  }
  ec = mmdeploy_classifier_apply_v2(classifier, input, output.ptr());
  if (ec != MMDEPLOY_SUCCESS) {
    return ec;
  }
  return mmdeploy_classifier_get_result(output, results, result_count);
}
// Runs the underlying pipeline on an already-packed input value.
int mmdeploy_classifier_apply_v2(mmdeploy_classifier_t classifier, mmdeploy_value_t input,
                                 mmdeploy_value_t* output) {
  return mmdeploy_pipeline_apply((mmdeploy_pipeline_t)classifier, input, output);
}

// Asynchronous variant: forwards sender handles to the pipeline.
int mmdeploy_classifier_apply_async(mmdeploy_classifier_t classifier, mmdeploy_sender_t input,
                                    mmdeploy_sender_t* output) {
  return mmdeploy_pipeline_apply_async((mmdeploy_pipeline_t)classifier, input, output);
}
// Flattens pipeline output into two C arrays: one classification entry per
// label and one per-image count. Both arrays are allocated with new[] and must
// be freed through mmdeploy_classifier_release_result.
int mmdeploy_classifier_get_result(mmdeploy_value_t output, mmdeploy_classification_t** results,
                                   int** result_count) {
  if (!output || !results || !result_count) {
    return MMDEPLOY_E_INVALID_ARG;
  }
  try {
    auto classify_outputs = from_value<vector<mmcls::Labels>>(Cast(output)->front());

    // per-image label counts and the total number of labels
    const size_t image_count = classify_outputs.size();
    std::unique_ptr<int[]> counts(new int[image_count]{});
    size_t total = 0;
    for (size_t i = 0; i < image_count; ++i) {
      counts[i] = static_cast<int>(classify_outputs[i].size());
      total += classify_outputs[i].size();
    }

    // flatten all labels into one contiguous buffer
    std::unique_ptr<mmdeploy_classification_t[]> flat(new mmdeploy_classification_t[total]{});
    auto* dst = flat.get();
    for (const auto& labels : classify_outputs) {
      for (const auto& label : labels) {
        dst->label_id = label.label_id;
        dst->score = label.score;
        ++dst;
      }
    }

    // hand ownership to the caller only after everything succeeded
    *result_count = counts.release();
    *results = flat.release();
    return MMDEPLOY_SUCCESS;
  } catch (const std::exception& e) {
    MMDEPLOY_ERROR("unhandled exception: {}", e.what());
  } catch (...) {
    MMDEPLOY_ERROR("unknown exception caught");
  }
  return MMDEPLOY_E_FAIL;
}
// Releases buffers allocated by mmdeploy_classifier_get_result. `count` is
// part of the public signature but unused here since both buffers were
// allocated with new[].
void mmdeploy_classifier_release_result(mmdeploy_classification_t* results, const int* result_count,
                                        int count) {
  delete[] results;
  delete[] result_count;
}

// Destroys the classifier by destroying the pipeline it aliases.
void mmdeploy_classifier_destroy(mmdeploy_classifier_t classifier) {
  mmdeploy_pipeline_destroy((mmdeploy_pipeline_t)classifier);
}
// Copyright (c) OpenMMLab. All rights reserved.
/**
* @file classifier.h
* @brief Interface to MMClassification task
*/
#ifndef MMDEPLOY_CLASSIFIER_H
#define MMDEPLOY_CLASSIFIER_H
#include "mmdeploy/common.h"
#include "mmdeploy/executor.h"
#include "mmdeploy/model.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct mmdeploy_classification_t {
int label_id;
float score;
} mmdeploy_classification_t;
typedef struct mmdeploy_classifier* mmdeploy_classifier_t;
/**
* @brief Create classifier's handle
* @param[in] model an instance of mmclassification sdk model created by
* \ref mmdeploy_model_create_by_path or \ref mmdeploy_model_create in \ref model.h
* @param[in] device_name name of device, such as "cpu", "cuda", etc.
* @param[in] device_id id of device.
* @param[out] classifier instance of a classifier, which must be destroyed
* by \ref mmdeploy_classifier_destroy
* @return status of creating classifier's handle
*/
MMDEPLOY_API int mmdeploy_classifier_create(mmdeploy_model_t model, const char* device_name,
int device_id, mmdeploy_classifier_t* classifier);
/**
* @brief Create classifier's handle
* @param[in] model_path path of mmclassification sdk model exported by mmdeploy model converter
* @param[in] device_name name of device, such as "cpu", "cuda", etc.
* @param[in] device_id id of device.
* @param[out] classifier instance of a classifier, which must be destroyed
* by \ref mmdeploy_classifier_destroy
* @return status of creating classifier's handle
*/
MMDEPLOY_API int mmdeploy_classifier_create_by_path(const char* model_path, const char* device_name,
int device_id,
mmdeploy_classifier_t* classifier);
/**
* @brief Use classifier created by \ref mmdeploy_classifier_create_by_path to get label
* information of each image in a batch
* @param[in] classifier classifier's handle created by \ref mmdeploy_classifier_create_by_path
* @param[in] mats a batch of images
* @param[in] mat_count number of images in the batch
* @param[out] results a linear buffer to save classification results of each
* image, which must be freed by \ref mmdeploy_classifier_release_result
* @param[out] result_count a linear buffer with length being \p mat_count to save the number of
* classification results of each image. It must be released by \ref
* mmdeploy_classifier_release_result
* @return status of inference
*/
MMDEPLOY_API int mmdeploy_classifier_apply(mmdeploy_classifier_t classifier,
const mmdeploy_mat_t* mats, int mat_count,
mmdeploy_classification_t** results, int** result_count);
/**
 * @brief Release the inference result buffer created by \ref mmdeploy_classifier_apply
 * @param[in] results classification results buffer
 * @param[in] result_count \p results size buffer
 * @param[in] count length of \p result_count
 */
MMDEPLOY_API void mmdeploy_classifier_release_result(mmdeploy_classification_t* results,
const int* result_count, int count);
/**
* @brief Destroy classifier's handle
* @param[in] classifier classifier's handle created by \ref mmdeploy_classifier_create_by_path
*/
MMDEPLOY_API void mmdeploy_classifier_destroy(mmdeploy_classifier_t classifier);
/******************************************************************************
* Experimental asynchronous APIs */
/**
* @brief Same as \ref mmdeploy_classifier_create, but allows to control execution context of tasks
* via context
*/
MMDEPLOY_API int mmdeploy_classifier_create_v2(mmdeploy_model_t model, mmdeploy_context_t context,
mmdeploy_classifier_t* classifier);
/**
* @brief Pack classifier inputs into mmdeploy_value_t
* @param[in] mats a batch of images
* @param[in] mat_count number of images in the batch
* @param[out] value the packed value
* @return status of the operation
*/
MMDEPLOY_API int mmdeploy_classifier_create_input(const mmdeploy_mat_t* mats, int mat_count,
mmdeploy_value_t* value);
/**
* @brief Same as \ref mmdeploy_classifier_apply, but input and output are packed in \ref
* mmdeploy_value_t.
*/
MMDEPLOY_API int mmdeploy_classifier_apply_v2(mmdeploy_classifier_t classifier,
mmdeploy_value_t input, mmdeploy_value_t* output);
/**
* @brief Apply classifier asynchronously
* @param[in] classifier handle of the classifier
* @param[in] input input sender that will be consumed by the operation
* @param[out] output output sender
* @return status of the operation
*/
MMDEPLOY_API int mmdeploy_classifier_apply_async(mmdeploy_classifier_t classifier,
mmdeploy_sender_t input,
mmdeploy_sender_t* output);
/**
 * @brief Unpack classifier output from a \ref mmdeploy_value_t
 * @param[in] output output obtained by applying a classifier
 * @param[out] results a linear buffer containing classification results of each image, released by
 * \ref mmdeploy_classifier_release_result
 * @param[out] result_count a linear buffer containing the number of results for each input image,
 * released by \ref mmdeploy_classifier_release_result
 * @return status of the operation
 */
MMDEPLOY_API int mmdeploy_classifier_get_result(mmdeploy_value_t output,
mmdeploy_classification_t** results,
int** result_count);
#ifdef __cplusplus
}
#endif
#endif // MMDEPLOY_CLASSIFIER_H
#include "mmdeploy/common.h"
#include "mmdeploy/common_internal.h"
#include "mmdeploy/core/mat.h"
#include "mmdeploy/core/profiler.h"
#include "mmdeploy/executor_internal.h"
// Deep-copies a value handle. Returns nullptr for a null input, or if the copy
// throws (Guard logs the exception and returns nullptr).
mmdeploy_value_t mmdeploy_value_copy(mmdeploy_value_t value) {
  if (!value) {
    return nullptr;
  }
  return Guard([&] { return Take(Value(*Cast(value))); });
}

// Frees a value handle (null-safe: delete on nullptr is a no-op).
void mmdeploy_value_destroy(mmdeploy_value_t value) { delete Cast(value); }

// Creates an empty context; a context is represented internally as a Value.
int mmdeploy_context_create(mmdeploy_context_t* context) {
  *context = (mmdeploy_context_t) new Value;
  return 0;
}
int mmdeploy_context_create_by_device(const char* device_name, int device_id,
mmdeploy_context_t* context) {
mmdeploy_device_t device{};
int ec = MMDEPLOY_SUCCESS;
mmdeploy_context_t _context{};
ec = mmdeploy_context_create(&_context);
if (ec != MMDEPLOY_SUCCESS) {
return ec;
}
ec = mmdeploy_device_create(device_name, device_id, &device);
if (ec != MMDEPLOY_SUCCESS) {
return ec;
}
ec = mmdeploy_context_add(_context, MMDEPLOY_TYPE_DEVICE, nullptr, device);
mmdeploy_device_destroy(device);
if (ec == MMDEPLOY_SUCCESS) {
*context = _context;
}
return ec;
}
// Destroys a context created by mmdeploy_context_create* (null-safe).
void mmdeploy_context_destroy(mmdeploy_context_t context) { delete Cast(context); }
int mmdeploy_common_create_input(const mmdeploy_mat_t* mats, int mat_count,
mmdeploy_value_t* value) {
if (mat_count && mats == nullptr) {
return MMDEPLOY_E_INVALID_ARG;
}
try {
auto input = std::make_unique<Value>(Value{Value::kArray});
for (int i = 0; i < mat_count; ++i) {
input->front().push_back({{"ori_img", Cast(mats[i])}});
}
*value = Cast(input.release());
} catch (const std::exception& e) {
MMDEPLOY_ERROR("unhandled exception: {}", e.what());
} catch (...) {
MMDEPLOY_ERROR("unknown exception caught");
}
return MMDEPLOY_SUCCESS;
}
// Creates a device handle for the named platform; rejects unknown platforms
// (signalled by platform_id() == -1) with MMDEPLOY_E_INVALID_ARG.
int mmdeploy_device_create(const char* device_name, int device_id, mmdeploy_device_t* device) {
  Device tmp(device_name, device_id);
  if (tmp.platform_id() == -1) {
    MMDEPLOY_ERROR("Device \"{}\" not found", device_name);
    return MMDEPLOY_E_INVALID_ARG;
  }
  *device = (mmdeploy_device_t) new Device(tmp);
  return MMDEPLOY_SUCCESS;
}

// Frees a device handle (null-safe).
void mmdeploy_device_destroy(mmdeploy_device_t device) { delete (Device*)device; }

// Creates a profiler writing to `path` (see mmdeploy_profiler_destroy for the
// flush semantics documented in common.h).
int mmdeploy_profiler_create(const char* path, mmdeploy_profiler_t* profiler) {
  *profiler = (mmdeploy_profiler_t) new profiler::Profiler(path);
  return MMDEPLOY_SUCCESS;
}
// Destroys a profiler handle (null-safe). Release() is called before deletion;
// per the header, profile data is written to disk by this call.
void mmdeploy_profiler_destroy(mmdeploy_profiler_t profiler) {
  if (profiler) {
    auto p = (profiler::Profiler*)profiler;
    p->Release();
    delete p;
  }
}
// Stores an object into the context Value. A device also installs a matching
// "stream" entry; schedulers and models are keyed by `name`. Types without a
// case here (STREAM, MAT) fall through to MMDEPLOY_E_NOT_SUPPORTED.
int mmdeploy_context_add(mmdeploy_context_t context, mmdeploy_context_type_t type, const char* name,
                         const void* object) {
  auto& ctx = *Cast(context);
  switch (type) {
    case MMDEPLOY_TYPE_DEVICE: {
      const auto& device = *(Device*)object;
      ctx["device"] = device;
      ctx["stream"] = Stream(device);  // stream constructed from the device
      break;
    }
    case MMDEPLOY_TYPE_SCHEDULER:
      ctx["scheduler"][name] = *Cast((const mmdeploy_scheduler_t)object);
      break;
    case MMDEPLOY_TYPE_MODEL:
      ctx["model"][name] = *Cast((const mmdeploy_model_t)object);
      break;
    case MMDEPLOY_TYPE_PROFILER: {
      const auto& profiler = *(profiler::Profiler*)object;
      profiler::Scope* root(profiler.scope());
      ctx["scope"] = root;  // only the profiler's root scope is stored
      break;
    }
    default:
      return MMDEPLOY_E_NOT_SUPPORTED;
  }
  return 0;
}
// Copyright (c) OpenMMLab. All rights reserved.
#ifndef MMDEPLOY_COMMON_H
#define MMDEPLOY_COMMON_H
#include <stdint.h> // NOLINT
#ifndef MMDEPLOY_EXPORT
#ifdef _MSC_VER
#define MMDEPLOY_EXPORT __declspec(dllexport)
#else
#define MMDEPLOY_EXPORT __attribute__((visibility("default")))
#endif
#endif
#ifndef MMDEPLOY_API
#ifdef MMDEPLOY_API_EXPORTS
#define MMDEPLOY_API MMDEPLOY_EXPORT
#else
#define MMDEPLOY_API
#endif
#endif
// clang-format off
/** Pixel layouts accepted in mmdeploy_mat_t::format. */
typedef enum mmdeploy_pixel_format_t{
  MMDEPLOY_PIXEL_FORMAT_BGR,
  MMDEPLOY_PIXEL_FORMAT_RGB,
  MMDEPLOY_PIXEL_FORMAT_GRAYSCALE,
  MMDEPLOY_PIXEL_FORMAT_NV12,
  MMDEPLOY_PIXEL_FORMAT_NV21,
  MMDEPLOY_PIXEL_FORMAT_BGRA,
  MMDEPLOY_PIXEL_FORMAT_COUNT  /* number of formats; not itself a valid format */
} mmdeploy_pixel_format_t;

/** Element types accepted in mmdeploy_mat_t::type. */
typedef enum mmdeploy_data_type_t{
  MMDEPLOY_DATA_TYPE_FLOAT,
  MMDEPLOY_DATA_TYPE_HALF,
  MMDEPLOY_DATA_TYPE_UINT8,
  MMDEPLOY_DATA_TYPE_INT32,
  MMDEPLOY_DATA_TYPE_COUNT  /* number of types; not itself a valid type */
} mmdeploy_data_type_t;

/** Status codes returned by the C API; 0 (MMDEPLOY_SUCCESS) means success. */
typedef enum mmdeploy_status_t {
  MMDEPLOY_SUCCESS = 0,
  MMDEPLOY_E_INVALID_ARG = 1,
  MMDEPLOY_E_NOT_SUPPORTED = 2,
  MMDEPLOY_E_OUT_OF_RANGE = 3,
  MMDEPLOY_E_OUT_OF_MEMORY = 4,
  MMDEPLOY_E_FILE_NOT_EXIST = 5,
  MMDEPLOY_E_FAIL = 6,
  MMDEPLOY_STATUS_COUNT = 7
} mmdeploy_status_t;
// clang-format on

/* Opaque handles; the underlying types live in the SDK implementation. */
typedef struct mmdeploy_device* mmdeploy_device_t;
typedef struct mmdeploy_profiler* mmdeploy_profiler_t;

/** Image descriptor passed into the inference APIs. */
typedef struct mmdeploy_mat_t {
  uint8_t* data;                   /* pixel buffer; not copied by this struct */
  int height;
  int width;
  int channel;
  mmdeploy_pixel_format_t format;
  mmdeploy_data_type_t type;
  mmdeploy_device_t device;        /* may be NULL (implementation falls back to a default device) */
} mmdeploy_mat_t;

/** Axis-aligned rectangle in pixel coordinates. */
typedef struct mmdeploy_rect_t {
  float left;
  float top;
  float right;
  float bottom;
} mmdeploy_rect_t;

/** 2-D point. */
typedef struct mmdeploy_point_t {
  float x;
  float y;
} mmdeploy_point_t;

typedef struct mmdeploy_value* mmdeploy_value_t;
typedef struct mmdeploy_context* mmdeploy_context_t;

/** Kinds of objects that can be registered via mmdeploy_context_add.
 *  NOTE(review): only DEVICE, SCHEDULER, MODEL and PROFILER are handled by the
 *  current mmdeploy_context_add implementation; the others are rejected. */
typedef enum mmdeploy_context_type_t {
  MMDEPLOY_TYPE_DEVICE = 0,
  MMDEPLOY_TYPE_STREAM = 1,
  MMDEPLOY_TYPE_MODEL = 2,
  MMDEPLOY_TYPE_SCHEDULER = 3,
  MMDEPLOY_TYPE_MAT = 4,
  MMDEPLOY_TYPE_PROFILER = 5,
} mmdeploy_context_type_t;
#if __cplusplus
extern "C" {
#endif
/**
* Copy value
* @param value
* @return
*/
MMDEPLOY_API mmdeploy_value_t mmdeploy_value_copy(mmdeploy_value_t value);
/**
* Destroy value
* @param value
*/
MMDEPLOY_API void mmdeploy_value_destroy(mmdeploy_value_t value);
/**
* Create device handle
* @param device_name
* @param device_id
* @param device
* @return
*/
MMDEPLOY_API int mmdeploy_device_create(const char* device_name, int device_id,
mmdeploy_device_t* device);
/**
* Destroy device handle
* @param device
*/
MMDEPLOY_API void mmdeploy_device_destroy(mmdeploy_device_t device);
/**
* Create profiler
* @param path path to save the profile data
* @param profiler handle for profiler, should be added to context and deleted by
* mmdeploy_profiler_destroy
* @return status of create
*/
MMDEPLOY_API int mmdeploy_profiler_create(const char* path, mmdeploy_profiler_t* profiler);
/**
* Destroy profiler handle
* @param profiler handle for profiler, profile data will be written to disk after this call
*/
MMDEPLOY_API void mmdeploy_profiler_destroy(mmdeploy_profiler_t profiler);
/**
* Create context
* @param context
* @return
*/
MMDEPLOY_API int mmdeploy_context_create(mmdeploy_context_t* context);
/**
* Create context
* @param device_name
* @param device_id
* @param context
* @return
*/
MMDEPLOY_API int mmdeploy_context_create_by_device(const char* device_name, int device_id,
mmdeploy_context_t* context);
/**
* Destroy context
* @param context
*/
MMDEPLOY_API void mmdeploy_context_destroy(mmdeploy_context_t context);
/**
* Add context object
* @param context
* @param type
* @param name
* @param object
* @return
*/
MMDEPLOY_API int mmdeploy_context_add(mmdeploy_context_t context, mmdeploy_context_type_t type,
const char* name, const void* object);
/**
* Create input value from array of mats
* @param mats
* @param mat_count
* @param value
* @return
*/
MMDEPLOY_API int mmdeploy_common_create_input(const mmdeploy_mat_t* mats, int mat_count,
mmdeploy_value_t* value);
#if __cplusplus
}
#endif
#endif // MMDEPLOY_COMMON_H
// Copyright (c) OpenMMLab. All rights reserved.
#ifndef MMDEPLOY_CSRC_APIS_C_COMMON_INTERNAL_H_
#define MMDEPLOY_CSRC_APIS_C_COMMON_INTERNAL_H_
#include "mmdeploy/common.h"
#include "mmdeploy/core/mat.h"
#include "mmdeploy/core/value.h"
#include "mmdeploy/handle.h"
#include "mmdeploy/model.h"
#include "mmdeploy/pipeline.h"
using namespace mmdeploy;
namespace {

// --- conversions between opaque C handles and their implementation types ---

inline mmdeploy_value_t Cast(Value* s) { return reinterpret_cast<mmdeploy_value_t>(s); }

inline Value* Cast(mmdeploy_value_t s) { return reinterpret_cast<Value*>(s); }

// Moves the payload out of a value handle and destroys the handle.
inline Value Take(mmdeploy_value_t v) {
  auto value = std::move(*Cast(v));
  mmdeploy_value_destroy(v);
  return value;
}

// A context handle also wraps a Value.
inline Value* Cast(mmdeploy_context_t c) { return reinterpret_cast<Value*>(c); }

// Moves a Value onto the heap and returns an owning C handle.
inline mmdeploy_value_t Take(Value v) {
  return Cast(new Value(std::move(v)));  // NOLINT
}

inline mmdeploy_pipeline_t Cast(AsyncHandle* pipeline) {
  return reinterpret_cast<mmdeploy_pipeline_t>(pipeline);
}

inline AsyncHandle* Cast(mmdeploy_pipeline_t pipeline) {
  return reinterpret_cast<AsyncHandle*>(pipeline);
}

inline mmdeploy_model_t Cast(Model* model) { return reinterpret_cast<mmdeploy_model_t>(model); }

inline Model* Cast(mmdeploy_model_t model) { return reinterpret_cast<Model*>(model); }

// Builds a Mat from the C descriptor; a null device falls back to Device{0}.
inline Mat Cast(const mmdeploy_mat_t& mat) {
  return Mat{mat.height, mat.width, PixelFormat(mat.format),
             DataType(mat.type), mat.data, mat.device ? *(const Device*)mat.device : Device{0}};
}

// Runs `f`, logging any exception and returning nullptr in that case.
template <typename F>
std::invoke_result_t<F> Guard(F f) {
  try {
    return f();
  } catch (const std::exception& e) {
    MMDEPLOY_ERROR("unhandled exception: {}", e.what());
  } catch (...) {
    MMDEPLOY_ERROR("unknown exception caught");
  }
  return nullptr;
}

// Move-only RAII owner for C handle types. The SFINAE constraint enables the
// wrapper only for types T that have a matching Cast(T) overload above, so
// reset() can delete through the implementation pointer.
template <typename T, typename SFINAE = void>
class wrapped {};

template <typename T>
class wrapped<T, std::void_t<decltype(Cast(T{}))>> {
 public:
  wrapped() noexcept : v_(nullptr) {}
  explicit wrapped(T v) noexcept : v_(v) {}
  // Deletes the owned object, if any, and clears the handle.
  void reset() {
    if (v_) {
      delete Cast(v_);
      v_ = nullptr;
    }
  }
  ~wrapped() { reset(); }
  wrapped(const wrapped&) = delete;
  wrapped& operator=(const wrapped&) = delete;
  wrapped(wrapped&& other) noexcept : v_(other.release()) {}
  wrapped& operator=(wrapped&& other) noexcept {
    reset();
    v_ = other.release();
    return *this;
  }
  // Gives up ownership; the caller becomes responsible for the handle.
  T release() noexcept { return std::exchange(v_, nullptr); }
  auto operator*() { return Cast(v_); }
  auto operator-> () { return Cast(v_); }
  // Address of the handle slot, for out-parameter style C APIs.
  T* ptr() noexcept { return &v_; }
  operator T() const noexcept { return v_; }  // NOLINT
 private:
  T v_;  // the owned C handle; nullptr when empty
};

}  // namespace
#endif  // MMDEPLOY_CSRC_APIS_C_COMMON_INTERNAL_H_
// Copyright (c) OpenMMLab. All rights reserved.
#include "mmdeploy/detector.h"
#include <deque>
#include <numeric>
#include "mmdeploy/apis/c/mmdeploy/common_internal.h"
#include "mmdeploy/apis/c/mmdeploy/model.h"
#include "mmdeploy/apis/c/mmdeploy/pipeline.h"
#include "mmdeploy/archive/value_archive.h"
#include "mmdeploy/codebase/mmdet/mmdet.h"
#include "mmdeploy/core/device.h"
#include "mmdeploy/core/model.h"
#include "mmdeploy/core/mpl/structure.h"
#include "mmdeploy/core/utils/formatter.h"
#include "mmdeploy/core/value.h"
using namespace std;
using namespace mmdeploy;
// Single allocation holding everything a detector result needs: the flat
// detection array, the per-image counts, the instance masks, and the Buffers
// keeping mask data alive. Reconstructed and freed as one unit in
// mmdeploy_detector_release_result.
using ResultType = mmdeploy::Structure<mmdeploy_detection_t,                   //
                                       std::vector<int>,                      //
                                       std::deque<mmdeploy_instance_mask_t>,  //
                                       std::vector<mmdeploy::framework::Buffer>>;  //
// Convenience overload: builds a temporary device-bound context, delegates to
// the v2 creator, and releases the context before returning.
int mmdeploy_detector_create(mmdeploy_model_t model, const char* device_name, int device_id,
                             mmdeploy_detector_t* detector) {
  mmdeploy_context_t ctx{};
  if (auto status = mmdeploy_context_create_by_device(device_name, device_id, &ctx)) {
    return status;  // non-zero status codes are errors
  }
  auto status = mmdeploy_detector_create_v2(model, ctx, detector);
  mmdeploy_context_destroy(ctx);
  return status;
}
// Creates a detector on an existing context; the detector handle aliases a
// pipeline handle.
int mmdeploy_detector_create_v2(mmdeploy_model_t model, mmdeploy_context_t context,
                                mmdeploy_detector_t* detector) {
  return mmdeploy_pipeline_create_from_model(model, context, (mmdeploy_pipeline_t*)detector);
}

// Convenience overload: loads the SDK model from disk, creates the detector,
// then releases the temporary model handle.
int mmdeploy_detector_create_by_path(const char* model_path, const char* device_name, int device_id,
                                     mmdeploy_detector_t* detector) {
  mmdeploy_model_t model{};
  if (auto ec = mmdeploy_model_create_by_path(model_path, &model)) {
    return ec;
  }
  auto ec = mmdeploy_detector_create(model, device_name, device_id, detector);
  mmdeploy_model_destroy(model);
  return ec;
}

// Packs a batch of images into a pipeline input value (thin wrapper over the
// shared helper mmdeploy_common_create_input).
int mmdeploy_detector_create_input(const mmdeploy_mat_t* mats, int mat_count,
                                   mmdeploy_value_t* input) {
  return mmdeploy_common_create_input(mats, mat_count, input);
}
// Synchronous one-shot inference: pack the images, run the pipeline, unpack
// the detections. The wrapped<> guards free the intermediate value handles on
// every exit path.
int mmdeploy_detector_apply(mmdeploy_detector_t detector, const mmdeploy_mat_t* mats, int mat_count,
                            mmdeploy_detection_t** results, int** result_count) {
  wrapped<mmdeploy_value_t> input;
  wrapped<mmdeploy_value_t> output;
  int ec = mmdeploy_detector_create_input(mats, mat_count, input.ptr());
  if (ec != MMDEPLOY_SUCCESS) {
    return ec;
  }
  ec = mmdeploy_detector_apply_v2(detector, input, output.ptr());
  if (ec != MMDEPLOY_SUCCESS) {
    return ec;
  }
  return mmdeploy_detector_get_result(output, results, result_count);
}
// Runs the underlying pipeline on an already-packed input value.
int mmdeploy_detector_apply_v2(mmdeploy_detector_t detector, mmdeploy_value_t input,
                               mmdeploy_value_t* output) {
  return mmdeploy_pipeline_apply((mmdeploy_pipeline_t)detector, input, output);
}

// Asynchronous variant: forwards sender handles to the pipeline.
int mmdeploy_detector_apply_async(mmdeploy_detector_t detector, mmdeploy_sender_t input,
                                  mmdeploy_sender_t* output) {
  return mmdeploy_pipeline_apply_async((mmdeploy_pipeline_t)detector, input, output);
}
// Converts raw pipeline output into the flat C result arrays. All output
// memory lives in a single ResultType allocation whose ownership is handed to
// the caller (reclaimed in mmdeploy_detector_release_result).
int mmdeploy_detector_get_result(mmdeploy_value_t output, mmdeploy_detection_t** results,
                                 int** result_count) {
  if (!output || !results || !result_count) {
    return MMDEPLOY_E_INVALID_ARG;
  }
  try {
    Value& value = Cast(output)->front();
    auto detector_outputs = from_value<vector<mmdet::Detections>>(value);
    // per-image detection counts and the grand total
    vector<int> _result_count(detector_outputs.size());
    size_t total = 0;
    for (size_t i = 0; i < detector_outputs.size(); ++i) {
      _result_count[i] = static_cast<int>(detector_outputs[i].size());
      total += detector_outputs[i].size();
    }
    // `total` detection slots; one slot each for counts/masks/buffers
    ResultType r({total, 1, 1, 1});
    auto [result_data, result_count_vec, masks, buffers] = r.pointers();
    auto result_ptr = result_data;
    for (const auto& det_output : detector_outputs) {
      for (const auto& detection : det_output) {
        result_ptr->label_id = detection.label_id;
        result_ptr->score = detection.score;
        const auto& bbox = detection.bbox;
        result_ptr->bbox = {bbox[0], bbox[1], bbox[2], bbox[3]};
        auto mask_byte_size = detection.mask.byte_size();
        if (mask_byte_size) {
          // the deque keeps mask entries at stable addresses; the Buffer copy
          // keeps the underlying mask data alive
          auto& mask = detection.mask;
          result_ptr->mask = &masks->emplace_back();
          buffers->push_back(mask.buffer());
          result_ptr->mask->data = mask.data<char>();
          result_ptr->mask->width = mask.width();
          result_ptr->mask->height = mask.height();
        }
        ++result_ptr;
      }
    }
    *result_count_vec = std::move(_result_count);
    *result_count = result_count_vec->data();
    *results = result_data;
    // transfer ownership to the caller; release_result rebuilds the Structure
    r.release();
    return MMDEPLOY_SUCCESS;
  } catch (const std::exception& e) {
    MMDEPLOY_ERROR("unhandled exception: {}", e.what());
  } catch (...) {
    MMDEPLOY_ERROR("unknown exception caught");
  }
  return MMDEPLOY_E_FAIL;
}
// Reconstructs the owning ResultType from the caller's pointers (total size is
// the sum of per-image counts) and lets its destructor free detections,
// counts, masks and buffers together.
void mmdeploy_detector_release_result(mmdeploy_detection_t* results, const int* result_count,
                                      int count) {
  auto num_dets = std::accumulate(result_count, result_count + count, 0);
  ResultType deleter({static_cast<size_t>(num_dets), 1, 1, 1}, results);
}

// Destroys the detector by destroying the pipeline it aliases.
void mmdeploy_detector_destroy(mmdeploy_detector_t detector) {
  mmdeploy_pipeline_destroy((mmdeploy_pipeline_t)detector);
}
// Copyright (c) OpenMMLab. All rights reserved.
/**
* @file detector.h
* @brief Interface to MMDetection task
*/
#ifndef MMDEPLOY_DETECTOR_H
#define MMDEPLOY_DETECTOR_H
#include "mmdeploy/common.h"
#include "mmdeploy/executor.h"
#include "mmdeploy/model.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct mmdeploy_instance_mask_t {
char* data;
int height;
int width;
} mmdeploy_instance_mask_t;
typedef struct mmdeploy_detection_t {
int label_id;
float score;
mmdeploy_rect_t bbox;
mmdeploy_instance_mask_t* mask;
} mmdeploy_detection_t;
typedef struct mmdeploy_detector* mmdeploy_detector_t;
/**
* @brief Create detector's handle
* @param[in] model an instance of mmdetection sdk model created by
* \ref mmdeploy_model_create_by_path or \ref mmdeploy_model_create in \ref model.h
* @param[in] device_name name of device, such as "cpu", "cuda", etc.
* @param[in] device_id id of device.
* @param[out] detector instance of a detector
* @return status of creating detector's handle
*/
MMDEPLOY_API int mmdeploy_detector_create(mmdeploy_model_t model, const char* device_name,
int device_id, mmdeploy_detector_t* detector);
/**
* @brief Create detector's handle
* @param[in] model_path path of mmdetection sdk model exported by mmdeploy model converter
* @param[in] device_name name of device, such as "cpu", "cuda", etc.
* @param[in] device_id id of device.
* @param[out] detector instance of a detector
* @return status of creating detector's handle
*/
MMDEPLOY_API int mmdeploy_detector_create_by_path(const char* model_path, const char* device_name,
int device_id, mmdeploy_detector_t* detector);
/**
* @brief Apply detector to batch images and get their inference results
* @param[in] detector detector's handle created by \ref mmdeploy_detector_create_by_path
* @param[in] mats a batch of images
* @param[in] mat_count number of images in the batch
* @param[out] results a linear buffer to save detection results of each image. It must be released
* by \ref mmdeploy_detector_release_result
* @param[out] result_count a linear buffer with length being \p mat_count to save the number of
* detection results of each image. And it must be released by \ref
* mmdeploy_detector_release_result
* @return status of inference
*/
MMDEPLOY_API int mmdeploy_detector_apply(mmdeploy_detector_t detector, const mmdeploy_mat_t* mats,
int mat_count, mmdeploy_detection_t** results,
int** result_count);
/** @brief Release the inference result buffer created by \ref mmdeploy_detector_apply
* @param[in] results detection results buffer
* @param[in] result_count \p results size buffer
* @param[in] count length of \p result_count
*/
MMDEPLOY_API void mmdeploy_detector_release_result(mmdeploy_detection_t* results,
const int* result_count, int count);
/**
* @brief Destroy detector's handle
* @param[in] detector detector's handle created by \ref mmdeploy_detector_create_by_path
*/
MMDEPLOY_API void mmdeploy_detector_destroy(mmdeploy_detector_t detector);
/******************************************************************************
* Experimental asynchronous APIs */
/**
* @brief Same as \ref mmdeploy_detector_create, but allows to control execution context of tasks
* via context
*/
MMDEPLOY_API int mmdeploy_detector_create_v2(mmdeploy_model_t model, mmdeploy_context_t context,
mmdeploy_detector_t* detector);
/**
 * @brief Pack detector inputs into mmdeploy_value_t
 * @param[in] mats a batch of images
 * @param[in] mat_count number of images in the batch
 * @param[out] input the packed input value
 * @return status of the operation
 */
MMDEPLOY_API int mmdeploy_detector_create_input(const mmdeploy_mat_t* mats, int mat_count,
mmdeploy_value_t* input);
/**
* @brief Same as \ref mmdeploy_detector_apply, but input and output are packed in \ref
* mmdeploy_value_t.
*/
MMDEPLOY_API int mmdeploy_detector_apply_v2(mmdeploy_detector_t detector, mmdeploy_value_t input,
mmdeploy_value_t* output);
/**
* @brief Apply detector asynchronously
* @param[in] detector handle to the detector
* @param[in] input input sender
* @return output sender
*/
MMDEPLOY_API int mmdeploy_detector_apply_async(mmdeploy_detector_t detector,
mmdeploy_sender_t input, mmdeploy_sender_t* output);
/**
* @brief Unpack detector output from a mmdeploy_value_t
* @param[in] output output obtained by applying a detector
* @param[out] results a linear buffer to save detection results of each image. It must be released
* by \ref mmdeploy_detector_release_result
* @param[out] result_count a linear buffer with length number of input images to save the number of
* detection results of each image. Must be released by \ref
* mmdeploy_detector_release_result
* @return status of the operation
*/
MMDEPLOY_API int mmdeploy_detector_get_result(mmdeploy_value_t output,
mmdeploy_detection_t** results, int** result_count);
#ifdef __cplusplus
}
#endif
#endif // MMDEPLOY_DETECTOR_H
// Copyright (c) OpenMMLab. All rights reserved.
#include "mmdeploy/executor.h"
#include "mmdeploy/common.h"
#include "mmdeploy/common_internal.h"
#include "mmdeploy/execution/when_all_value.h"
#include "mmdeploy/executor_internal.h"
using namespace mmdeploy;
namespace {

// Creates a type-erased scheduler of the registered `type` (e.g. "Inline",
// "ThreadPool", "SingleThread", "DynamicBatch") from `config`.
// Returns a heap-allocated handle on success, or nullptr when the type is not
// registered or construction throws. The caller owns the returned handle and
// releases it with mmdeploy_scheduler_destroy.
mmdeploy_scheduler_t CreateScheduler(const char* type, const Value& config = Value()) {
  try {
    auto creator = gRegistry<SchedulerType>().Get(type);
    if (!creator) {
      MMDEPLOY_ERROR("Creator for {} not found. Available schedulers: {}", type,
                     gRegistry<SchedulerType>().List());
      return nullptr;
    }
    return Cast(new SchedulerType(creator->Create(config)));
  } catch (const std::exception& e) {
    MMDEPLOY_ERROR("failed to create Scheduler: {} ({}), config: {}", type, e.what(), config);
    return nullptr;
  }
}

}  // namespace
// Duplicate a copyable sender (e.g. one produced by mmdeploy_executor_split)
// into a fresh handle. A null input yields a null result; the input handle is
// left intact and still owned by the caller.
mmdeploy_sender_t mmdeploy_sender_copy(mmdeploy_sender_t input) {
  return input == nullptr ? nullptr : Take(SenderType(*Cast(input)));
}
// Frees an unused sender handle; deleting nullptr is a safe no-op.
// Senders already consumed by an adapter must not be passed here again.
int mmdeploy_sender_destroy(mmdeploy_sender_t sender) {
  delete Cast(sender);
  return 0;
}
// Scheduler that runs work inline on the calling thread.
mmdeploy_scheduler_t mmdeploy_executor_inline() { return CreateScheduler("Inline"); }

// Scheduler backed by a single process-wide thread pool.
mmdeploy_scheduler_t mmdeploy_executor_system_pool() {
  // create a thread pool context and hold its shared handle
  // NOTE(review): if CreateScheduler fails, `*Cast(nullptr)` below is a null
  // dereference on first call; the handle returned by CreateScheduler is also
  // never freed (a deliberate-looking one-time leak keeping the pool alive for
  // the process lifetime) — confirm both are intended.
  static auto scheduler = *Cast(CreateScheduler("ThreadPool"));
  // return a copy of the handle to the thread pool
  return Cast(new SchedulerType(scheduler));
}

// Dedicated thread pool with `num_threads` workers; caller owns the handle.
mmdeploy_scheduler_t mmdeploy_executor_create_thread_pool(int num_threads) {
  return CreateScheduler("ThreadPool", {{"num_threads", num_threads}});
}

// Scheduler backed by a single dedicated thread; caller owns the handle.
mmdeploy_scheduler_t mmdeploy_executor_create_thread() { return CreateScheduler("SingleThread"); }

// Wraps `scheduler` so that submitted work is batched, up to `max_batch_size`
// items per batch (timeout semantics are defined by the DynamicBatch
// scheduler's config). Returns nullptr for a null scheduler.
mmdeploy_scheduler_t mmdeploy_executor_dynamic_batch(mmdeploy_scheduler_t scheduler,
                                                     int max_batch_size, int timeout) {
  if (!scheduler) {
    return nullptr;
  }
  return CreateScheduler(
      "DynamicBatch",
      {{"scheduler", *Cast(scheduler)}, {"max_batch_size", max_batch_size}, {"timeout", timeout}});
}

// Frees a scheduler handle; deleting nullptr is a safe no-op.
int mmdeploy_scheduler_destroy(mmdeploy_scheduler_t scheduler) {
  delete Cast(scheduler);
  return 0;
}
// Create a sender that immediately produces the given value. A null handle is
// accepted and yields a sender producing an empty Value; `value` itself is
// copied, not consumed.
mmdeploy_sender_t mmdeploy_executor_just(mmdeploy_value_t value) {
  if (!value) {
    return Take(Just(Value()));
  }
  return Guard([&] { return Take(Just(*Cast(value))); });
}
// Create a sender that completes on `scheduler`'s execution agent with an
// empty Value. Returns nullptr for a null scheduler.
mmdeploy_sender_t mmdeploy_executor_schedule(mmdeploy_scheduler_t scheduler) {
  if (!scheduler) {
    return nullptr;
  }
  return Guard([&] { return Take(Then(Schedule(*Cast(scheduler)), [] { return Value(); })); });
}

// Create a sender producing a copy of *value on `scheduler`'s context.
// Returns nullptr when either argument is null; `value` is not consumed.
mmdeploy_sender_t mmdeploy_executor_transfer_just(mmdeploy_scheduler_t scheduler,
                                                  mmdeploy_value_t value) {
  if (!scheduler || !value) {
    return nullptr;
  }
  return Guard([&] { return Take(TransferJust(*Cast(scheduler), *Cast(value))); });
}

// Adapt `input` so that downstream work runs on `scheduler`'s context.
// Consumes `input`; returns nullptr (input NOT consumed) on null arguments.
mmdeploy_sender_t mmdeploy_executor_transfer(mmdeploy_sender_t input,
                                             mmdeploy_scheduler_t scheduler) {
  if (!input || !scheduler) {
    return nullptr;
  }
  return Guard([&] { return Take(Transfer(Take(input), *Cast(scheduler))); });
}

// Run `input` on `scheduler`'s execution context (starts there, rather than
// transferring afterwards). Consumes `input`; returns nullptr (input NOT
// consumed) on null arguments.
mmdeploy_sender_t mmdeploy_executor_on(mmdeploy_scheduler_t scheduler, mmdeploy_sender_t input) {
  if (!scheduler || !input) {
    return nullptr;
  }
  return Guard([&] { return Take(On(*Cast(scheduler), Take(input))); });
}
// Chain a user callback onto `input`: when `input` produces a value,
// fn(value, context) is invoked and its result becomes the new sender's value.
// Consumes `input`; returns nullptr (input NOT consumed) on null arguments.
mmdeploy_sender_t mmdeploy_executor_then(mmdeploy_sender_t input, mmdeploy_then_fn_t fn,
                                         void* context) {
  if (!input || !fn) {
    return nullptr;
  }
  return Guard([&] {
    return Take(Then(Take(input), [fn, context](Value args) {
      // The callback receives ownership of a value handle and must return a
      // handle, which is unwrapped and freed here.
      // NOTE(review): a callback returning nullptr would dereference null on
      // the next line — confirm the contract requires a non-null return.
      auto out = Cast(fn(Take(std::move(args)), context));
      Value ret(std::move(*out));
      delete out;
      return ret;
    }));
  });
}
// Like mmdeploy_executor_then, but the callback returns a new *sender* whose
// completion becomes the completion of the resulting sender. The value handle
// passed to `fn` aliases state owned by the operation (it is a pointer to
// `args`), so the callback must not destroy it.
// Consumes `input`; returns nullptr (input NOT consumed) on null arguments.
mmdeploy_sender_t mmdeploy_executor_let_value(mmdeploy_sender_t input, mmdeploy_let_value_fn_t fn,
                                              void* context) {
  if (!input || !fn) {
    return nullptr;
  }
  return Guard([&] {
    return Take(LetValue(Take(input), [fn, context](Value& args) {
      // fn returns a sender handle; unwrap the sender and free the handle.
      auto out = Cast(fn(Cast(&args), context));
      SenderType ret(std::move(*out));
      delete out;
      return ret;
    }));
  });
}
// Turn `input` into a copyable sender (see mmdeploy_sender_copy).
// Consumes `input`; returns nullptr (input NOT consumed) for a null input.
mmdeploy_sender_t mmdeploy_executor_split(mmdeploy_sender_t input) {
  if (!input) {
    return nullptr;
  }
  return Guard([&] { return Take(Split(Take(input))); });
}
// Combine `n` senders into one whose value is an array of all their results.
// Consumes every sender in `inputs`; returns nullptr when `inputs` is null
// (in which case nothing is consumed).
mmdeploy_sender_t mmdeploy_executor_when_all(mmdeploy_sender_t inputs[], int32_t n) {
  if (inputs == nullptr) {
    return nullptr;
  }
  return Guard([&] {
    // Move every handle's sender out; the handles are destroyed by Take.
    std::vector<SenderType> pending;
    pending.reserve(n);
    for (int32_t idx = 0; idx < n; ++idx) {
      pending.push_back(Take(inputs[idx]));
    }
    // Collapse the array of results into a single Value.
    auto pack = [](Value::Array&& v) { return Value(std::move(v)); };
    return Take(Then(WhenAll(std::move(pending)), std::move(pack)));
  });
}
// Eagerly start `input` and return a sender for its eventual result.
// Consumes `input`; returns nullptr (input NOT consumed) for a null input.
mmdeploy_sender_t mmdeploy_executor_ensure_started(mmdeploy_sender_t input) {
  if (!input) {
    return nullptr;
  }
  return Guard([&] { return Take(EnsureStarted(Take(input))); });
}
// Launch `input` without keeping a handle to its result ("fire and forget").
// Consumes the sender on success. Returns 0 on success, MMDEPLOY_E_INVALID_ARG
// for a null input, MMDEPLOY_E_FAIL if starting the work throws.
int mmdeploy_executor_start_detached(mmdeploy_sender_t input) {
  if (input == nullptr) {
    return MMDEPLOY_E_INVALID_ARG;
  }
  try {
    StartDetached(Take(input));
  } catch (...) {
    // Swallow everything: no exception may cross the C ABI.
    return MMDEPLOY_E_FAIL;
  }
  return 0;
}
// Block until `input` completes and return its value as a new handle owned by
// the caller. Consumes `input`; returns nullptr for a null input.
// NOTE(review): the result of SyncWait is unwrapped via std::get<Value>; if
// the sender completed with error/stopped this may throw inside Guard —
// confirm Guard converts that to a null return.
mmdeploy_value_t mmdeploy_executor_sync_wait(mmdeploy_sender_t input) {
  if (!input) {
    return nullptr;
  }
  return Guard([&] { return Take(std::get<Value>(SyncWait(Take(input)))); });
}
// Status-code flavor of mmdeploy_executor_sync_wait. Consumes `sender`; stores
// the result into *value when `value` is non-null, otherwise releases it.
// Returns MMDEPLOY_SUCCESS, MMDEPLOY_E_INVALID_ARG (null sender), or
// MMDEPLOY_E_FAIL (wait produced no value).
int mmdeploy_executor_sync_wait_v2(mmdeploy_sender_t sender, mmdeploy_value_t* value) {
  if (sender == nullptr) {
    return MMDEPLOY_E_INVALID_ARG;
  }
  auto waited = mmdeploy_executor_sync_wait(sender);
  if (waited == nullptr) {
    return MMDEPLOY_E_FAIL;
  }
  if (value == nullptr) {
    // Caller does not want the result; avoid leaking the handle.
    mmdeploy_value_destroy(waited);
    return MMDEPLOY_SUCCESS;
  }
  *value = waited;
  return MMDEPLOY_SUCCESS;
}
// Run fn(context) on the execution context of `scheduler`.
// Fixed: this was the only entry point in this file that dereferenced its
// arguments unchecked — a null scheduler or fn crashed. Guard both, matching
// the defensive style of every other function here; errors cannot be reported
// since the function returns void.
void mmdeploy_executor_execute(mmdeploy_scheduler_t scheduler, void (*fn)(void*), void* context) {
  if (scheduler == nullptr || fn == nullptr) {
    return;
  }
  Execute(*Cast(scheduler), [fn, context] { fn(context); });
}
// Copyright (c) OpenMMLab. All rights reserved.

/**
 * @file executor.h
 * @brief Experimental asynchronous sender/scheduler C APIs
 */

#ifndef MMDEPLOY_CSRC_APIS_C_EXECUTOR_H_
#define MMDEPLOY_CSRC_APIS_C_EXECUTOR_H_

#include "mmdeploy/common.h"

/* Fixed: use #ifdef instead of #if so the guard is written identically to the
 * other headers of this module (model.h, pipeline.h). */
#ifdef __cplusplus
extern "C" {
#endif

/******************************************************************************
 * Experimental asynchronous APIs */

/** Continuation for \ref mmdeploy_executor_then: consumes its input value and
 * returns a new value. */
typedef mmdeploy_value_t (*mmdeploy_then_fn_t)(mmdeploy_value_t, void*);

typedef mmdeploy_value_t (*mmdeploy_then_fn_v2_t)(mmdeploy_value_t*, void*);

typedef int (*mmdeploy_then_fn_v3_t)(mmdeploy_value_t* input, mmdeploy_value_t* output, void*);

/* Opaque handles: a sender represents an asynchronous result, a scheduler an
 * execution context. */
struct mmdeploy_sender;
struct mmdeploy_scheduler;

typedef struct mmdeploy_sender* mmdeploy_sender_t;
typedef struct mmdeploy_scheduler* mmdeploy_scheduler_t;

/** Continuation for \ref mmdeploy_executor_let_value: receives a borrowed
 * value handle (must not be destroyed) and returns a sender. */
typedef mmdeploy_sender_t (*mmdeploy_let_value_fn_t)(mmdeploy_value_t, void*);

///////////////////////////////////////////////////////////////////////////////
// Scheduler
///////////////////////////////////////////////////////////////////////////////

/** Scheduler running work inline on the calling thread. */
MMDEPLOY_API mmdeploy_scheduler_t mmdeploy_executor_inline();

/** Scheduler backed by a process-wide shared thread pool. */
MMDEPLOY_API mmdeploy_scheduler_t mmdeploy_executor_system_pool();

/**
 * Create a thread pool with the given number of worker threads
 * @param[in] num_threads
 * @return the handle to the created thread pool
 */
MMDEPLOY_API mmdeploy_scheduler_t mmdeploy_executor_create_thread_pool(int num_threads);

MMDEPLOY_API mmdeploy_scheduler_t mmdeploy_executor_create_thread();

MMDEPLOY_API mmdeploy_scheduler_t mmdeploy_executor_dynamic_batch(mmdeploy_scheduler_t scheduler,
                                                                  int max_batch_size, int timeout);

MMDEPLOY_API int mmdeploy_scheduler_destroy(mmdeploy_scheduler_t scheduler);

///////////////////////////////////////////////////////////////////////////////
// Utilities
///////////////////////////////////////////////////////////////////////////////

/**
 * @brief Create a copy of a copyable sender. Only senders created by \ref mmdeploy_executor_split
 * is copyable for now.
 * @param[in] input copyable sender
 * @return the sender created, or nullptr if the sender is not copyable
 */
MMDEPLOY_API mmdeploy_sender_t mmdeploy_sender_copy(mmdeploy_sender_t input);

/**
 * @brief Destroy a sender, notice that all sender adapters will consume input senders, only unused
 * senders should be destroyed using this function.
 * @param[in] sender the sender to destroy
 */
MMDEPLOY_API int mmdeploy_sender_destroy(mmdeploy_sender_t sender);

///////////////////////////////////////////////////////////////////////////////
// Sender factories
///////////////////////////////////////////////////////////////////////////////

/**
 * @brief Create a sender that sends the provided value
 * @param[in] value
 * @return created sender
 */
MMDEPLOY_API mmdeploy_sender_t mmdeploy_executor_just(mmdeploy_value_t value);

/**
 * @brief Create a sender that completes on the provided scheduler's context
 * @param[in] scheduler
 * @return the sender created
 */
MMDEPLOY_API mmdeploy_sender_t mmdeploy_executor_schedule(mmdeploy_scheduler_t scheduler);

MMDEPLOY_API mmdeploy_sender_t mmdeploy_executor_transfer_just(mmdeploy_scheduler_t scheduler,
                                                               mmdeploy_value_t value);

///////////////////////////////////////////////////////////////////////////////
// Sender adapters
///////////////////////////////////////////////////////////////////////////////

/**
 * Transfer the execution to the execution agent of the provided scheduler
 * @param[in] input
 * @param[in] scheduler
 * @return the sender created
 */
MMDEPLOY_API mmdeploy_sender_t mmdeploy_executor_transfer(mmdeploy_sender_t input,
                                                          mmdeploy_scheduler_t scheduler);

MMDEPLOY_API mmdeploy_sender_t mmdeploy_executor_on(mmdeploy_scheduler_t scheduler,
                                                    mmdeploy_sender_t input);

MMDEPLOY_API mmdeploy_sender_t mmdeploy_executor_then(mmdeploy_sender_t input,
                                                      mmdeploy_then_fn_t fn, void* context);

MMDEPLOY_API mmdeploy_sender_t mmdeploy_executor_let_value(mmdeploy_sender_t input,
                                                           mmdeploy_let_value_fn_t fn,
                                                           void* context);

/**
 * Convert the input sender into a sender that is copyable via \ref mmdeploy_sender_copy. Notice
 * that this function doesn't make the sender multi-shot, it just return a sender that is copyable.
 * @param[in] input
 * @return the sender that is copyable
 */
MMDEPLOY_API mmdeploy_sender_t mmdeploy_executor_split(mmdeploy_sender_t input);

MMDEPLOY_API mmdeploy_sender_t mmdeploy_executor_when_all(mmdeploy_sender_t inputs[], int32_t n);

MMDEPLOY_API mmdeploy_sender_t mmdeploy_executor_ensure_started(mmdeploy_sender_t input);

///////////////////////////////////////////////////////////////////////////////
// Sender consumers
///////////////////////////////////////////////////////////////////////////////

MMDEPLOY_API int mmdeploy_executor_start_detached(mmdeploy_sender_t input);

MMDEPLOY_API mmdeploy_value_t mmdeploy_executor_sync_wait(mmdeploy_sender_t input);

MMDEPLOY_API int mmdeploy_executor_sync_wait_v2(mmdeploy_sender_t sender, mmdeploy_value_t* value);

MMDEPLOY_API void mmdeploy_executor_execute(mmdeploy_scheduler_t scheduler, void (*fn)(void*),
                                            void* context);

#ifdef __cplusplus
}
#endif

#endif  // MMDEPLOY_CSRC_APIS_C_EXECUTOR_H_
// Copyright (c) OpenMMLab. All rights reserved.

#ifndef MMDEPLOY_CSRC_APIS_C_EXECUTOR_INTERNAL_H_
#define MMDEPLOY_CSRC_APIS_C_EXECUTOR_INTERNAL_H_

#include "mmdeploy/execution/schedulers/registry.h"
#include "mmdeploy/executor.h"

// NOTE(review): a using-directive and an unnamed namespace in a header leak
// into every including TU; acceptable only while this header stays internal to
// the C-API sources — never include it from a public header.
using namespace mmdeploy;

// Concrete type-erased sender/scheduler types hidden behind the opaque C
// handles mmdeploy_sender_t / mmdeploy_scheduler_t.
using SenderType = TypeErasedSender<Value>;
using SchedulerType = TypeErasedScheduler<Value>;

namespace {

// Reinterpret opaque C handles as their concrete C++ types and back.
inline SchedulerType* Cast(mmdeploy_scheduler_t s) { return reinterpret_cast<SchedulerType*>(s); }

inline mmdeploy_scheduler_t Cast(SchedulerType* s) {
  return reinterpret_cast<mmdeploy_scheduler_t>(s);
}

inline SenderType* Cast(mmdeploy_sender_t s) { return reinterpret_cast<SenderType*>(s); }

inline mmdeploy_sender_t Cast(SenderType* s) { return reinterpret_cast<mmdeploy_sender_t>(s); }

// Move the sender out of the handle, then destroy the handle (consumes `s`).
inline SenderType Take(mmdeploy_sender_t s) {
  auto sender = std::move(*Cast(s));
  mmdeploy_sender_destroy(s);
  return sender;
}

// Wrap a sender into a fresh heap-allocated handle owned by the caller.
inline mmdeploy_sender_t Take(SenderType s) { return Cast(new SenderType(std::move(s))); }

// Convenience overload: type-erase any sender, then wrap it into a handle.
// Note the argument is moved-from despite being taken by lvalue reference.
template <typename T, std::enable_if_t<_is_sender<T>, int> = 0>
inline mmdeploy_sender_t Take(T& s) {
  return Take(SenderType(std::move(s)));
}

}  // namespace

#endif  // MMDEPLOY_CSRC_APIS_C_EXECUTOR_INTERNAL_H_
// Copyright (c) OpenMMLab. All rights reserved.
#ifndef MMDEPLOY_SRC_APIS_C_HANDLE_H_
#define MMDEPLOY_SRC_APIS_C_HANDLE_H_
#include <memory>
#include "mmdeploy/core/device.h"
#include "mmdeploy/core/graph.h"
#include "mmdeploy/core/value.h"
#include "mmdeploy/graph/common.h"
#include "mmdeploy/graph/static_router.h"
namespace mmdeploy {
using namespace framework;
namespace {
// Owns the pipeline graph node built from a config value and forwards
// sender-based processing to it; this is the implementation type behind the
// opaque mmdeploy_pipeline_t handle.
class AsyncHandle {
 public:
  // Convenience ctor: injects a {device, stream} pair built from
  // (device_name, device_id) into config["context"] before building the graph.
  AsyncHandle(const char* device_name, int device_id, Value config)
      : AsyncHandle(SetContext(std::move(config), device_name, device_id)) {}

  // Builds the graph node described by `config`.
  // NOTE(review): CreateFromConfig(config).value() unwraps a Result before the
  // null check — if that unwrap throws on failure, the eEntryNotFound branch
  // only covers the "builder returned but null" case; confirm intended.
  explicit AsyncHandle(const Value& config) {
    if (auto builder = graph::Builder::CreateFromConfig(config).value()) {
      node_ = builder->Build().value();
    } else {
      MMDEPLOY_ERROR("failed to find creator for node");
      throw_exception(eEntryNotFound);
    }
  }

  // Feed an input sender through the graph node; returns the output sender.
  graph::Sender<Value> Process(graph::Sender<Value> input) {
    return node_->Process(std::move(input));
  }

 private:
  // Returns `config` with config["context"] extended to hold the requested
  // device and a stream created on it.
  static Value SetContext(Value config, const char* device_name, int device_id) {
    Device device(device_name, device_id);
    Stream stream(device);
    config["context"].update({{"device", device}, {"stream", stream}});
    return config;
  }

  std::unique_ptr<graph::Node> node_;
};
} // namespace
} // namespace mmdeploy
#endif // MMDEPLOY_SRC_APIS_C_HANDLE_H_
// Copyright (c) OpenMMLab. All rights reserved.
// clang-format off
#include "mmdeploy/model.h"
#include <memory>
#include "mmdeploy/common_internal.h"
#include "mmdeploy/core/logger.h"
#include "mmdeploy/core/model.h"
// clang-format on
using namespace mmdeploy;
int mmdeploy_model_create_by_path(const char* path, mmdeploy_model_t* model) {
try {
auto ptr = std::make_unique<Model>(path);
*model = reinterpret_cast<mmdeploy_model_t>(ptr.release());
return MMDEPLOY_SUCCESS;
} catch (const std::exception& e) {
MMDEPLOY_ERROR("failed to create model: {}", e.what());
} catch (...) {
MMDEPLOY_ERROR("unknown exception caught");
}
return MMDEPLOY_E_FAIL;
}
// Create an SDK model from an in-memory buffer of `size` bytes; the handle
// stored in *model must be freed with mmdeploy_model_destroy.
// No exception crosses the C ABI.
int mmdeploy_model_create(const void* buffer, int size, mmdeploy_model_t* model) {
  try {
    auto ptr = std::make_unique<Model>(buffer, size);
    // Transfer ownership to the opaque C handle.
    *model = reinterpret_cast<mmdeploy_model_t>(ptr.release());
    return MMDEPLOY_SUCCESS;
  } catch (const std::exception& e) {
    MMDEPLOY_ERROR("failed to create model: {}", e.what());
  } catch (...) {
    MMDEPLOY_ERROR("unknown exception caught");
  }
  return MMDEPLOY_E_FAIL;
}
// Frees the Model behind the handle; passing nullptr is a safe no-op.
void mmdeploy_model_destroy(mmdeploy_model_t model) { delete reinterpret_cast<Model*>(model); }
// Copyright (c) OpenMMLab. All rights reserved.

/**
 * @file model.h
 * @brief Interface to MMDeploy SDK Model
 */

#ifndef MMDEPLOY_SRC_APIS_C_MODEL_H_
#define MMDEPLOY_SRC_APIS_C_MODEL_H_

#include "mmdeploy/common.h"

#ifdef __cplusplus
extern "C" {
#endif

/** Opaque handle to an SDK model. */
typedef struct mmdeploy_model* mmdeploy_model_t;

/**
 * @brief Create SDK Model instance from given model path
 * @param[in] path model path
 * @param[out] model sdk model instance that must be destroyed by \ref mmdeploy_model_destroy
 * @return status code of the operation
 */
MMDEPLOY_API int mmdeploy_model_create_by_path(const char* path, mmdeploy_model_t* model);

/**
 * @brief Create SDK Model instance from memory
 * @param[in] buffer a linear buffer contains the model information
 * @param[in] size size of \p buffer in bytes
 * @param[out] model sdk model instance that must be destroyed by \ref mmdeploy_model_destroy
 * @return status code of the operation
 */
MMDEPLOY_API int mmdeploy_model_create(const void* buffer, int size, mmdeploy_model_t* model);

/**
 * @brief Destroy model instance
 * @param[in] model sdk model instance created by \ref mmdeploy_model_create_by_path or \ref
 * mmdeploy_model_create
 */
MMDEPLOY_API void mmdeploy_model_destroy(mmdeploy_model_t model);

#ifdef __cplusplus
}
#endif

#endif  // MMDEPLOY_SRC_APIS_C_MODEL_H_
// Copyright (c) OpenMMLab. All rights reserved.
#include "mmdeploy/pipeline.h"
#include "mmdeploy/common_internal.h"
#include "mmdeploy/executor_internal.h"
#include "mmdeploy/handle.h"
// Create a pipeline from a packed config value, optionally merging entries
// from `context` into config["context"]. *pipeline must be released with
// mmdeploy_pipeline_destroy.
// Fixed: null `config`/`pipeline` were dereferenced unchecked — a null-pointer
// dereference is UB and is NOT caught by the try/catch below; validate first.
int mmdeploy_pipeline_create_v3(mmdeploy_value_t config, mmdeploy_context_t context,
                                mmdeploy_pipeline_t* pipeline) {
  if (!config || !pipeline) {
    return MMDEPLOY_E_INVALID_ARG;
  }
  try {
    auto _config = *Cast(config);
    if (context) {
      if (!_config.contains("context")) {
        _config["context"] = Value::Object();
      }
      // merge the context object into config["context"] (depth limit 2)
      update(_config["context"].object(), Cast(context)->object(), 2);
    }
    auto _handle = std::make_unique<AsyncHandle>(std::move(_config));
    *pipeline = Cast(_handle.release());
    return MMDEPLOY_SUCCESS;
  } catch (const std::exception& e) {
    MMDEPLOY_ERROR("exception caught: {}", e.what());
  } catch (...) {
    MMDEPLOY_ERROR("unknown exception caught");
  }
  return MMDEPLOY_E_FAIL;
}
// Create a pipeline from the "pipeline.json" config embedded in `model`,
// exposing the model itself to the pipeline through the context.
// Fixed: the body ran without a try/catch, so the unwrap of the ReadConfig
// result (config.value()) could throw across the C ABI when the model has no
// usable pipeline config; null arguments were also dereferenced unchecked.
int mmdeploy_pipeline_create_from_model(mmdeploy_model_t model, mmdeploy_context_t context,
                                        mmdeploy_pipeline_t* pipeline) {
  if (!model || !context || !pipeline) {
    return MMDEPLOY_E_INVALID_ARG;
  }
  try {
    auto config = Cast(model)->ReadConfig("pipeline.json");
    auto _context = *Cast(context);
    _context["model"] = *Cast(model);
    return mmdeploy_pipeline_create_v3(Cast(&config.value()), (mmdeploy_context_t)&_context,
                                       pipeline);
  } catch (const std::exception& e) {
    MMDEPLOY_ERROR("exception caught: {}", e.what());
  } catch (...) {
    MMDEPLOY_ERROR("unknown exception caught");
  }
  return MMDEPLOY_E_FAIL;
}
// Run the pipeline on an input sender; on success *output receives the sender
// for the result and `input` has been consumed. On MMDEPLOY_E_INVALID_ARG the
// input sender is NOT consumed; on MMDEPLOY_E_FAIL it may already have been.
int mmdeploy_pipeline_apply_async(mmdeploy_pipeline_t pipeline, mmdeploy_sender_t input,
                                  mmdeploy_sender_t* output) {
  if (pipeline == nullptr || input == nullptr || output == nullptr) {
    return MMDEPLOY_E_INVALID_ARG;
  }
  try {
    *output = Take(Cast(pipeline)->Process(Take(input)));
    return MMDEPLOY_SUCCESS;
  } catch (const std::exception& e) {
    MMDEPLOY_ERROR("exception caught: {}", e.what());
  } catch (...) {
    MMDEPLOY_ERROR("unknown exception caught");
  }
  return MMDEPLOY_E_FAIL;
}
// Destroy a pipeline handle; nullptr is tolerated as a no-op.
void mmdeploy_pipeline_destroy(mmdeploy_pipeline_t pipeline) {
  if (pipeline != nullptr) {
    delete Cast(pipeline);
  }
}
// Synchronous pipeline application: wrap `input` in a sender, run it through
// the pipeline, and wait for the result, which is stored into *output.
// Fixed: (1) when apply_async rejected its arguments it did not consume
// input_sender, which leaked; release it on that path only (on other failures
// the sender has already been consumed, so freeing again would double-free).
// (2) *output was written without a null check; reject null up front.
int mmdeploy_pipeline_apply(mmdeploy_pipeline_t pipeline, mmdeploy_value_t input,
                            mmdeploy_value_t* output) {
  if (!output) {
    return MMDEPLOY_E_INVALID_ARG;
  }
  auto input_sender = mmdeploy_executor_just(input);
  if (!input_sender) {
    return MMDEPLOY_E_FAIL;
  }
  mmdeploy_sender_t output_sender{};
  if (auto ec = mmdeploy_pipeline_apply_async(pipeline, input_sender, &output_sender)) {
    if (ec == MMDEPLOY_E_INVALID_ARG) {
      // apply_async rejected its arguments without consuming the sender.
      mmdeploy_sender_destroy(input_sender);
    }
    return ec;
  }
  auto _output = mmdeploy_executor_sync_wait(output_sender);
  if (!_output) {
    return MMDEPLOY_E_FAIL;
  }
  *output = _output;
  return MMDEPLOY_SUCCESS;
}
// Copyright (c) OpenMMLab. All rights reserved.

/**
 * @file pipeline.h
 * @brief Experimental pipeline C APIs
 */

#ifndef MMDEPLOY_CSRC_APIS_C_PIPELINE_H_
#define MMDEPLOY_CSRC_APIS_C_PIPELINE_H_

#include "mmdeploy/common.h"
#include "mmdeploy/executor.h"
#include "mmdeploy/model.h"

#ifdef __cplusplus
extern "C" {
#endif

/******************************************************************************
 * Experimental pipeline APIs */

/** Opaque handle to a pipeline. */
typedef struct mmdeploy_pipeline* mmdeploy_pipeline_t;

/**
 * Create pipeline
 * @param[in] config pipeline config packed in a value
 * @param[in] context execution context; may be NULL
 * @param[out] pipeline handle of the created pipeline; destroy with
 * \ref mmdeploy_pipeline_destroy
 * @return status of the operation
 */
MMDEPLOY_API int mmdeploy_pipeline_create_v3(mmdeploy_value_t config, mmdeploy_context_t context,
                                             mmdeploy_pipeline_t* pipeline);

/**
 * Create pipeline from internal pipeline config of the model
 * @param[in] model sdk model carrying the pipeline config
 * @param[in] context execution context
 * @param[out] pipeline handle of the created pipeline
 * @return status of the operation
 */
MMDEPLOY_API int mmdeploy_pipeline_create_from_model(mmdeploy_model_t model,
                                                     mmdeploy_context_t context,
                                                     mmdeploy_pipeline_t* pipeline);

/**
 * @brief Apply pipeline
 * @param[in] pipeline handle of the pipeline
 * @param[in] input input value
 * @param[out] output output value
 * @return status of the operation
 */
MMDEPLOY_API int mmdeploy_pipeline_apply(mmdeploy_pipeline_t pipeline, mmdeploy_value_t input,
                                         mmdeploy_value_t* output);

/**
 * Apply pipeline asynchronously
 * @param[in] pipeline handle of the pipeline
 * @param[in] input input sender that will be consumed by the operation
 * @param[out] output output sender
 * @return status of the operation
 */
MMDEPLOY_API int mmdeploy_pipeline_apply_async(mmdeploy_pipeline_t pipeline,
                                               mmdeploy_sender_t input, mmdeploy_sender_t* output);

/**
 * @brief destroy pipeline
 * @param[in] pipeline handle to destroy; NULL is a no-op
 */
MMDEPLOY_API void mmdeploy_pipeline_destroy(mmdeploy_pipeline_t pipeline);

#ifdef __cplusplus
}
#endif

#endif  // MMDEPLOY_CSRC_APIS_C_PIPELINE_H_
// Copyright (c) OpenMMLab. All rights reserved.
#include "mmdeploy/pose_detector.h"
#include <numeric>
#include "mmdeploy/codebase/mmpose/mmpose.h"
#include "mmdeploy/common_internal.h"
#include "mmdeploy/core/device.h"
#include "mmdeploy/core/graph.h"
#include "mmdeploy/core/mat.h"
#include "mmdeploy/core/utils/formatter.h"
#include "mmdeploy/handle.h"
#include "mmdeploy/pipeline.h"
using namespace std;
using namespace mmdeploy;
// Create a pose detector on the given device. A temporary execution context
// is built for (device_name, device_id) and destroyed once the detector has
// been created from it.
int mmdeploy_pose_detector_create(mmdeploy_model_t model, const char* device_name, int device_id,
                                  mmdeploy_pose_detector_t* detector) {
  mmdeploy_context_t context{};
  auto ec = mmdeploy_context_create_by_device(device_name, device_id, &context);
  if (ec != MMDEPLOY_SUCCESS) {
    return ec;
  }
  ec = mmdeploy_pose_detector_create_v2(model, context, detector);
  mmdeploy_context_destroy(context);
  return ec;
}
// Convenience overload: load the model from `model_path`, build the detector,
// then release the temporary model handle before returning.
int mmdeploy_pose_detector_create_by_path(const char* model_path, const char* device_name,
                                          int device_id, mmdeploy_pose_detector_t* detector) {
  mmdeploy_model_t model{};
  if (auto ec = mmdeploy_model_create_by_path(model_path, &model)) {
    return ec;
  }
  auto ec = mmdeploy_pose_detector_create(model, device_name, device_id, detector);
  mmdeploy_model_destroy(model);
  return ec;
}
// Full-image pose estimation: delegates to apply_bbox with no rois, so each
// image is processed with its whole extent as the bounding box.
int mmdeploy_pose_detector_apply(mmdeploy_pose_detector_t detector, const mmdeploy_mat_t* mats,
                                 int mat_count, mmdeploy_pose_detection_t** results) {
  return mmdeploy_pose_detector_apply_bbox(detector, mats, mat_count, nullptr, nullptr, results);
}
// Synchronous pose estimation over a batch of images with optional per-image
// rois: pack inputs into a value, run the pipeline, unpack the results.
int mmdeploy_pose_detector_apply_bbox(mmdeploy_pose_detector_t detector, const mmdeploy_mat_t* mats,
                                      int mat_count, const mmdeploy_rect_t* bboxes,
                                      const int* bbox_count, mmdeploy_pose_detection_t** results) {
  // wrapped<> appears to manage the value handle's lifetime on every exit
  // path (RAII) — see common_internal.h.
  wrapped<mmdeploy_value_t> input;
  if (auto ec =
          mmdeploy_pose_detector_create_input(mats, mat_count, bboxes, bbox_count, input.ptr())) {
    return ec;
  }
  wrapped<mmdeploy_value_t> output;
  if (auto ec = mmdeploy_pose_detector_apply_v2(detector, input, output.ptr())) {
    return ec;
  }
  if (auto ec = mmdeploy_pose_detector_get_result(output, results)) {
    return ec;
  }
  return MMDEPLOY_SUCCESS;
}
// Free a result buffer produced by the pose detector: each entry's per-
// keypoint arrays first, then the array of entries itself. A null `results`
// is a safe no-op.
void mmdeploy_pose_detector_release_result(mmdeploy_pose_detection_t* results, int count) {
  if (!results) {
    return;
  }
  for (int idx = 0; idx != count; ++idx) {
    delete[] results[idx].point;
    delete[] results[idx].score;
  }
  delete[] results;
}
// A pose detector is a pipeline underneath; forward teardown to the pipeline
// API. Null is tolerated (pipeline_destroy checks).
void mmdeploy_pose_detector_destroy(mmdeploy_pose_detector_t detector) {
  mmdeploy_pipeline_destroy((mmdeploy_pipeline_t)detector);
}

// Build a pose detector from a model plus an existing execution context; the
// handle is a pipeline created from the model's embedded pipeline config.
int mmdeploy_pose_detector_create_v2(mmdeploy_model_t model, mmdeploy_context_t context,
                                     mmdeploy_pose_detector_t* detector) {
  return mmdeploy_pipeline_create_from_model(model, context, (mmdeploy_pipeline_t*)detector);
}
// Pack pose-detector inputs into a single Value: one entry per (image, roi)
// pair, each holding "ori_img" and a "bbox" array [x, y, w, h, score].
// When `bboxes`/`bbox_count` are null, each whole image is its own roi.
int mmdeploy_pose_detector_create_input(const mmdeploy_mat_t* mats, int mat_count,
                                        const mmdeploy_rect_t* bboxes, const int* bbox_count,
                                        mmdeploy_value_t* value) {
  if (mat_count && mats == nullptr) {
    return MMDEPLOY_E_INVALID_ARG;
  }
  try {
    Value::Array input_images;

    // Append one entry for `img` with roi `bbox`, or the full image when null.
    auto add_bbox = [&](const Mat& img, const mmdeploy_rect_t* bbox) {
      Value::Array b;
      if (bbox) {
        // +1 suggests inclusive pixel coordinates — TODO confirm convention.
        float width = bbox->right - bbox->left + 1;
        float height = bbox->bottom - bbox->top + 1;
        b = {bbox->left, bbox->top, width, height, 1.0};
      } else {
        b = {0, 0, img.width(), img.height(), 1.0};
      }
      input_images.push_back({{"ori_img", img}, {"bbox", std::move(b)}});
    };

    for (int i = 0; i < mat_count; ++i) {
      auto _mat = Cast(mats[i]);
      if (bboxes && bbox_count) {
        // `bboxes` is one flat buffer walked sequentially: image i owns the
        // next bbox_count[i] rects.
        for (int j = 0; j < bbox_count[i]; ++j) {
          add_bbox(_mat, bboxes++);
        }
      } else {  // inference whole image
        add_bbox(_mat, nullptr);
      }
    }
    *value = Take(Value{std::move(input_images)});
    return MMDEPLOY_SUCCESS;
  } catch (const std::exception& e) {
    MMDEPLOY_ERROR("unhandled exception: {}", e.what());
  } catch (...) {
    MMDEPLOY_ERROR("unknown exception caught");
  }
  return MMDEPLOY_E_FAIL;
}
// Value-level application: the detector is a pipeline, delegate directly.
int mmdeploy_pose_detector_apply_v2(mmdeploy_pose_detector_t detector, mmdeploy_value_t input,
                                    mmdeploy_value_t* output) {
  return mmdeploy_pipeline_apply((mmdeploy_pipeline_t)detector, input, output);
}

// Sender-level application; `input` is consumed on success (see pipeline API).
int mmdeploy_pose_detector_apply_async(mmdeploy_pose_detector_t detector, mmdeploy_sender_t input,
                                       mmdeploy_sender_t* output) {
  return mmdeploy_pipeline_apply_async((mmdeploy_pipeline_t)detector, input, output);
}
// Unpack pose results from a pipeline output value into a C array; on success
// *results holds one entry per detection and must be released with
// mmdeploy_pose_detector_release_result.
int mmdeploy_pose_detector_get_result(mmdeploy_value_t output,
                                      mmdeploy_pose_detection_t** results) {
  if (!output || !results) {
    return MMDEPLOY_E_INVALID_ARG;
  }
  try {
    std::vector<mmpose::PoseDetectorOutput> detections;
    from_value(Cast(output)->front(), detections);
    size_t count = detections.size();

    // Guarded buffer: if anything below throws, the deleter frees whatever
    // keypoint arrays were already allocated (entries are zero-initialized,
    // so deleting an unfilled entry's null pointers is safe).
    auto deleter = [&](mmdeploy_pose_detection_t* p) {
      mmdeploy_pose_detector_release_result(p, static_cast<int>(count));
    };
    std::unique_ptr<mmdeploy_pose_detection_t[], decltype(deleter)> _results(
        new mmdeploy_pose_detection_t[count]{}, deleter);

    size_t result_idx = 0;
    for (const auto& bbox_result : detections) {
      auto& res = _results[result_idx++];
      auto size = bbox_result.key_points.size();
      res.point = new mmdeploy_point_t[size];
      res.score = new float[size];
      res.length = static_cast<int>(size);
      // NOTE(review): `k` is int against size_t `size` — harmless for
      // realistic keypoint counts but triggers sign-compare warnings.
      for (int k = 0; k < size; k++) {
        // key_points[k].bbox[0..1] is read as the keypoint's (x, y).
        res.point[k].x = bbox_result.key_points[k].bbox[0];
        res.point[k].y = bbox_result.key_points[k].bbox[1];
        res.score[k] = bbox_result.key_points[k].score;
      }
    }
    *results = _results.release();
    return MMDEPLOY_SUCCESS;
  } catch (const std::exception& e) {
    MMDEPLOY_ERROR("unhandled exception: {}", e.what());
  } catch (...) {
    MMDEPLOY_ERROR("unknown exception caught");
  }
  return MMDEPLOY_E_FAIL;
}
// Copyright (c) OpenMMLab. All rights reserved.

/**
 * @file pose_detector.h
 * @brief Interface to MMPose task
 */

#ifndef MMDEPLOY_SRC_APIS_C_POSE_DETECTOR_H_
#define MMDEPLOY_SRC_APIS_C_POSE_DETECTOR_H_

#include "mmdeploy/common.h"
#include "mmdeploy/executor.h"
#include "mmdeploy/model.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef struct mmdeploy_pose_detection_t {
  mmdeploy_point_t* point;  ///< keypoint positions
  float* score;             ///< per-keypoint score
  int length;               ///< number of keypoints
} mmdeploy_pose_detection_t;

/** Opaque handle to a pose detector. */
typedef struct mmdeploy_pose_detector* mmdeploy_pose_detector_t;

/**
 * @brief Create a pose detector instance
 * @param[in] model an instance of mmpose model created by
 * \ref mmdeploy_model_create_by_path or \ref mmdeploy_model_create in \ref model.h
 * @param[in] device_name name of device, such as "cpu", "cuda", etc.
 * @param[in] device_id id of device.
 * @param[out] detector handle of the created pose detector, which must be destroyed
 * by \ref mmdeploy_pose_detector_destroy
 * @return status code of the operation
 */
MMDEPLOY_API int mmdeploy_pose_detector_create(mmdeploy_model_t model, const char* device_name,
                                               int device_id, mmdeploy_pose_detector_t* detector);

/**
 * @brief Create a pose detector instance
 * @param[in] model_path path to pose detection model
 * @param[in] device_name name of device, such as "cpu", "cuda", etc.
 * @param[in] device_id id of device.
 * @param[out] detector handle of the created pose detector, which must be destroyed
 * by \ref mmdeploy_pose_detector_destroy
 * @return status code of the operation
 */
MMDEPLOY_API int mmdeploy_pose_detector_create_by_path(const char* model_path,
                                                       const char* device_name, int device_id,
                                                       mmdeploy_pose_detector_t* detector);

/**
 * @brief Apply pose detector to a batch of images with full image roi
 * @param[in] detector pose detector's handle created by \ref
 * mmdeploy_pose_detector_create_by_path
 * @param[in] mats a batch of images
 * @param[in] mat_count number of images in the batch
 * @param[out] results a linear buffer containing the pose results, which must be released
 * by \ref mmdeploy_pose_detector_release_result
 * @return status code of the operation
 */
MMDEPLOY_API int mmdeploy_pose_detector_apply(mmdeploy_pose_detector_t detector,
                                              const mmdeploy_mat_t* mats, int mat_count,
                                              mmdeploy_pose_detection_t** results);

/**
 * @brief Apply pose detector to a batch of images supplied with bboxes(roi)
 * @param[in] detector pose detector's handle created by \ref
 * mmdeploy_pose_detector_create_by_path
 * @param[in] mats a batch of images
 * @param[in] mat_count number of images in the batch
 * @param[in] bboxes bounding boxes(roi) detected by mmdet, one flat buffer across all images
 * @param[in] bbox_count number of bboxes of each image, must be same length as \p mats
 * @param[out] results a linear buffer containing the pose results, which has the same length as \p
 * bboxes; must be released by \ref mmdeploy_pose_detector_release_result
 * @return status code of the operation
 */
MMDEPLOY_API int mmdeploy_pose_detector_apply_bbox(mmdeploy_pose_detector_t detector,
                                                   const mmdeploy_mat_t* mats, int mat_count,
                                                   const mmdeploy_rect_t* bboxes,
                                                   const int* bbox_count,
                                                   mmdeploy_pose_detection_t** results);

/** @brief Release result buffer returned by \ref mmdeploy_pose_detector_apply or \ref
 * mmdeploy_pose_detector_apply_bbox
 * @param[in] results result buffer by pose detector
 * @param[in] count length of \p results
 */
MMDEPLOY_API void mmdeploy_pose_detector_release_result(mmdeploy_pose_detection_t* results,
                                                        int count);

/**
 * @brief destroy pose_detector
 * @param[in] detector handle of pose_detector created by \ref
 * mmdeploy_pose_detector_create_by_path or \ref mmdeploy_pose_detector_create
 */
MMDEPLOY_API void mmdeploy_pose_detector_destroy(mmdeploy_pose_detector_t detector);

/******************************************************************************
 * Experimental asynchronous APIs */

MMDEPLOY_API int mmdeploy_pose_detector_create_v2(mmdeploy_model_t model,
                                                  mmdeploy_context_t context,
                                                  mmdeploy_pose_detector_t* detector);

MMDEPLOY_API int mmdeploy_pose_detector_create_input(const mmdeploy_mat_t* mats, int mat_count,
                                                     const mmdeploy_rect_t* bboxes,
                                                     const int* bbox_count,
                                                     mmdeploy_value_t* value);

MMDEPLOY_API int mmdeploy_pose_detector_apply_v2(mmdeploy_pose_detector_t detector,
                                                 mmdeploy_value_t input, mmdeploy_value_t* output);

MMDEPLOY_API int mmdeploy_pose_detector_apply_async(mmdeploy_pose_detector_t detector,
                                                    mmdeploy_sender_t input,
                                                    mmdeploy_sender_t* output);

MMDEPLOY_API int mmdeploy_pose_detector_get_result(mmdeploy_value_t output,
                                                   mmdeploy_pose_detection_t** results);

#ifdef __cplusplus
}
#endif

#endif  // MMDEPLOY_SRC_APIS_C_POSE_DETECTOR_H_
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment