Commit 546b4279 authored by limm's avatar limm
Browse files

Add csrc and mmdeploy modules

parent 502f4fb9
Pipeline #2810 canceled with stages
// Copyright (c) OpenMMLab. All rights reserved.
#include "mmdeploy/core/graph.h"
#include "mmdeploy/archive/value_archive.h"
#include "mmdeploy/core/registry.h"
#include "mmdeploy/graph/common.h"
#include "mmdeploy/graph/flattened.h"
namespace mmdeploy::graph {
namespace {

// Parsed form of a port-binding expression.
struct Expr {
  std::string lhs;    // name left of '='
  std::string rhs;    // name right of '=' (same as lhs when '=' is absent)
  char operation{0};  // 0: none, '*': flatten/unflatten, '+': broadcast
};

// Parse expressions like "x", "x=y", "x=*y" or "x=+y".
// '=' splits the expression into lhs/rhs; '*' and '+' select an operation and
// are stripped wherever they appear. Without '=', rhs is copied from lhs.
Expr ParseExpr(const std::string& str) {
  Expr expr;
  bool split{};
  for (const auto& c : str) {
    switch (c) {
      case '=':
        split = true;
        break;
      case '*':
      case '+':
        expr.operation = c;
        break;
      default:
        (split ? &expr.rhs : &expr.lhs)->push_back(c);
    }
  }
  if (!split) {
    expr.rhs = expr.lhs;
  }
  // NRVO: returning the local directly; `return std::move(expr)` would
  // pessimize by blocking copy elision (-Wpessimizing-move).
  return expr;
}

}  // namespace
// Parses the "input" bindings: records external names and the per-input
// flatten/broadcast flags, and rewrites config_["input"] to the internal names.
Result<void> Builder::SetInputs() {
  OUTCOME_TRY(auto exprs, ParseStringArray(config_["input"]));
  vector<string> internal_names;
  internal_names.reserve(exprs.size());
  for (const auto& item : exprs) {
    const auto e = ParseExpr(item);
    inputs_.push_back(e.rhs);
    internal_names.push_back(e.lhs);
    flatten_.push_back(e.operation == '*');
    broadcast_.push_back(e.operation == '+');
  }
  config_["input"] = to_value(internal_names);
  return success();
}
// Parses the "output" bindings: records external names and the per-output
// unflatten flag, and rewrites config_["output"] to the internal names.
Result<void> Builder::SetOutputs() {
  OUTCOME_TRY(auto exprs, ParseStringArray(config_["output"]));
  vector<string> internal_names;
  internal_names.reserve(exprs.size());
  for (const auto& item : exprs) {
    const auto e = ParseExpr(item);
    outputs_.push_back(e.lhs);
    internal_names.push_back(e.rhs);
    unflatten_.push_back(e.operation == '*');
  }
  config_["output"] = to_value(internal_names);
  return success();
}
// Stores the config and caches the optional "name" field. config_ is declared
// before name_, so reading it in the init-list is well-defined.
Builder::Builder(Value config)
    : config_(std::move(config)), name_(config_.value<std::string>("name", "")) {}
// Builds the node: parses I/O bindings, delegates to BuildImpl(), and wraps
// the result with a Flattened adapter when any input requested flattening.
Result<unique_ptr<Node>> Builder::Build() {
  OUTCOME_TRY(SetInputs());
  OUTCOME_TRY(SetOutputs());
  OUTCOME_TRY(auto node, BuildImpl());
  // use Throttle to constrain resource usage (not implemented yet)
  if (config_.value("throttle", 0) != 0) {
    MMDEPLOY_ERROR("Throttle is not implemented yet");
    return Status(eNotSupported);
  }
  // create a FlattenedScope to flatten inputs and unflatten outputs
  const bool wants_flatten =
      std::find(flatten_.begin(), flatten_.end(), true) != flatten_.end();
  if (wants_flatten) {
    node = std::make_unique<Flattened>(std::move(node), flatten_, broadcast_, unflatten_);
  }
  return std::move(node);
}
// Factory: looks up the registered Builder creator named by config["type"]
// and instantiates it. Legacy configs without "type" that carry a "pipeline"
// section are accepted for backward compatibility.
Result<unique_ptr<Builder>> Builder::CreateFromConfig(const Value& config) {
  auto type = config.value<string>("type", "");
  auto cfg = config;
  // backward compatibility: treat a bare "pipeline" section as type "Pipeline"
  if (type.empty() && config.contains("pipeline")) {
    type = "Pipeline";
    cfg = config["pipeline"];
    if (config.contains("context")) {
      cfg["context"] = config["context"];
    }
  }
  auto creator = gRegistry<Builder>().Get(type);
  if (creator == nullptr) {
    MMDEPLOY_ERROR("failed to find node creator: {}", type);
    return Status(eEntryNotFound);
  }
  auto builder = creator->Create(cfg);
  if (builder == nullptr) {
    MMDEPLOY_ERROR("failed to create node builder: {}", type);
    return Status(eFail);
  }
  return std::move(builder);
}
// Normalizes a config entry to a list of strings: an array is converted
// element-wise, a single string becomes a one-element list, anything else is
// an invalid argument.
Result<std::vector<std::string>> ParseStringArray(const Value& value) {
  if (value.is_array()) {
    return from_value<std::vector<std::string>>(value);
  }
  if (value.is_string()) {
    return std::vector{value.get<std::string>()};
  }
  return Status(eInvalidArgument);
}
MMDEPLOY_DEFINE_REGISTRY(Builder);
} // namespace mmdeploy::graph
// Copyright (c) OpenMMLab. All rights reserved.
#ifndef MMDEPLOY_SRC_EXPERIMENTAL_PIPELINE_IR_H_
#define MMDEPLOY_SRC_EXPERIMENTAL_PIPELINE_IR_H_
#include "mmdeploy/core/model.h"
#include "mmdeploy/core/module.h"
#include "mmdeploy/core/mpl/span.h"
#include "mmdeploy/core/registry.h"
#include "mmdeploy/core/status_code.h"
#include "mmdeploy/core/utils/formatter.h"
#include "mmdeploy/execution/schedulers/registry.h"
namespace mmdeploy::graph {
using std::pair;
using std::string;
using std::unique_ptr;
using std::vector;
template <class... Ts>
using Sender = TypeErasedSender<Ts...>;
// Interface of a computation node in the execution graph: consumes and
// produces type-erased senders of `Value`.
class MMDEPLOY_API Node {
 public:
  virtual ~Node() = default;
  // Attach this node's computation to `input` and return the resulting sender.
  virtual Sender<Value> Process(Sender<Value> input) = 0;
  // Function object adapting Process() to the sender pipe protocol.
  struct process_t {
    Sender<Value> operator()(Sender<Value> sender, Node* node) const {
      return node->Process(std::move(sender));
    }
  };
  // Returns a pipeable closure, enabling `sender | node->Process()`.
  __closure::_BinderBack<process_t, Node*> Process() { return {{}, {}, {this}}; }
};
// Base class for graph node builders: parses "input"/"output" port bindings
// from the config and delegates node construction to BuildImpl().
class MMDEPLOY_API Builder {
 public:
  virtual ~Builder() = default;
  // External input names (right-hand side of the "input" expressions).
  const vector<string>& inputs() const noexcept { return inputs_; }
  // External output names (left-hand side of the "output" expressions).
  const vector<string>& outputs() const noexcept { return outputs_; }
  const string& name() const noexcept { return name_; }
  // Parses the I/O bindings, builds the node via BuildImpl(), and wraps it
  // with a flattening adapter when the port expressions request it.
  Result<unique_ptr<Node>> Build();
  // Creates a concrete Builder from config["type"] via the registry.
  static Result<unique_ptr<Builder>> CreateFromConfig(const Value& config);

 protected:
  explicit Builder(Value config);
  Result<void> SetInputs();
  Result<void> SetOutputs();
  virtual Result<unique_ptr<Node>> BuildImpl() = 0;

 protected:
  Value config_;
  string name_;
  vector<string> inputs_;     // external input names
  vector<string> outputs_;    // external output names
  vector<bool> flatten_;      // per-input '*' flag
  vector<bool> broadcast_;    // per-input '+' flag
  vector<bool> unflatten_;    // per-output '*' flag
};
MMDEPLOY_API Result<std::vector<std::string>> ParseStringArray(const Value& value);
MMDEPLOY_DECLARE_REGISTRY(Builder, std::unique_ptr<Builder>(const Value& config));
} // namespace mmdeploy::graph
#endif // MMDEPLOY_SRC_EXPERIMENTAL_PIPELINE_IR_H_
// Copyright (c) OpenMMLab. All rights reserved.
#include "logger.h"
#include <cstdlib>
#if SPDLOG_VER_MAJOR >= 1
#if defined(__ANDROID__)
#include <spdlog/sinks/android_sink.h>
#else
#include <spdlog/sinks/stdout_color_sinks.h>
#if defined(_MSC_VER)
#include <spdlog/sinks/stdout_sinks.h>
#endif
#endif
#endif
#if SPDLOG_VER_MAJOR >= 1 && SPDLOG_VER_MINOR >= 6
#define MMDEPLOY_SPDLOG_HAS_LOAD_ENV_LEVELS 1
#include <spdlog/cfg/env.h>
#endif
namespace mmdeploy {
static void LoadEnvLevels() {
auto p = std::getenv("SPDLOG_LEVEL");
if (p) {
const std::string str(p);
if (str == "trace") {
spdlog::set_level(spdlog::level::trace);
} else if (str == "debug") {
spdlog::set_level(spdlog::level::debug);
} else if (str == "info") {
spdlog::set_level(spdlog::level::info);
} else if (str == "warn") {
spdlog::set_level(spdlog::level::warn);
} else if (str == "err") {
spdlog::set_level(spdlog::level::err);
} else if (str == "critical") {
spdlog::set_level(spdlog::level::critical);
} else if (str == "off") {
spdlog::set_level(spdlog::level::off);
}
}
}
// Creates the default "mmdeploy" logger with a platform-appropriate sink,
// applying the SPDLOG_LEVEL environment variable first.
std::shared_ptr<spdlog::logger> CreateDefaultLogger() {
#if MMDEPLOY_SPDLOG_HAS_LOAD_ENV_LEVELS
  // spdlog >= 1.6 parses SPDLOG_LEVEL itself (incl. per-logger syntax).
  spdlog::cfg::load_env_levels();
#else
  // Older spdlog: use the hand-rolled single-level parser above.
  LoadEnvLevels();
#endif
  constexpr const auto logger_name = "mmdeploy";
#if defined(__ANDROID__)
  // Android: route to logcat.
  return spdlog::android_logger_mt(logger_name);
#elif defined(_MSC_VER)
  // MSVC: plain stdout sink (no ANSI color handling).
  return spdlog::stdout_logger_mt(logger_name);
#else
  return spdlog::stdout_color_mt(logger_name);
#endif
}
// Accessor for the process-wide logger slot.
std::shared_ptr<spdlog::logger> &gLogger() {
  // ! leaky singleton: intentionally never destroyed so logging remains
  // usable during static destruction (avoids shutdown-order problems).
  static auto ptr = new std::shared_ptr<spdlog::logger>{CreateDefaultLogger()};
  return *ptr;
}

// Returns the current global logger (non-owning pointer).
spdlog::logger *GetLogger() { return gLogger().get(); }

// Replaces the global logger. The no-op deleter means the SDK takes no
// ownership — the caller must keep `logger` alive for the process lifetime.
void SetLogger(spdlog::logger *logger) {
  gLogger() = std::shared_ptr<spdlog::logger>(logger, [](auto) {});
}
} // namespace mmdeploy
// Copyright (c) OpenMMLab. All rights reserved.
#ifndef CORE_LOG_H
#define CORE_LOG_H
#include <spdlog/spdlog.h>
#include "mmdeploy/core/macro.h"
namespace mmdeploy {
MMDEPLOY_API spdlog::logger *GetLogger();
MMDEPLOY_API void SetLogger(spdlog::logger *logger);
} // namespace mmdeploy
// Honor spdlog settings if supported
#if defined(SPDLOG_ACTIVE_LEVEL) && defined(SPDLOG_LEVEL_OFF)
#define MMDEPLOY_LEVEL_TRACE SPDLOG_LEVEL_TRACE
#define MMDEPLOY_LEVEL_DEBUG SPDLOG_LEVEL_DEBUG
#define MMDEPLOY_LEVEL_INFO SPDLOG_LEVEL_INFO
#define MMDEPLOY_LEVEL_WARN SPDLOG_LEVEL_WARN
#define MMDEPLOY_LEVEL_ERROR SPDLOG_LEVEL_ERROR
#define MMDEPLOY_LEVEL_CRITICAL SPDLOG_LEVEL_CRITICAL
#define MMDEPLOY_LEVEL_OFF SPDLOG_LEVEL_OFF
#if !defined(MMDEPLOY_ACTIVE_LEVEL)
#define MMDEPLOY_ACTIVE_LEVEL SPDLOG_ACTIVE_LEVEL
#endif
#else
#define MMDEPLOY_LEVEL_TRACE 0
#define MMDEPLOY_LEVEL_DEBUG 1
#define MMDEPLOY_LEVEL_INFO 2
#define MMDEPLOY_LEVEL_WARN 3
#define MMDEPLOY_LEVEL_ERROR 4
#define MMDEPLOY_LEVEL_CRITICAL 5
#define MMDEPLOY_LEVEL_OFF 6
#if !defined(MMDEPLOY_ACTIVE_LEVEL)
#define MMDEPLOY_ACTIVE_LEVEL MMDEPLOY_LEVEL_INFO
#endif
#endif
// SPDLOG_LOGGER_CALL additionally records file/line/function source info.
#ifdef SPDLOG_LOGGER_CALL
#define MMDEPLOY_LOG(level, ...) SPDLOG_LOGGER_CALL(mmdeploy::GetLogger(), level, __VA_ARGS__)
#else
#define MMDEPLOY_LOG(level, ...) mmdeploy::GetLogger()->log(level, __VA_ARGS__)
#endif
// Compiled-out levels expand to `(void)0` WITHOUT a trailing semicolon, so
// `MMDEPLOY_XXX(...);` stays a single statement. A semicolon inside the
// macro would produce a stray empty statement and break unbraced if/else
// around logging calls (the enabled branches also expand without one).
#if MMDEPLOY_ACTIVE_LEVEL <= MMDEPLOY_LEVEL_TRACE
#define MMDEPLOY_TRACE(...) MMDEPLOY_LOG(spdlog::level::trace, __VA_ARGS__)
#else
#define MMDEPLOY_TRACE(...) (void)0
#endif
#if MMDEPLOY_ACTIVE_LEVEL <= MMDEPLOY_LEVEL_DEBUG
#define MMDEPLOY_DEBUG(...) MMDEPLOY_LOG(spdlog::level::debug, __VA_ARGS__)
#else
#define MMDEPLOY_DEBUG(...) (void)0
#endif
#if MMDEPLOY_ACTIVE_LEVEL <= MMDEPLOY_LEVEL_INFO
#define MMDEPLOY_INFO(...) MMDEPLOY_LOG(spdlog::level::info, __VA_ARGS__)
#else
#define MMDEPLOY_INFO(...) (void)0
#endif
#if MMDEPLOY_ACTIVE_LEVEL <= MMDEPLOY_LEVEL_WARN
#define MMDEPLOY_WARN(...) MMDEPLOY_LOG(spdlog::level::warn, __VA_ARGS__)
#else
#define MMDEPLOY_WARN(...) (void)0
#endif
#if MMDEPLOY_ACTIVE_LEVEL <= MMDEPLOY_LEVEL_ERROR
#define MMDEPLOY_ERROR(...) MMDEPLOY_LOG(spdlog::level::err, __VA_ARGS__)
#else
#define MMDEPLOY_ERROR(...) (void)0
#endif
#if MMDEPLOY_ACTIVE_LEVEL <= MMDEPLOY_LEVEL_CRITICAL
#define MMDEPLOY_CRITICAL(...) MMDEPLOY_LOG(spdlog::level::critical, __VA_ARGS__)
#else
#define MMDEPLOY_CRITICAL(...) (void)0
#endif
#endif // !CORE_LOG_H
// Copyright (c) OpenMMLab. All rights reserved.
#ifndef MMDEPLOY_SRC_CORE_MARCO_H_
#define MMDEPLOY_SRC_CORE_MARCO_H_
// Symbol-visibility attribute: __declspec on MSVC, default visibility on
// GCC/Clang (projects typically build with -fvisibility=hidden).
#ifndef MMDEPLOY_EXPORT
#ifdef _MSC_VER
#define MMDEPLOY_EXPORT __declspec(dllexport)
#else
#define MMDEPLOY_EXPORT __attribute__((visibility("default")))
#endif
#endif
// MMDEPLOY_API marks the public API surface; it expands to the export
// attribute only when building the library itself (MMDEPLOY_API_EXPORTS).
#ifndef MMDEPLOY_API
#ifdef MMDEPLOY_API_EXPORTS
#define MMDEPLOY_API MMDEPLOY_EXPORT
#else
#define MMDEPLOY_API
#endif
#endif
// Two-level concat so that macro arguments are expanded before pasting.
#define _MMDEPLOY_PP_CONCAT_IMPL(s1, s2) s1##s2
#define MMDEPLOY_PP_CONCAT(s1, s2) _MMDEPLOY_PP_CONCAT_IMPL(s1, s2)
#define MMDEPLOY_PP_EXPAND(...) __VA_ARGS__
// Generates an identifier unique within the TU by appending __COUNTER__
// (or __LINE__ when the compiler lacks it).
// ! Be aware of ODR violation when using __COUNTER__
#ifdef __COUNTER__
#define MMDEPLOY_ANONYMOUS_VARIABLE(str) MMDEPLOY_PP_CONCAT(str, __COUNTER__)
#else
#define MMDEPLOY_ANONYMOUS_VARIABLE(str) MMDEPLOY_PP_CONCAT(str, __LINE__)
#endif
// Counts the number of variadic arguments (1..63): the arguments shift the
// reversed sequence so that the correct count lands in slot N.
#define MMDEPLOY_PP_NARG(...) _MMDEPLOY_PP_NARG(__VA_ARGS__, _MMDEPLOY_PP_RESQ_N())
#define _MMDEPLOY_PP_NARG(...) _MMDEPLOY_PP_ARG_N(__VA_ARGS__)
#define _MMDEPLOY_PP_ARG_N(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, \
                           _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, \
                           _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, \
                           _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, \
                           _59, _60, _61, _62, _63, N, ...) \
  N
#define _MMDEPLOY_PP_RESQ_N() \
  63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, \
  39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, \
  16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
// MMDEPLOY_PP_MAP_k applies f to each of k arguments, producing a
// comma-separated list; the ladder recurses down to MAP_1.
#define MMDEPLOY_PP_MAP_1(f, x) f(x)
#define MMDEPLOY_PP_MAP_2(f, x, ...) f(x), MMDEPLOY_PP_MAP_1(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_3(f, x, ...) f(x), MMDEPLOY_PP_MAP_2(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_4(f, x, ...) f(x), MMDEPLOY_PP_MAP_3(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_5(f, x, ...) f(x), MMDEPLOY_PP_MAP_4(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_6(f, x, ...) f(x), MMDEPLOY_PP_MAP_5(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_7(f, x, ...) f(x), MMDEPLOY_PP_MAP_6(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_8(f, x, ...) f(x), MMDEPLOY_PP_MAP_7(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_9(f, x, ...) f(x), MMDEPLOY_PP_MAP_8(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_10(f, x, ...) f(x), MMDEPLOY_PP_MAP_9(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_11(f, x, ...) f(x), MMDEPLOY_PP_MAP_10(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_12(f, x, ...) f(x), MMDEPLOY_PP_MAP_11(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_13(f, x, ...) f(x), MMDEPLOY_PP_MAP_12(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_14(f, x, ...) f(x), MMDEPLOY_PP_MAP_13(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_15(f, x, ...) f(x), MMDEPLOY_PP_MAP_14(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_16(f, x, ...) f(x), MMDEPLOY_PP_MAP_15(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_17(f, x, ...) f(x), MMDEPLOY_PP_MAP_16(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_18(f, x, ...) f(x), MMDEPLOY_PP_MAP_17(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_19(f, x, ...) f(x), MMDEPLOY_PP_MAP_18(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_20(f, x, ...) f(x), MMDEPLOY_PP_MAP_19(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_21(f, x, ...) f(x), MMDEPLOY_PP_MAP_20(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_22(f, x, ...) f(x), MMDEPLOY_PP_MAP_21(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_23(f, x, ...) f(x), MMDEPLOY_PP_MAP_22(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_24(f, x, ...) f(x), MMDEPLOY_PP_MAP_23(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_25(f, x, ...) f(x), MMDEPLOY_PP_MAP_24(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_26(f, x, ...) f(x), MMDEPLOY_PP_MAP_25(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_27(f, x, ...) f(x), MMDEPLOY_PP_MAP_26(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_28(f, x, ...) f(x), MMDEPLOY_PP_MAP_27(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_29(f, x, ...) f(x), MMDEPLOY_PP_MAP_28(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_30(f, x, ...) f(x), MMDEPLOY_PP_MAP_29(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_31(f, x, ...) f(x), MMDEPLOY_PP_MAP_30(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_32(f, x, ...) f(x), MMDEPLOY_PP_MAP_31(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_33(f, x, ...) f(x), MMDEPLOY_PP_MAP_32(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_34(f, x, ...) f(x), MMDEPLOY_PP_MAP_33(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_35(f, x, ...) f(x), MMDEPLOY_PP_MAP_34(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_36(f, x, ...) f(x), MMDEPLOY_PP_MAP_35(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_37(f, x, ...) f(x), MMDEPLOY_PP_MAP_36(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_38(f, x, ...) f(x), MMDEPLOY_PP_MAP_37(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_39(f, x, ...) f(x), MMDEPLOY_PP_MAP_38(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_40(f, x, ...) f(x), MMDEPLOY_PP_MAP_39(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_41(f, x, ...) f(x), MMDEPLOY_PP_MAP_40(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_42(f, x, ...) f(x), MMDEPLOY_PP_MAP_41(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_43(f, x, ...) f(x), MMDEPLOY_PP_MAP_42(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_44(f, x, ...) f(x), MMDEPLOY_PP_MAP_43(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_45(f, x, ...) f(x), MMDEPLOY_PP_MAP_44(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_46(f, x, ...) f(x), MMDEPLOY_PP_MAP_45(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_47(f, x, ...) f(x), MMDEPLOY_PP_MAP_46(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_48(f, x, ...) f(x), MMDEPLOY_PP_MAP_47(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_49(f, x, ...) f(x), MMDEPLOY_PP_MAP_48(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_50(f, x, ...) f(x), MMDEPLOY_PP_MAP_49(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_51(f, x, ...) f(x), MMDEPLOY_PP_MAP_50(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_52(f, x, ...) f(x), MMDEPLOY_PP_MAP_51(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_53(f, x, ...) f(x), MMDEPLOY_PP_MAP_52(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_54(f, x, ...) f(x), MMDEPLOY_PP_MAP_53(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_55(f, x, ...) f(x), MMDEPLOY_PP_MAP_54(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_56(f, x, ...) f(x), MMDEPLOY_PP_MAP_55(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_57(f, x, ...) f(x), MMDEPLOY_PP_MAP_56(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_58(f, x, ...) f(x), MMDEPLOY_PP_MAP_57(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_59(f, x, ...) f(x), MMDEPLOY_PP_MAP_58(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_60(f, x, ...) f(x), MMDEPLOY_PP_MAP_59(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_61(f, x, ...) f(x), MMDEPLOY_PP_MAP_60(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_62(f, x, ...) f(x), MMDEPLOY_PP_MAP_61(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_63(f, x, ...) f(x), MMDEPLOY_PP_MAP_62(f, __VA_ARGS__)
#define MMDEPLOY_PP_MAP_64(f, x, ...) f(x), MMDEPLOY_PP_MAP_63(f, __VA_ARGS__)
// MMDEPLOY_PP_MAP(f, a, b, c) -> f(a), f(b), f(c); counts the arguments and
// dispatches to the matching MMDEPLOY_PP_MAP_n above.
#define MMDEPLOY_PP_MAP(f, ...) \
  _MMDEPLOY_PP_MAP_IMPL1(f, MMDEPLOY_PP_NARG(__VA_ARGS__), __VA_ARGS__)
#define _MMDEPLOY_PP_MAP_IMPL1(f, n, ...) \
  _MMDEPLOY_PP_MAP_IMPL2(f, MMDEPLOY_PP_CONCAT(MMDEPLOY_PP_MAP_, n), __VA_ARGS__)
#define _MMDEPLOY_PP_MAP_IMPL2(f, M_, ...) M_(f, __VA_ARGS__)
#endif // MMDEPLOY_SRC_CORE_MARCO_H_
// Copyright (c) OpenMMLab. All rights reserved.
#include "mat.h"
namespace mmdeploy::framework {
// Constructs a Mat of h*w pixels and allocates its buffer on `device`
// (unless the device is invalid or the size is zero).
Mat::Mat(int h, int w, PixelFormat format, DataType type, Device device, Allocator allocator)
    : format_(format), type_(type), width_(w), height_(h) {
  // Derive channel count and average bits-per-pixel from the pixel format.
  int bits_per_pixel = 0;
  switch (format) {
    case PixelFormat::kGRAYSCALE:
      channel_ = 1;
      bits_per_pixel = 8;
      break;
    case PixelFormat::kNV12:  // fall through
    case PixelFormat::kNV21:
      // Semi-planar YUV 4:2:0 stored as one plane: 12 bits per pixel on
      // average; chroma subsampling requires an even width.
      channel_ = 1;
      bits_per_pixel = 12;
      assert(w % 2 == 0);
      break;
    case PixelFormat::kBGR:  // fall through
    case PixelFormat::kRGB:
      channel_ = 3;
      bits_per_pixel = 24;
      break;
    case PixelFormat::kBGRA:
      channel_ = 4;
      bits_per_pixel = 32;
      break;
    default:
      throw_exception(eNotSupported);
  }
  // size_ counts elements; bytes_ starts as the 8-bit-per-channel byte count
  // and is then scaled by the element size of the data type.
  size_ = height_ * width_ * channel_;
  bytes_ = height_ * width_ * bits_per_pixel / 8;
  switch (type) {
    case DataType::kFLOAT:
      bytes_ *= sizeof(float);
      break;
    case DataType::kHALF:
      bytes_ *= 2;
      break;
    case DataType::kINT32:
      bytes_ *= sizeof(int32_t);
      break;
    case DataType::kINT8:
      // 8-bit data: bytes_ already correct.
      break;
    default:
      throw_exception(eNotSupported);
      break;
  }
  // A negative platform id denotes an invalid/placeholder device; in that
  // case (or for empty images) no buffer is allocated.
  if (device.platform_id() >= 0 && bytes_ > 0) {
    buf_ = Buffer(device, bytes_, std::move(allocator));
  }
}
// Wraps caller-provided data (shared ownership) without copying it.
// NOTE(review): the delegated ctor may already allocate a buffer on `device`
// that is immediately replaced here — looks like a wasted allocation for
// valid devices; confirm and consider delegating with an invalid device.
Mat::Mat(int h, int w, PixelFormat format, DataType type, std::shared_ptr<void> data, Device device)
    : Mat(h, w, format, type, device) {
  buf_ = Buffer(device, bytes_, std::move(data));
}

// Wraps caller-provided raw data; the caller retains ownership and must keep
// `data` alive for the lifetime of this Mat.
Mat::Mat(int h, int w, PixelFormat format, DataType type, void* data, Device device)
    : Mat(h, w, format, type, device) {
  buf_ = Buffer(device, bytes_, data);
}
// Device the underlying buffer resides on.
Device Mat::device() const { return buf_.GetDevice(); }
// Mutable / read-only access to the underlying buffer.
Buffer& Mat::buffer() { return buf_; }
const Buffer& Mat::buffer() const { return buf_; }
} // namespace mmdeploy::framework
// Copyright (c) OpenMMLab. All rights reserved.
#ifndef CORE_MAT_H
#define CORE_MAT_H
#include <memory>
#include <vector>
#include "mmdeploy/core/device.h"
#include "mmdeploy/core/mpl/type_traits.h"
#include "mmdeploy/core/types.h"
namespace mmdeploy {
namespace framework {
// 2-D image container with reference-counted device/host storage.
class MMDEPLOY_API Mat final {
 public:
  Mat() = default;
  /**
   * @brief construct a Mat for an image
   * @param h height of an image
   * @param w width of an image
   * @param format pixel format of an image, rgb, bgr, gray etc. Note that in
   *   case of nv12 or nv21, height is the real height of an image,
   *   not height * 3 / 2
   * @param type data type of an pixel in each channel
   * @param device location Mat's buffer stores
   */
  Mat(int h, int w, PixelFormat format, DataType type, Device device = Device{0},
      Allocator allocator = {});
  /**@brief construct a Mat for an image using custom data
   * @example
   * ``` c++
   * cv::Mat image = imread("test.jpg");
   * std::shared_ptr<void> data(image.data, [image=image](void* p){});
   * mmdeploy::Mat mat(image.rows, image.cols, kBGR, kINT8, data);
   * ```
   * @param h height of an image
   * @param w width of an image
   * @param format pixel format of an image, rgb, bgr, gray etc. Note that in
   *   case of nv12 or nv21, height is the real height of an image,
   *   not height * 3 / 2
   * @param type data type of an pixel in each channel
   * @param data custom data
   * @param device location where `data` is on
   */
  Mat(int h, int w, PixelFormat format, DataType type, std::shared_ptr<void> data,
      Device device = Device{0});
  /**
   * @brief construct a Mat for an image using custom data
   * @param h height of an image
   * @param w width of an image
   * @param format pixel format of an image, rgb, bgr, gray etc. Note that in
   *   case of nv12 or nv21, height is the real height of an image,
   *   not height * 3 / 2
   * @param type data type of an pixel in each channel
   * @param data custom data (not owned; must outlive the Mat)
   * @param device location where `data` is on
   */
  Mat(int h, int w, PixelFormat format, DataType type, void* data, Device device = Device{0});
  // Device the underlying buffer resides on.
  Device device() const;
  // Access to the underlying (possibly shared) buffer.
  Buffer& buffer();
  const Buffer& buffer() const;
  PixelFormat pixel_format() const { return format_; }
  DataType type() const { return type_; }
  int height() const { return height_; }
  int width() const { return width_; }
  int channel() const { return channel_; }
  // Number of elements (height * width * channel).
  int size() const { return size_; }
  // Total buffer size in bytes.
  int byte_size() const { return bytes_; }
  // Reinterpreted pointer to the raw buffer; T must match the actual layout.
  template <typename T>
  T* data() const {
    return reinterpret_cast<T*>(buf_.GetNative());
  }

 private:
  Buffer buf_;                                    // storage (may be empty)
  PixelFormat format_{PixelFormat::kGRAYSCALE};
  DataType type_{DataType::kINT8};
  int width_{0};
  int height_{0};
  int channel_{0};
  int size_{0};  // size of elements in mat
  int bytes_{0};  // total byte size of the buffer
};
} // namespace framework
MMDEPLOY_REGISTER_TYPE_ID(framework::Mat, 7);
} // namespace mmdeploy
#endif // !CORE_MAT_H
// Copyright (c) OpenMMLab. All rights reserved.
#include "model.h"
#include "mmdeploy/core/logger.h"
#include "mmdeploy/core/model_impl.h"
#include "mmdeploy/core/utils/filesystem.h"
#include "mmdeploy/core/utils/formatter.h"
using namespace std;
namespace mmdeploy::framework {
// Loads an SDK model from disk; throws (via throw_exception) on failure so a
// successfully constructed Model is always valid.
Model::Model(const std::string& model_path) {
  if (auto r = Model::Init(model_path); !r) {
    MMDEPLOY_ERROR("Failed to load model \"{}\"", model_path);
    r.error().throw_exception();
  }
}

// Loads an SDK model from an in-memory buffer; .value() throws on failure.
Model::Model(const void* buffer, size_t size) { Init(buffer, size).value(); }
// Initializes from a path by probing every registered ModelImpl in turn;
// the first implementation that can open the path wins.
Result<void> Model::Init(const std::string& model_path) {
  model_path_ = model_path;
  if (!fs::exists(model_path)) {
    MMDEPLOY_ERROR("File not found: \"{}\"", model_path);
    return Status(eFileNotExist);
  }
  for (const auto& creator : gRegistry<ModelImpl>().Creators()) {
    // An impl that accepts the path must also deliver a readable meta file.
    if (auto impl = creator->Create(); impl->Init(model_path)) {
      OUTCOME_TRY(auto meta, impl->ReadMeta());
      impl_ = std::move(impl);
      meta_ = std::move(meta);
      MMDEPLOY_INFO("[{}] Load model: \"{}\"", creator->name(), model_path);
      return success();
    }
  }
  MMDEPLOY_ERROR("Failed to load model: \"{}\", implementations tried: {}", model_path,
                 gRegistry<ModelImpl>().List());
  return Status(eNotSupported);
}
// Path passed to Init(path); empty when initialized from a buffer.
const std::string& Model::GetModelPath() const { return model_path_; }

// Initializes from an in-memory model buffer, probing registered
// implementations the same way as the path-based Init().
Result<void> Model::Init(const void* buffer, size_t size) {
  for (const auto& creator : gRegistry<ModelImpl>().Creators()) {
    if (auto impl = creator->Create(); impl->Init(buffer, size)) {
      OUTCOME_TRY(auto meta, impl->ReadMeta());
      impl_ = std::move(impl);
      meta_ = std::move(meta);
      MMDEPLOY_INFO("[{}] Parse model", creator->name());
      return success();
    }
  }
  MMDEPLOY_ERROR("Failed to parse model buffer, implementations tried: {}",
                 gRegistry<ModelImpl>().List());
  return Status(eNotSupported);
}
// Looks up the meta entry of the model named `name`; linear scan is fine
// since a deploy meta holds only a handful of models.
Result<model_meta_info_t> Model::GetModelConfig(const std::string& name) const {
  for (const auto& entry : meta_.models) {
    if (entry.name == name) {
      return entry;
    }
  }
  MMDEPLOY_ERROR("Cannot find model '{}' in meta file", name);
  return Status(eEntryNotFound);
}
// Reads a file stored inside the SDK model (archive or directory).
// NOTE(review): marked noexcept yet dereferences impl_ unchecked — calling
// this on a default-constructed (invalid) Model is undefined; verify callers
// check `operator bool()` first.
Result<std::string> Model::ReadFile(const std::string& file_path) noexcept {
  return impl_->ReadFile(file_path);
}

// Reads and parses a config file (e.g. pipeline json) from the SDK model.
Result<Value> Model::ReadConfig(const string& config_path) noexcept {
  return impl_->ReadConfig(config_path);
}
MMDEPLOY_DEFINE_REGISTRY(ModelImpl);
} // namespace mmdeploy::framework
// Copyright (c) OpenMMLab. All rights reserved.
#ifndef CORE_SDK_MODEL_H
#define CORE_SDK_MODEL_H
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "mmdeploy/core/mpl/type_traits.h"
#include "mmdeploy/core/serialization.h"
#include "mmdeploy/core/types.h"
#include "mmdeploy/core/value.h"
namespace mmdeploy {
namespace framework {
// Meta information of a single model artifact inside an SDK model package.
struct model_meta_info_t {
  std::string name;      // identifier referenced by pipeline configs
  std::string net;       // network/graph file name
  std::string weights;   // weights file name
  std::string backend;   // inference backend name
  int batch_size;
  std::string precision;
  bool dynamic_shape;    // whether variable input shapes are supported
  MMDEPLOY_ARCHIVE_MEMBERS(name, net, weights, backend, batch_size, precision, dynamic_shape);
};

// Meta information of a whole SDK model package (its deploy meta file).
struct deploy_meta_info_t {
  std::string version;
  std::vector<model_meta_info_t> models;
  MMDEPLOY_ARCHIVE_MEMBERS(version, models);
};
class ModelImpl;
/**
* @class Model
* @brief Load SDK model from file or buffer.
*/
class MMDEPLOY_API Model {
 public:
  Model() = default;
  // Throwing convenience ctors; delegate to the corresponding Init().
  explicit Model(const std::string& model_path);
  explicit Model(const void* buffer, size_t size);
  ~Model() = default;
  /**
   * @brief Load SDK model.
   * @param model_path file path of the model. It can be a file or a
   *   directory.
   * @return status with an error code.
   */
  Result<void> Init(const std::string& model_path);
  /**
   * @brief Load SDK model from an in-memory buffer.
   * @param buffer start of the model data
   * @param size buffer size in bytes
   * @return status with an error code.
   */
  Result<void> Init(const void* buffer, size_t size);
  /**
   * @brief Get model's meta info
   * @param name the name of a model in the SDK model file
   * @return meta info on success, eEntryNotFound otherwise
   */
  Result<model_meta_info_t> GetModelConfig(const std::string& name) const;
  /**
   * @brief Read file from the SDK model
   * @param file_path path relative to the root directory of the model.
   * @return the content of file on success
   */
  Result<std::string> ReadFile(const std::string& file_path) noexcept;
  /**
   * @brief Read and parse a config file from the SDK model
   * @param config_path path relative to the root directory of the model.
   * @return the parsed config on success
   */
  Result<Value> ReadConfig(const std::string& config_path) noexcept;
  /**
   * @brief get meta information of the model
   * @return SDK model's meta information
   */
  const deploy_meta_info_t& meta() const { return meta_; }
  /**
   * @brief Check if an instance of `Model` is valid
   * @return the status of an instance of `Model`
   */
  explicit operator bool() const { return impl_ != nullptr; }
  /**
   * @brief get model_path that init with DirectoryModel
   * @return file path of an sdk model
   */
  const std::string& GetModelPath() const;

 private:
  std::string model_path_;           // set by Init(path); empty for buffers
  std::shared_ptr<ModelImpl> impl_;  // backend chosen during Init()
  deploy_meta_info_t meta_;          // parsed deploy meta
};
} // namespace framework
MMDEPLOY_REGISTER_TYPE_ID(framework::Model, 5);
} // namespace mmdeploy
#endif // !CORE_SDK_MODEL_H
// Copyright (c) OpenMMLab. All rights reserved.
#ifndef MMDEPLOY_MODEL_IMPL_H
#define MMDEPLOY_MODEL_IMPL_H
#include "mmdeploy/core/model.h"
#include "mmdeploy/core/registry.h"
namespace mmdeploy::framework {
/**
* @class ModelImpl
* @brief SDK model's implementation interface
*/
/**
 * @class ModelImpl
 * @brief SDK model's implementation interface; concrete subclasses handle a
 * particular storage format (e.g. directory, zip) and register themselves.
 */
class ModelImpl {
 public:
  virtual ~ModelImpl() = default;
  /**
   * @brief Load an SDK model.
   * @param model_path path of the model. It can be a file or a directory.
   * @return status with an error code; the default rejects paths so that
   *   implementations opt in to the sources they support.
   */
  virtual Result<void> Init(const std::string& model_path) { return Status(eNotSupported); }
  // Load an SDK model from an in-memory buffer (optional to support).
  virtual Result<void> Init(const void* buffer, size_t size) { return Status(eNotSupported); }
  /**
   * @brief Read specified file from a SDK model
   * @param file_path path relative to the root directory of the model.
   * @return the content of the file on success
   */
  virtual Result<std::string> ReadFile(const std::string& file_path) const = 0;
  // Read and parse a config file from the model.
  virtual Result<Value> ReadConfig(const std::string& config_path) const = 0;
  /**
   * @brief get meta information of an sdk model
   * @return SDK model's meta information
   */
  virtual Result<deploy_meta_info_t> ReadMeta() const = 0;
};
MMDEPLOY_DECLARE_REGISTRY(ModelImpl, std::unique_ptr<ModelImpl>());
} // namespace mmdeploy::framework
#endif // MMDEPLOY_MODEL_IMPL_H
// Copyright (c) OpenMMLab. All rights reserved.
#include "module.h"
#include "registry.h"
namespace mmdeploy {
MMDEPLOY_DEFINE_REGISTRY(Module);
} // namespace mmdeploy
// Copyright (c) OpenMMLab. All rights reserved.
#ifndef MMDEPLOY_SRC_CORE_MODULE_H_
#define MMDEPLOY_SRC_CORE_MODULE_H_
#include "mmdeploy/core/macro.h"
#include "mmdeploy/core/registry.h"
#include "mmdeploy/core/status_code.h"
#include "mmdeploy/core/value.h"
namespace mmdeploy {
// Interface of a computational module: a synchronous Value -> Value
// transformation, the basic unit of work registered in the Module registry.
class MMDEPLOY_API Module {
 public:
  virtual ~Module() = default;
  // Process `args` and return the result value, or an error status.
  virtual Result<Value> Process(const Value& args) = 0;
};
MMDEPLOY_DECLARE_REGISTRY(Module, std::unique_ptr<Module>(const Value& config));
} // namespace mmdeploy
#endif // MMDEPLOY_SRC_CORE_MODULE_H_
// Copyright (c) OpenMMLab. All rights reserved.
#ifndef MMDEPLOY_SRC_CORE_CPP_DETECTED_H_
#define MMDEPLOY_SRC_CORE_CPP_DETECTED_H_
#include <type_traits>
namespace mmdeploy::detail {
// Placeholder type for the detection idiom (mirrors
// std::experimental::nonesuch): deliberately impossible to construct,
// destroy, copy or assign, so it can never be used by accident.
struct nonesuch {
  nonesuch() = delete;
  ~nonesuch() = delete;
  nonesuch(nonesuch const&) = delete;
  nonesuch(nonesuch const&&) = delete;  // const&& also blocks const rvalues
  void operator=(nonesuch const&) = delete;
  void operator=(nonesuch&&) = delete;
};
// Detection idiom (std::experimental::is_detected et al.).
// Primary template: chosen when Op<Args...> is ill-formed.
template <class Default, class AlwaysVoid, template <class...> class Op, class... Args>
struct detector {
  using value_t = std::false_type;
  using type = Default;
};
// Specialization: chosen when Op<Args...> is well-formed (void_t succeeds).
template <class Default, template <class...> class Op, class... Args>
struct detector<Default, std::void_t<Op<Args...>>, Op, Args...> {
  using value_t = std::true_type;
  using type = Op<Args...>;
};
// true_type/false_type depending on whether Op<Args...> is well-formed.
template <template <class...> class Op, class... Args>
using is_detected = typename detector<nonesuch, void, Op, Args...>::value_t;
// Op<Args...> when well-formed, nonesuch otherwise.
template <template <class...> class Op, class... Args>
using detected_t = typename detector<nonesuch, void, Op, Args...>::type;
template <template <class...> class Op, class... Args>
constexpr inline bool is_detected_v = is_detected<Op, Args...>::value;
// Like detected_t but with a caller-supplied fallback type.
template <class Default, template <class...> class Op, class... Args>
using detected_or = detector<Default, void, Op, Args...>;
template <class Default, template <class...> class Op, class... Args>
using detected_or_t = typename detected_or<Default, Op, Args...>::type;
// Whether Op<Args...> is exactly / convertible to a given type.
template <class Expected, template <class...> class Op, class... Args>
using is_detected_exact = std::is_same<Expected, detected_t<Op, Args...>>;
template <class To, template <class...> class Op, class... Args>
using is_detected_convertible = std::is_convertible<detected_t<Op, Args...>, To>;
} // namespace mmdeploy::detail
#endif // MMDEPLOY_SRC_CORE_CPP_DETECTED_H_
// Copyright (c) OpenMMLab. All rights reserved.
#ifndef MMDEPLOY_SRC_CORE_MPL_ITERATOR_H_
#define MMDEPLOY_SRC_CORE_MPL_ITERATOR_H_
#include <iterator>
#include "type_traits.h"
namespace mmdeploy {
// Value type of an iterator; cv-ref is stripped first so the alias also
// works when T is deduced as a reference.
template <typename T>
using iter_value_t = typename std::iterator_traits<uncvref_t<T> >::value_type;
// Type obtained by dereferencing an lvalue of type T.
template <typename T>
using iter_reference_t = decltype(*std::declval<T&>());
} // namespace mmdeploy
#endif // MMDEPLOY_SRC_CORE_MPL_ITERATOR_H_
// Copyright (c) OpenMMLab. All rights reserved.
#ifndef MMDEPLOY_SRC_CORE_MPL_PRIORITY_TAG_H_
#define MMDEPLOY_SRC_CORE_MPL_PRIORITY_TAG_H_
namespace mmdeploy {
// Tag type for ranking overloads: priority_tag<N> derives from
// priority_tag<N-1>, so overload resolution prefers the overload taking the
// highest N when called with priority_tag<Max>{}.
template <unsigned N>
struct priority_tag : priority_tag<N - 1> {};
template <>
struct priority_tag<0> {};
} // namespace mmdeploy
#endif // MMDEPLOY_SRC_CORE_MPL_PRIORITY_TAG_H_
// Copyright (c) OpenMMLab. All rights reserved.
#ifndef MMDEPLOY_SRC_CORE_MPL_SPAN_H_
#define MMDEPLOY_SRC_CORE_MPL_SPAN_H_
#include <iterator>
#include <type_traits>
#include "detected.h"
#include "iterator.h"
namespace mmdeploy {
namespace detail {

// Detects `T::operator->`.
template <typename T>
using arrow_t = decltype(std::declval<T>().operator->());

// Obtain the raw address behind a pointer or (fancy) iterator, mirroring
// std::to_address: raw pointers are returned as-is, anything else is
// unwrapped through operator-> recursively.
// Fix: the original used a non-constexpr `else if` on the detection value,
// so for non-pointer types the `p.operator->()` branch was always
// instantiated, and when detection failed the function could fall off the
// end and deduce a `void` return. `if constexpr` selects the branch at
// compile time; types without operator-> now fail loudly at the call site.
template <typename T>
constexpr auto to_address(const T& p) noexcept {
  if constexpr (std::is_pointer_v<T>) {
    return p;
  } else {
    return to_address(p.operator->());
  }
}

}  // namespace detail
// Non-owning view over a contiguous sequence: a minimal C++17 stand-in for
// std::span (dynamic extent only), plus element-wise comparison operators.
template <typename T>
class Span {
 public:
  using element_type = T;
  using value_type = std::remove_cv_t<T>;
  using size_type = std::size_t;
  using difference_type = std::ptrdiff_t;
  using pointer = T*;
  using const_pointer = const T*;
  using reference = T&;
  using const_reference = const T&;
  using iterator = T*;
  using reverse_iterator = std::reverse_iterator<iterator>;

 public:
  constexpr Span() noexcept : data_(nullptr), size_(0) {}
  // (iterator, count): the SFINAE check only requires an addressable iterator.
  // clang-format off
  template <typename It,
            std::void_t<decltype(std::addressof(std::declval<It&>()))>* = nullptr>
  // clang-format on
  constexpr Span(It first, size_type size) : data_(detail::to_address(first)), size_(size) {}
  // (iterator, iterator): disabled when End converts to size_t to avoid
  // ambiguity with the (iterator, count) overload.
  template <typename It, typename End,
            std::enable_if_t<!std::is_convertible_v<End, std::size_t>, int> = 0>
  constexpr Span(It first, End last) : data_(detail::to_address(first)), size_(last - first) {}
  // Any container exposing std::data()/std::size() (vector, array, ...).
  template <typename U, typename = std::void_t<decltype(std::data(std::declval<U>()))>,
            typename = std::void_t<decltype(std::size(std::declval<U>()))>>
  constexpr Span(U& v) : data_(std::data(v)), size_(std::size(v)) {}
  template <typename U, typename = std::void_t<decltype(std::data(std::declval<U>()))>,
            typename = std::void_t<decltype(std::size(std::declval<U>()))>>
  constexpr Span(const U& v) : data_(std::data(v)), size_(std::size(v)) {}
  // NOTE(review): a Span over an initializer_list views the list's backing
  // array, which only lives until the end of the full expression — such a
  // Span must not be stored.
  template <typename U>
  constexpr Span(std::initializer_list<U> il) noexcept : Span(il.begin(), il.size()) {}
  template <std::size_t N>
  constexpr Span(element_type (&arr)[N]) noexcept : data_(std::data(arr)), size_(N) {}
  constexpr Span(const Span& other) noexcept : data_(std::data(other)), size_(std::size(other)) {}
  constexpr iterator begin() const noexcept { return data_; }
  constexpr iterator end() const noexcept { return data_ + size_; }
  constexpr reverse_iterator rbegin() const noexcept { return std::make_reverse_iterator(end()); }
  constexpr reverse_iterator rend() const noexcept { return std::make_reverse_iterator(begin()); }
  // Element access is unchecked, as in std::span.
  constexpr reference front() const { return data_[0]; }
  constexpr reference back() const { return data_[size_ - 1]; }
  constexpr reference operator[](size_type idx) const { return data_[idx]; }
  constexpr pointer data() const noexcept { return data_; }
  constexpr size_type size() const noexcept { return size_; }
  constexpr size_type size_bytes() const noexcept { return sizeof(value_type) * size(); }
  constexpr bool empty() const noexcept { return size_ == 0; }
  constexpr Span<element_type> first(size_type count) const { return {begin(), count}; }
  constexpr Span<element_type> last(size_type count) const { return {end() - count, count}; }
  // Sub-view from `offset`; count == -1 (size_type max) means "to the end".
  constexpr Span<element_type> subspan(size_type offset, size_type count = -1) const {
    if (count == -1) {
      return Span(begin() + offset, end());
    } else {
      return Span(begin() + offset, begin() + offset + count);
    }
  }
  constexpr Span& operator=(const Span& other) noexcept = default;
  // Element-wise comparison (std::span itself has no comparison operators).
  template <typename U>
  friend bool operator!=(const Span& a, const Span<U>& b) {
    if (a.size() != b.size()) {
      return true;
    }
    for (size_type i = 0; i < a.size(); ++i) {
      if (a[i] != b[i]) {
        return true;
      }
    }
    return false;
  }
  template <typename U>
  friend bool operator==(const Span& a, const Span<U>& b) {
    return !(a != b);
  }

 private:
  T* data_;         // first element; nullptr when default-constructed
  size_type size_;  // number of elements viewed
};
// clang-format off
// Deduction guides mirroring std::span's: deduce the element type from an
// iterator, a C array, a container, or an initializer_list (const element).
template <typename It, typename EndOrSize>
Span(It, EndOrSize) -> Span<std::remove_reference_t<iter_reference_t<It>>>;
template <typename T, std::size_t N>
Span(T (&)[N]) -> Span<T>;
template <typename U, typename = std::void_t<decltype(std::declval<U>().data())>,
          typename = std::void_t<decltype(std::declval<U>().size())>>
Span(U& v) -> Span<typename uncvref_t<U>::value_type>;
template <typename T>
Span(std::initializer_list<T>) -> Span<const T>;
// clang-format on
} // namespace mmdeploy
#endif // MMDEPLOY_SRC_CORE_MPL_SPAN_H_
// Copyright (c) OpenMMLab. All rights reserved.
// Re-implementation of std::any, relies on static type id instead of RTTI.
// adjusted from libc++-10
#ifndef MMDEPLOY_CSRC_CORE_MPL_STATIC_ANY_H_
#define MMDEPLOY_CSRC_CORE_MPL_STATIC_ANY_H_
#include <cstdint>
#include <memory>
#include <stdexcept>
#include <type_traits>
#include <utility>
#include "mmdeploy/core/mpl/type_traits.h"
namespace mmdeploy {
namespace detail {
// Trait: true iff T is a specialization of std::in_place_type_t. Used to keep
// StaticAny's converting constructor from hijacking in-place construction.
template <typename T>
struct is_in_place_type_impl : std::false_type {};
template <typename T>
struct is_in_place_type_impl<std::in_place_type_t<T>> : std::true_type {};
template <typename T>
struct is_in_place_type : public is_in_place_type_impl<T> {};
} // namespace detail
// Exception thrown by the value-returning static_any_cast overloads on a
// type mismatch (analogue of std::bad_any_cast).
class BadAnyCast : public std::bad_cast {
 public:
  const char* what() const noexcept override { return "BadAnyCast"; }
};
// Throws BadAnyCast, or aborts when the build has exceptions disabled.
[[noreturn]] inline void ThrowBadAnyCast() {
#if __cpp_exceptions
  throw BadAnyCast{};
#else
  std::abort();
#endif
}
// Forward declarations
class StaticAny;
template <class ValueType>
std::add_pointer_t<std::add_const_t<ValueType>> static_any_cast(const StaticAny*) noexcept;
template <class ValueType>
std::add_pointer_t<ValueType> static_any_cast(StaticAny*) noexcept;
namespace __static_any_impl {
// Inline buffer for the small-object optimization: three pointers' worth of
// pointer-aligned storage (same budget as libc++'s std::any).
using _Buffer = std::aligned_storage_t<3 * sizeof(void*), std::alignment_of_v<void*>>;
// A type is stored inline only if it fits the buffer, is no more strictly
// aligned than it, and can be moved without throwing (so moves of the
// enclosing StaticAny stay noexcept).
template <class T>
using _IsSmallObject =
    std::integral_constant<bool, sizeof(T) <= sizeof(_Buffer) &&
                                     std::alignment_of_v<_Buffer> % std::alignment_of_v<T> == 0 &&
                                     std::is_nothrow_move_constructible_v<T>>;
// Operations a handler can be asked to perform on the stored value.
enum class _Action { _Destroy, _Copy, _Move, _Get, _TypeInfo };
// Handler return channel: _Get yields a pointer, _TypeInfo yields a type id.
union _Ret {
  void* ptr_;
  traits::type_id_t type_id_;
};
template <class T>
struct _SmallHandler;
template <class T>
struct _LargeHandler;
// True iff __id is a valid (non-zero) id matching T's registered type id.
template <class T>
inline bool __compare_typeid(traits::type_id_t __id) {
  if (__id && __id == traits::TypeId<T>::value) {
    return true;
  }
  return false;
}
// Selects inline vs. heap storage strategy for T.
template <class T>
using _Handler = std::conditional_t<_IsSmallObject<T>::value, _SmallHandler<T>, _LargeHandler<T>>;
}  // namespace __static_any_impl
// std::any work-alike that identifies types via traits::TypeId (a static,
// registered id) instead of RTTI, so it works with -fno-rtti. All operations
// are dispatched through a single per-type handler function pointer `h_`;
// h_ == nullptr means "empty". Only types registered via
// MMDEPLOY_REGISTER_TYPE_ID (non-zero TypeId) are accepted by the SFINAE
// constraints below.
class StaticAny {
 public:
  constexpr StaticAny() noexcept : h_(nullptr) {}
  StaticAny(const StaticAny& other) : h_(nullptr) {
    if (other.h_) {
      other.__call(_Action::_Copy, this);
    }
  }
  // Move leaves `other` empty (handled inside the handler's _Move action).
  StaticAny(StaticAny&& other) noexcept : h_(nullptr) {
    if (other.h_) {
      other.__call(_Action::_Move, this);
    }
  }
  // Converting constructor. NOTE(review): `explicit`, unlike std::any's
  // converting constructor — presumably intentional; confirm with callers.
  template <class ValueType, class T = std::decay_t<ValueType>,
            class = std::enable_if_t<
                !std::is_same<T, StaticAny>::value && !detail::is_in_place_type<ValueType>::value &&
                std::is_copy_constructible<T>::value && traits::TypeId<T>::value>>
  explicit StaticAny(ValueType&& value);
  // In-place construction of a T from args.
  template <
      class ValueType, class... Args, class T = std::decay_t<ValueType>,
      class = std::enable_if_t<std::is_constructible<T, Args...>::value &&
                               std::is_copy_constructible<T>::value && traits::TypeId<T>::value>>
  explicit StaticAny(std::in_place_type_t<ValueType>, Args&&... args);
  template <class ValueType, class U, class... Args, class T = std::decay_t<ValueType>,
            class = std::enable_if_t<
                std::is_constructible<T, std::initializer_list<U>&, Args...>::value &&
                std::is_copy_constructible<T>::value && traits::TypeId<T>::value>>
  explicit StaticAny(std::in_place_type_t<ValueType>, std::initializer_list<U>, Args&&... args);
  ~StaticAny() { this->reset(); }
  // Copy-and-swap keeps assignment strongly exception-safe.
  StaticAny& operator=(const StaticAny& rhs) {
    StaticAny(rhs).swap(*this);
    return *this;
  }
  StaticAny& operator=(StaticAny&& rhs) noexcept {
    StaticAny(std::move(rhs)).swap(*this);
    return *this;
  }
  template <
      class ValueType, class T = std::decay_t<ValueType>,
      class = std::enable_if_t<!std::is_same<T, StaticAny>::value &&
                               std::is_copy_constructible<T>::value && traits::TypeId<T>::value>>
  StaticAny& operator=(ValueType&& v);
  // Destroys any held value, then constructs a T in place from args;
  // returns a reference to the new value.
  template <
      class ValueType, class... Args, class T = std::decay_t<ValueType>,
      class = std::enable_if_t<std::is_constructible<T, Args...>::value &&
                               std::is_copy_constructible<T>::value && traits::TypeId<T>::value>>
  T& emplace(Args&&... args);
  template <class ValueType, class U, class... Args, class T = std::decay_t<ValueType>,
            class = std::enable_if_t<
                std::is_constructible<T, std::initializer_list<U>&, Args...>::value &&
                std::is_copy_constructible<T>::value && traits::TypeId<T>::value>>
  T& emplace(std::initializer_list<U>, Args&&...);
  // Destroys the held value, if any; the handler resets h_ to nullptr.
  void reset() noexcept {
    if (h_) {
      this->__call(_Action::_Destroy);
    }
  }
  void swap(StaticAny& rhs) noexcept;
  bool has_value() const noexcept { return h_ != nullptr; }
  // Id of the held type; TypeId<void>::value (i.e. type_id_t(-1)) when empty.
  traits::type_id_t type() const noexcept {
    if (h_) {
      return this->__call(_Action::_TypeInfo).type_id_;
    } else {
      return traits::TypeId<void>::value;
    }
  }
 private:
  using _Action = __static_any_impl::_Action;
  using _Ret = __static_any_impl::_Ret;
  using _HandleFuncPtr = _Ret (*)(_Action, const StaticAny*, StaticAny*, traits::type_id_t info);
  // Either a heap pointer (_LargeHandler) or the inline buffer (_SmallHandler);
  // which member is active is encoded by the handler stored in h_.
  union _Storage {
    constexpr _Storage() : ptr_(nullptr) {}
    void* ptr_;
    __static_any_impl::_Buffer buf_;
  };
  // Forward the requested action to the current handler. Precondition: h_ != nullptr.
  _Ret __call(_Action a, StaticAny* other = nullptr, traits::type_id_t info = 0) const {
    return h_(a, this, other, info);
  }
  _Ret __call(_Action a, StaticAny* other = nullptr, traits::type_id_t info = 0) {
    return h_(a, this, other, info);
  }
  template <class>
  friend struct __static_any_impl::_SmallHandler;
  template <class>
  friend struct __static_any_impl::_LargeHandler;
  template <class ValueType>
  friend std::add_pointer_t<std::add_const_t<ValueType>> static_any_cast(const StaticAny*) noexcept;
  template <class ValueType>
  friend std::add_pointer_t<ValueType> static_any_cast(StaticAny*) noexcept;
  _HandleFuncPtr h_ = nullptr;
  _Storage s_;
};
namespace __static_any_impl {
// Handler for types stored inline in StaticAny's buffer (small-object path).
template <class T>
struct _SmallHandler {
  // Single dispatch entry point installed into StaticAny::h_.
  static _Ret __handle(_Action action, const StaticAny* self, StaticAny* other,
                       traits::type_id_t info) {
    _Ret ret;
    ret.ptr_ = nullptr;
    switch (action) {
      case _Action::_Destroy:
        __destroy(const_cast<StaticAny&>(*self));
        break;
      case _Action::_Copy:
        __copy(*self, *other);
        break;
      case _Action::_Move:
        __move(const_cast<StaticAny&>(*self), *other);
        break;
      case _Action::_Get:
        ret.ptr_ = __get(const_cast<StaticAny&>(*self), info);
        break;
      case _Action::_TypeInfo:
        ret.type_id_ = __type_info();
        break;
    }
    return ret;
  }
  // Construct a T in dest's inline buffer and install this handler.
  template <class... Args>
  static T& __create(StaticAny& dest, Args&&... args) {
    T* ret = ::new (static_cast<void*>(&dest.s_.buf_)) T(std::forward<Args>(args)...);
    dest.h_ = &_SmallHandler::__handle;
    return *ret;
  }
 private:
  // Destroy in place and mark self empty.
  template <class... Args>
  static void __destroy(StaticAny& self) {
    T& value = *static_cast<T*>(static_cast<void*>(&self.s_.buf_));
    value.~T();
    self.h_ = nullptr;
  }
  template <class... Args>
  static void __copy(const StaticAny& self, StaticAny& dest) {
    _SmallHandler::__create(dest, *static_cast<const T*>(static_cast<const void*>(&self.s_.buf_)));
  }
  // Move into dest, then destroy the moved-from value (source becomes empty).
  static void __move(StaticAny& self, StaticAny& dest) {
    _SmallHandler::__create(dest, std::move(*static_cast<T*>(static_cast<void*>(&self.s_.buf_))));
    __destroy(self);
  }
  // Pointer to the stored value if `info` matches T's id, else nullptr.
  static void* __get(StaticAny& self, traits::type_id_t info) {
    if (__static_any_impl::__compare_typeid<T>(info)) {
      return static_cast<void*>(&self.s_.buf_);
    }
    return nullptr;
  }
  static traits::type_id_t __type_info() { return traits::TypeId<T>::value; }
};
// Handler for types too big/aligned (or not nothrow-movable) for the inline
// buffer: the value lives on the heap and s_.ptr_ owns it.
template <class T>
struct _LargeHandler {
  // Single dispatch entry point installed into StaticAny::h_.
  static _Ret __handle(_Action action, const StaticAny* self, StaticAny* other,
                       traits::type_id_t info) {
    _Ret ret;
    ret.ptr_ = nullptr;
    switch (action) {
      case _Action::_Destroy:
        __destroy(const_cast<StaticAny&>(*self));
        break;
      case _Action::_Copy:
        __copy(*self, *other);
        break;
      case _Action::_Move:
        __move(const_cast<StaticAny&>(*self), *other);
        break;
      case _Action::_Get:
        ret.ptr_ = __get(const_cast<StaticAny&>(*self), info);
        break;
      case _Action::_TypeInfo:
        ret.type_id_ = __type_info();
        break;
    }
    return ret;
  }
  // Allocate and construct a T on the heap; the unique_ptr guard keeps the
  // allocation from leaking if T's constructor throws.
  template <class... Args>
  static T& __create(StaticAny& dest, Args&&... args) {
    using _Alloc = std::allocator<T>;
    _Alloc alloc;
    auto dealloc = [&](T* p) { alloc.deallocate(p, 1); };
    std::unique_ptr<T, decltype(dealloc)> hold(alloc.allocate(1), dealloc);
    T* ret = ::new ((void*)hold.get()) T(std::forward<Args>(args)...);
    dest.s_.ptr_ = hold.release();
    dest.h_ = &_LargeHandler::__handle;
    return *ret;
  }
 private:
  static void __destroy(StaticAny& self) {
    delete static_cast<T*>(self.s_.ptr_);
    self.h_ = nullptr;
  }
  static void __copy(const StaticAny& self, StaticAny& dest) {
    _LargeHandler::__create(dest, *static_cast<const T*>(self.s_.ptr_));
  }
  // Move is just a pointer transfer; clearing self.h_ relinquishes ownership
  // (self.s_.ptr_ becomes stale but is never read while h_ is null).
  static void __move(StaticAny& self, StaticAny& dest) {
    dest.s_.ptr_ = self.s_.ptr_;
    dest.h_ = &_LargeHandler::__handle;
    self.h_ = nullptr;
  }
  // Pointer to the stored value if `info` matches T's id, else nullptr.
  static void* __get(StaticAny& self, traits::type_id_t info) {
    if (__static_any_impl::__compare_typeid<T>(info)) {
      return static_cast<void*>(self.s_.ptr_);
    }
    return nullptr;
  }
  static traits::type_id_t __type_info() { return traits::TypeId<T>::value; }
};
} // namespace __static_any_impl
// Converting constructor: picks small/large storage via _Handler<T>.
template <class ValueType, class T, class>
StaticAny::StaticAny(ValueType&& v) : h_(nullptr) {
  __static_any_impl::_Handler<T>::__create(*this, std::forward<ValueType>(v));
}
// In-place constructors. h_ is null from its default member initializer
// until __create installs the handler.
template <class ValueType, class... Args, class T, class>
StaticAny::StaticAny(std::in_place_type_t<ValueType>, Args&&... args) {
  __static_any_impl::_Handler<T>::__create(*this, std::forward<Args>(args)...);
}
template <class ValueType, class U, class... Args, class T, class>
StaticAny::StaticAny(std::in_place_type_t<ValueType>, std::initializer_list<U> il, Args&&... args) {
  __static_any_impl::_Handler<T>::__create(*this, il, std::forward<Args>(args)...);
}
// Assignment from a value: copy/move-and-swap for exception safety.
template <class ValueType, class, class>
inline StaticAny& StaticAny::operator=(ValueType&& v) {
  StaticAny(std::forward<ValueType>(v)).swap(*this);
  return *this;
}
// Destroys the current value (if any), then constructs T from args in place.
// Returns a reference to the newly constructed value.
template <class ValueType, class... Args, class T, class>
inline T& StaticAny::emplace(Args&&... args) {
  reset();
  return __static_any_impl::_Handler<T>::__create(*this, std::forward<Args>(args)...);
}
template <class ValueType, class U, class... Args, class T, class>
inline T& StaticAny::emplace(std::initializer_list<U> il, Args&&... args) {
reset();
return __static_any_impl::_Handler<T>::_create(*this, il, std::forward<Args>(args)...);
}
// Exchange the contents of two StaticAny objects. When both hold a value, a
// classic three-way move through a temporary is performed via the handlers'
// _Move action (which also empties the moved-from side).
inline void StaticAny::swap(StaticAny& rhs) noexcept {
  if (this == &rhs) {
    return;
  }
  if (h_ && rhs.h_) {
    StaticAny tmp;
    rhs.__call(_Action::_Move, &tmp);
    this->__call(_Action::_Move, &rhs);
    tmp.__call(_Action::_Move, this);
  } else if (h_) {
    // Only *this holds a value: move it into rhs, leaving *this empty.
    this->__call(_Action::_Move, &rhs);
  } else if (rhs.h_) {
    rhs.__call(_Action::_Move, this);
  }
}
// ADL-found free swap, mirroring std::swap's interface.
inline void swap(StaticAny& lhs, StaticAny& rhs) noexcept { lhs.swap(rhs); }
// Convenience factories mirroring std::make_any: construct a T in place
// inside a new StaticAny.
template <class T, class... Args>
inline StaticAny make_static_any(Args&&... args) {
  return StaticAny(std::in_place_type<T>, std::forward<Args>(args)...);
}
template <class T, class U, class... Args>
StaticAny make_static_any(std::initializer_list<U> il, Args&&... args) {
  return StaticAny(std::in_place_type<T>, il, std::forward<Args>(args)...);
}
// Value-returning casts, mirroring std::any_cast. Each delegates to the
// pointer overload and calls ThrowBadAnyCast() (throw or abort) on mismatch.
template <class ValueType>
ValueType static_any_cast(const StaticAny& v) {
  using _RawValueType = std::remove_cv_t<std::remove_reference_t<ValueType>>;
  static_assert(std::is_constructible<ValueType, const _RawValueType&>::value,
                "ValueType is required to be a const lvalue reference "
                "or a CopyConstructible type");
  auto tmp = static_any_cast<std::add_const_t<_RawValueType>>(&v);
  if (tmp == nullptr) {
    ThrowBadAnyCast();
  }
  return static_cast<ValueType>(*tmp);
}
template <class ValueType>
inline ValueType static_any_cast(StaticAny& v) {
  using _RawValueType = std::remove_cv_t<std::remove_reference_t<ValueType>>;
  static_assert(std::is_constructible<ValueType, _RawValueType&>::value,
                "ValueType is required to be an lvalue reference "
                "or a CopyConstructible type");
  auto tmp = static_any_cast<_RawValueType>(&v);
  if (tmp == nullptr) {
    ThrowBadAnyCast();
  }
  return static_cast<ValueType>(*tmp);
}
// Rvalue overload: moves the stored value out.
template <class ValueType>
inline ValueType static_any_cast(StaticAny&& v) {
  using _RawValueType = std::remove_cv_t<std::remove_reference_t<ValueType>>;
  static_assert(std::is_constructible<ValueType, _RawValueType>::value,
                "ValueType is required to be an rvalue reference "
                "or a CopyConstructible type");
  auto tmp = static_any_cast<_RawValueType>(&v);
  if (tmp == nullptr) {
    ThrowBadAnyCast();
  }
  return static_cast<ValueType>(std::move(*tmp));
}
// const-pointer cast: returns nullptr on mismatch (never throws). Forwards
// to the non-const pointer overload with constness re-added on the result.
template <class ValueType>
inline std::add_pointer_t<std::add_const_t<ValueType>> static_any_cast(
    const StaticAny* __any) noexcept {
  static_assert(!std::is_reference<ValueType>::value, "ValueType may not be a reference.");
  return static_any_cast<ValueType>(const_cast<StaticAny*>(__any));
}
/// Helpers for the pointer form of static_any_cast: a `void*` cannot be
/// converted to a pointer-to-function with static_cast, so the overload
/// chosen for function types (tag std::true_type) always yields nullptr.
template <class RetType>
inline RetType __pointer_or_func_test(void* /*ptr*/, std::true_type) noexcept {
  return nullptr;
}
/// Object-type case (tag std::false_type): cast the stored object pointer to
/// the requested pointer type.
template <class RetType>
inline RetType __pointer_or_func_test(void* ptr, std::false_type) noexcept {
  return static_cast<RetType>(ptr);
}
// Pointer cast: returns a pointer to the stored value when `any` is non-null,
// non-empty, and holds exactly ValueType (checked via the handler's _Get
// action against TypeId<ValueType>); otherwise nullptr. Function types are
// routed through __pointer_or_func_test and always yield nullptr.
template <class ValueType>
std::add_pointer_t<ValueType> static_any_cast(StaticAny* any) noexcept {
  using __static_any_impl::_Action;
  static_assert(!std::is_reference<ValueType>::value, "ValueType may not be a reference.");
  using ReturnType = std::add_pointer_t<ValueType>;
  if (any && any->h_) {
    void* p = any->__call(_Action::_Get, nullptr, traits::TypeId<ValueType>::value).ptr_;
    return __pointer_or_func_test<ReturnType>(p, std::is_function<ValueType>{});
  }
  return nullptr;
}
} // namespace mmdeploy
#endif // MMDEPLOY_CSRC_CORE_MPL_STATIC_ANY_H_
// Copyright (c) OpenMMLab. All rights reserved.
#ifndef MMDEPLOY_CSRC_MMDEPLOY_CORE_MPL_STRUCTURE_H_
#define MMDEPLOY_CSRC_MMDEPLOY_CORE_MPL_STRUCTURE_H_
#include <array>
#include <memory>
#include <tuple>
#include <utility>
namespace mmdeploy {
namespace _structure {
using std::array;
using std::index_sequence;
using std::integral_constant;
using std::tuple;
// [p0][T0]...[p1][T1]...[pn][Tn]...[px][X]
// ^ |
// |-------------------------------------|
// Owns one raw allocation partitioned into `Size` aligned sub-buffers plus a
// trailing void* slot holding the allocation's base address. That back-slot
// lets the full layout (and ownership) be recovered later from a pointer to
// any single sub-buffer — see the diagram above.
template <size_t Size>
class Storage {
  // S = user sub-buffers + 1 slot for the stored base pointer.
  static constexpr auto S = Size + 1;
  using Indices = std::make_index_sequence<S>;
 public:
  Storage(const Storage&) = delete;
  Storage(Storage&&) noexcept = delete;
  Storage& operator=(const Storage&) = delete;
  Storage& operator=(Storage&&) noexcept = delete;
  // Allocate a fresh buffer laid out for the given per-slot sizes/alignments.
  Storage(const array<size_t, Size>& sizes, const array<size_t, Size>& aligns) {
    create(std::make_index_sequence<Size>{}, sizes, aligns);
  }
  // Re-adopt an existing buffer given a pointer to its `offset`-th sub-buffer
  // (the base address is read back from the trailing slot).
  template <size_t offset>
  Storage(const array<size_t, Size>& sizes, const array<size_t, Size>& aligns,
          integral_constant<size_t, offset> index, void* ptr) noexcept {
    create(std::make_index_sequence<Size>{}, sizes, aligns, index, ptr);
  }
  // Append the hidden (void*, alignof(void*)) slot and delegate to Creator.
  template <size_t... i, typename... As>
  void create(index_sequence<i...>, const array<size_t, Size>& sizes,
              const array<size_t, Size>& aligns, As&&... as) {
    std::tie(data_, pointers_) =
        Creator{{sizes[i]..., sizeof(void*)}, {aligns[i]..., alignof(void*)}}.create((As &&) as...);
  }
  ~Storage() {
    if (data_) {
      delete[] static_cast<uint8_t*>(data_);
      release();
    }
  }
  void* data() const noexcept { return data_; }
  // Pointer to the start of the i-th sub-buffer.
  template <size_t i>
  void* at() const noexcept {
    return pointers_[i];
  }
  array<void*, S>& pointers() { return pointers_; }
  // Give up ownership: null out the cached pointers and return the base
  // address (caller becomes responsible for the allocation).
  void* release() noexcept {
    std::fill_n(pointers_.data(), S, nullptr);
    return std::exchange(data_, nullptr);
  }
 private:
  // Computes the layout; stateless apart from references to the size tables.
  struct Creator {
    const array<size_t, S>& sizes_;
    const array<size_t, S>& aligns_;
    // Fresh allocation: lay out all S slots, then record the base address in
    // the last slot so the layout can be recovered later.
    tuple<void*, array<void*, S>> create() {
      auto space = get_space(Indices{});
      void* data = new uint8_t[space];
      auto ptr = data;
      array<void*, S> pointers{};
      // build the layout according to sizes and alignments
      align<0>(ptr, space, pointers, Indices{});
      // store a pointer to the head of data in the last slot
      *reinterpret_cast<void**>(pointers.back()) = data;
      return {data, pointers};
    }
    // Recovery: starting from a pointer to slot `offset`, walk forward to the
    // trailing slot, read the base address back, then re-derive the slots
    // before `offset` from the base.
    template <size_t offset>
    tuple<void*, array<void*, S>> create(integral_constant<size_t, offset>, void* ptr) {
      auto space = get_space(Indices{});
      array<void*, S> pointers{};
      // recover the layout after offset
      align<offset>(ptr, space, pointers, std::make_index_sequence<S - offset>{});
      // recover data pointer
      auto data = ptr = *reinterpret_cast<void**>(pointers.back());
      // recover the layout before offset
      align<0>(ptr, space, pointers, std::make_index_sequence<offset>{});
      return {data, pointers};
    }
   private:
    // Worst-case bytes needed: each slot may burn up to its alignment in padding.
    template <size_t... i>
    size_t get_space(index_sequence<i...>) const noexcept {
      return ((sizes_[i] + aligns_[i]) + ...);
    }
    // Place slots offset..offset+N-1 in order.
    template <size_t offset, size_t... i>
    void align(void*& ptr, size_t& space, array<void*, S>& pointers,
               index_sequence<i...>) noexcept {
      (align(ptr, space, pointers, integral_constant<size_t, offset + i>{}), ...);
    }
    // Place one slot: std::align advances ptr to the slot's start; then ptr
    // is bumped past the slot for the next placement.
    template <size_t i>
    void align(void*& ptr, size_t& space, array<void*, S>& pointers,
               integral_constant<size_t, i>) noexcept {
      pointers[i] = std::align(aligns_[i], sizes_[i], ptr, space);
      ptr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + sizes_[i]);
      space -= sizes_[i];
    }
  };
 private:
  void* data_{};
  array<void*, S> pointers_{};
};
// Number of occurrences of T in the pack Ts... (unary fold over is_same;
// like the original, requires a non-empty pack).
template <typename T, typename... Ts>
struct _count {
  static constexpr std::size_t value = (... + static_cast<std::size_t>(std::is_same_v<Ts, T>));
};
// Index of T within a tuple<Ts...>. Only defined (via the enable_if'd partial
// specialization) when T appears exactly once, so ambiguous or absent types
// are a substitution failure rather than a wrong index.
template <typename T, typename Ts, typename Is, typename = void>
struct get_type_index {};
template <typename T, typename... Ts, size_t... Is>
struct get_type_index<T, tuple<Ts...>, std::index_sequence<Is...>,
                      std::enable_if_t<_count<T, Ts...>::value == 1>> {
  // Exactly one is_same term is non-zero, so the fold picks out its index.
  static constexpr size_t value = ((std::is_same_v<T, Ts> * Is) + ...);
};
// Maps any type to size_t; used to expand a scalar into a per-type pack.
template <typename T>
using _size_t = size_t;
// Heterogeneous struct-of-arrays over a single allocation: for each type in
// Ts..., `lengths_[i]` default-constructed elements live in the i-th aligned
// sub-buffer of the underlying Storage. Can also re-adopt a previously
// released allocation from a pointer to any one of its typed arrays.
template <typename... Ts>
class Structure : public Storage<sizeof...(Ts)> {
  static constexpr auto Size = sizeof...(Ts);
  using Base = Storage<Size>;
  using Indices = std::index_sequence_for<Ts...>;
 public:
  explicit Structure() : Structure(1) {}
  // `length` elements of every type.
  explicit Structure(size_t length) : Structure(array<size_t, Size>{_size_t<Ts>(length)...}) {}
  // Per-type element counts; allocates and default-constructs all elements.
  explicit Structure(const array<size_t, Size>& lengths)
      : Base(get_sizes(lengths, Indices{}), {alignof(Ts)...}), lengths_{lengths} {
    construct(Indices{});
  }
  // Recovery constructors: re-adopt an existing allocation given a typed
  // pointer to one of its arrays (T's slot index is deduced; elements are
  // assumed already constructed, so no construct() here).
  template <typename T, size_t index = get_type_index<T, tuple<Ts...>, Indices>::value>
  explicit Structure(T* p) : Structure(1, integral_constant<size_t, index>{}, p) {}
  template <typename T, size_t index = get_type_index<T, tuple<Ts...>, Indices>::value>
  explicit Structure(size_t length, T* p)
      : Structure(length, integral_constant<size_t, index>{}, p) {}
  template <typename T, size_t index = get_type_index<T, tuple<Ts...>, Indices>::value>
  explicit Structure(const array<size_t, Size>& lengths, T* p)
      : Structure(lengths, integral_constant<size_t, index>{}, p) {}
  template <size_t i>
  explicit Structure(integral_constant<size_t, i> index, void* p) : Structure(1, index, p) {}
  template <size_t i>
  explicit Structure(size_t length, integral_constant<size_t, i> index, void* p)
      : Structure({_size_t<Ts>(length)...}, index, p) {}
  template <size_t i>
  explicit Structure(const array<size_t, Size>& lengths, integral_constant<size_t, i> index,
                     void* p)
      : Base(get_sizes(lengths, Indices{}), {alignof(Ts)...}, index, p), lengths_{lengths} {}
  // Destroy elements only if the Storage still owns its buffer (a released
  // Structure destroys nothing; the adopting side is responsible).
  ~Structure() {
    if (this->data()) {
      destruct(Indices{});
    }
  }
  // Typed pointer to the i-th array.
  template <size_t i>
  decltype(auto) get() const {
    using T = std::tuple_element_t<i, tuple<Ts...>>;
    return reinterpret_cast<T*>(this->template at<i>());
  }
  tuple<Ts*...> pointers() const noexcept { return pointers(Indices{}); }
 private:
  // Per-slot byte sizes: element size times element count.
  template <size_t... i>
  static array<size_t, Size> get_sizes(const array<size_t, Size>& lengths,
                                       index_sequence<i...>) noexcept {
    return {(sizeof(Ts) * lengths[i])...};
  }
  template <size_t... i>
  tuple<Ts*...> pointers(index_sequence<i...>) const noexcept {
    return {get<i>()...};
  }
  // Default-construct every element of every array, in place.
  template <size_t... i>
  void construct(index_sequence<i...>) {
    (create_n(get<i>(), lengths_[i]), ...);
  }
  template <typename T>
  static void create_n(T* data, size_t n) {
    for (size_t i = 0; i < n; ++i) {
      new (data + i) T{};
    }
  }
  template <size_t... i>
  void destruct(index_sequence<i...>) {
    (std::destroy_n(get<i>(), lengths_[i]), ...);
  }
 private:
  array<size_t, Size> lengths_;
};
} // namespace _structure
using _structure::Structure;
} // namespace mmdeploy
#endif // MMDEPLOY_CSRC_MMDEPLOY_CORE_MPL_STRUCTURE_H_
// Copyright (c) OpenMMLab. All rights reserved.
#ifndef MMDEPLOY_SRC_CORE_MPL_TYPE_TRAITS_H_
#define MMDEPLOY_SRC_CORE_MPL_TYPE_TRAITS_H_
#include <cstdint>
#include <type_traits>
namespace mmdeploy {
/// Strips reference and cv-qualifiers from T — a pre-C++20 equivalent of
/// std::remove_cvref.
template <typename T>
struct uncvref {
  using type = std::remove_cv_t<std::remove_reference_t<T>>;
};

/// Alias shorthand for typename uncvref<T>::type.
template <typename T>
using uncvref_t = typename uncvref<T>::type;
// Opt-in trait: true for types registered with MMDEPLOY_REGISTER_TYPE_ID,
// marking them castable through the type-erasure machinery (e.g. StaticAny).
template <class T>
struct is_cast_by_erasure : std::false_type {};
namespace traits {
using type_id_t = uint64_t;
// Static type id used instead of RTTI. Unregistered types get id 0, which the
// SFINAE guards treat as "not usable"; void is reserved as type_id_t(-1)
// (the "empty" id).
template <class T>
struct TypeId {
  static constexpr type_id_t value = 0;
};
template <>
struct TypeId<void> {
  static constexpr auto value = static_cast<type_id_t>(-1);
};
// Registers `type` with the given unique id: specializes traits::TypeId and
// marks the type castable by erasure. Ids must be globally unique per type —
// collisions would make __compare_typeid match the wrong type.
// ! This only works when calling inside mmdeploy namespace
#define MMDEPLOY_REGISTER_TYPE_ID(type, id) \
  namespace traits {                        \
  template <>                               \
  struct TypeId<type> {                     \
    static constexpr type_id_t value = id;  \
  };                                        \
  }                                         \
  template <>                               \
  struct is_cast_by_erasure<type> : std::true_type {};
}  // namespace traits
} // namespace mmdeploy
#endif // MMDEPLOY_SRC_CORE_MPL_TYPE_TRAITS_H_
// Copyright (c) OpenMMLab. All rights reserved.
#include "net.h"
#include "registry.h"
namespace mmdeploy::framework {
// Defines the global registry instance for Net implementations (presumably
// declared via a matching MMDEPLOY_DECLARE_REGISTRY in net.h — confirm).
MMDEPLOY_DEFINE_REGISTRY(Net);
}  // namespace mmdeploy::framework
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment