Commit fbd3199c authored by fengzch-das's avatar fengzch-das
Browse files

Initial commit

parents
/*
* By downloading, copying, installing or using the software you agree to this license.
* If you do not agree to this license, do not download, install,
* copy or use the software.
*
*
* License Agreement
* For Open Source Computer Vision Library
* (3-clause BSD License)
*
* Copyright (C) 2015, NVIDIA Corporation, all rights reserved.
* Third party copyrights are property of their respective owners.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the names of the copyright holders nor the names of the contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* This software is provided by the copyright holders and contributors "as is" and
* any express or implied warranties, including, but not limited to, the implied
* warranties of merchantability and fitness for a particular purpose are disclaimed.
* In no event shall copyright holders or contributors be liable for any direct,
* indirect, incidental, special, exemplary, or consequential damages
* (including, but not limited to, procurement of substitute goods or services;
* loss of use, data, or profits; or business interruption) however caused
* and on any theory of liability, whether in contract, strict liability,
* or tort (including negligence or otherwise) arising in any way out of
* the use of this software, even if advised of the possibility of such damage.
*/
#include "remap.hpp"
namespace CAROTENE_NS {
bool isWarpPerspectiveNearestNeighborSupported(const Size2D &ssize)
{
#if SIZE_MAX > UINT32_MAX
    // Internal index evaluation is performed with u32, so reject images
    // whose width or height does not fit in 32 bits.
    if (ssize.width > 0xffffFFFF || ssize.height > 0xffffFFFF)
        return false;
    return isSupportedConfiguration();
#else
    (void)ssize;
    return isSupportedConfiguration();
#endif
}
bool isWarpPerspectiveLinearSupported(const Size2D &ssize)
{
#if SIZE_MAX > UINT32_MAX
    // Internal index evaluation is performed with u32, so reject images
    // whose width or height does not fit in 32 bits.
    if (ssize.width > 0xffffFFFF || ssize.height > 0xffffFFFF)
        return false;
    return isSupportedConfiguration();
#else
    (void)ssize;
    return isSupportedConfiguration();
#endif
}
/*
 * Nearest-neighbor perspective warp of a single-channel 8-bit image.
 *
 * ssize, dsize       - source / destination sizes
 * srcBase, srcStride - source pixels and row stride (bytes)
 * m                  - 3x3 perspective matrix, laid out so that (per the
 *                      scalar tail below):
 *                        x_src = (m[0]*x + m[3]*y + m[6]) / w
 *                        y_src = (m[1]*x + m[4]*y + m[7]) / w
 *                        w     =  m[2]*x + m[5]*y + m[8]
 * dstBase, dstStride - destination pixels and row stride (bytes)
 * borderMode         - only BORDER_MODE_REPLICATE and BORDER_MODE_CONSTANT
 *                      are handled here; any other mode leaves dst untouched
 * borderValue        - fill value used for BORDER_MODE_CONSTANT
 *
 * The destination is processed in BLOCK_SIZE x BLOCK_SIZE tiles: a table of
 * per-pixel source offsets ("map") is built with NEON, then the pixel
 * transfer is delegated to the remap* helpers (see remap.hpp).
 */
void warpPerspectiveNearestNeighbor(const Size2D &ssize, const Size2D &dsize,
                                    const u8 * srcBase, ptrdiff_t srcStride,
                                    const f32 * m,
                                    u8 * dstBase, ptrdiff_t dstStride,
                                    BORDER_MODE borderMode, u8 borderValue)
{
    internal::assertSupportedConfiguration(isWarpPerspectiveNearestNeighborSupported(ssize));
#ifdef CAROTENE_NEON
    using namespace internal;

    // Tile-local offset table; over-allocated by 16 words so the working
    // pointer can be aligned to 16 bytes for the vector stores below.
    s32 _map[BLOCK_SIZE * BLOCK_SIZE + 16];
    s32 * map = alignPtr(_map, 16);

    // Clamp limits, source row stride and the x-advance (4 columns per
    // vector iteration) broadcast into vector registers.
    int32x4_t v_width4 = vdupq_n_s32(ssize.width - 1), v_height4 = vdupq_n_s32(ssize.height - 1);
    int32x4_t v_step4 = vdupq_n_s32(srcStride);
    float32x4_t v_4 = vdupq_n_f32(4.0f);

    // The nine matrix coefficients, one broadcast register each.
    float32x4_t v_m0 = vdupq_n_f32(m[0]);
    float32x4_t v_m1 = vdupq_n_f32(m[1]);
    float32x4_t v_m2 = vdupq_n_f32(m[2]);
    float32x4_t v_m3 = vdupq_n_f32(m[3]);
    float32x4_t v_m4 = vdupq_n_f32(m[4]);
    float32x4_t v_m5 = vdupq_n_f32(m[5]);
    float32x4_t v_m6 = vdupq_n_f32(m[6]);
    float32x4_t v_m7 = vdupq_n_f32(m[7]);
    float32x4_t v_m8 = vdupq_n_f32(m[8]);

    if (borderMode == BORDER_MODE_REPLICATE)
    {
        int32x4_t v_zero4 = vdupq_n_s32(0);

        for (size_t i = 0; i < dsize.height; i += BLOCK_SIZE)
        {
            size_t blockHeight = std::min<size_t>(BLOCK_SIZE, dsize.height - i);
            for (size_t j = 0; j < dsize.width; j += BLOCK_SIZE)
            {
                size_t blockWidth = std::min<size_t>(BLOCK_SIZE, dsize.width - j);

                // compute table of clamped source offsets for this tile
                for (size_t y = 0; y < blockHeight; ++y)
                {
                    s32 * map_row = getRowPtr(&map[0], blockWidth * sizeof(s32), y);

                    size_t x = 0, y_ = y + i;
                    f32 indeces[4] = { j + 0.0f, j + 1.0f, j + 2.0f, j + 3.0f };
                    float32x4_t v_x = vld1q_f32(indeces), v_y = vdupq_n_f32(y_);
                    // Row-invariant parts (m[3..5]*y + m[6..8]) hoisted out
                    // of the inner x loop.
                    float32x4_t v_yx = vmlaq_f32(v_m6, v_m3, v_y), v_yy = vmlaq_f32(v_m7, v_m4, v_y),
                                v_yw = vmlaq_f32(v_m8, v_m5, v_y);

                    // Vector body: four destination columns per iteration.
                    for ( ; x + 4 <= blockWidth; x += 4)
                    {
                        float32x4_t v_src_xf = vmlaq_f32(v_yx, v_m0, v_x);
                        float32x4_t v_src_yf = vmlaq_f32(v_yy, v_m1, v_x);
                        // vrecpq_f32 is carotene's internal reciprocal helper
                        // (approximate 1/w; presumably a refined reciprocal
                        // estimate - defined in the internal headers).
                        float32x4_t v_wf = vrecpq_f32(vmlaq_f32(v_yw, v_m2, v_x));
                        v_src_xf = vmulq_f32(v_wf, v_src_xf);
                        v_src_yf = vmulq_f32(v_wf, v_src_yf);

                        // NOTE(review): vcvtq_s32_f32 truncates toward zero
                        // while the scalar tail uses floorf; after clamping to
                        // [0, size-1] both agree for coordinates in (-1, 0).
                        int32x4_t v_src_x = vmaxq_s32(v_zero4, vminq_s32(v_width4, vcvtq_s32_f32(v_src_xf)));
                        int32x4_t v_src_y = vmaxq_s32(v_zero4, vminq_s32(v_height4, vcvtq_s32_f32(v_src_yf)));
                        int32x4_t v_src_index = vmlaq_s32(v_src_x, v_src_y, v_step4);
                        vst1q_s32(map_row + x, v_src_index);

                        v_x = vaddq_f32(v_x, v_4);
                    }

                    // Scalar tail for the remaining (< 4) columns.
                    f32 yx = m[3] * y_ + m[6], yy = m[4] * y_ + m[7], yw = m[5] * y_ + m[8];
                    for (ptrdiff_t x_ = x + j; x < blockWidth; ++x, ++x_)
                    {
                        f32 w_f = 1.0f / (m[2] * x_ + yw);
                        f32 src_x_f = (m[0] * x_ + yx) * w_f;
                        f32 src_y_f = (m[1] * x_ + yy) * w_f;
                        s32 src_x = floorf(src_x_f), src_y = floorf(src_y_f);

                        // Replicate border: clamp to the valid pixel range.
                        src_x = std::max(0, std::min<s32>(ssize.width - 1, src_x));
                        src_y = std::max(0, std::min<s32>(ssize.height - 1, src_y));
                        map_row[x] = src_y * srcStride + src_x;
                    }
                }

                // make remap: copy pixels through the offset table
                remapNearestNeighborReplicate(Size2D(blockWidth, blockHeight), srcBase, &map[0],
                                              getRowPtr(dstBase, dstStride, i) + j, dstStride);
            }
        }
    }
    else if (borderMode == BORDER_MODE_CONSTANT)
    {
        // Out-of-range pixels get offset -1; the remap helper replaces them
        // with borderValue.
        int32x4_t v_m1_4 = vdupq_n_s32(-1);
        float32x4_t v_zero4 = vdupq_n_f32(0.0f);

        for (size_t i = 0; i < dsize.height; i += BLOCK_SIZE)
        {
            size_t blockHeight = std::min<size_t>(BLOCK_SIZE, dsize.height - i);
            for (size_t j = 0; j < dsize.width; j += BLOCK_SIZE)
            {
                size_t blockWidth = std::min<size_t>(BLOCK_SIZE, dsize.width - j);

                // compute table
                for (size_t y = 0; y < blockHeight; ++y)
                {
                    s32 * map_row = getRowPtr(&map[0], blockWidth * sizeof(s32), y);

                    size_t x = 0, y_ = y + i;
                    f32 indeces[4] = { j + 0.0f, j + 1.0f, j + 2.0f, j + 3.0f };
                    float32x4_t v_x = vld1q_f32(indeces), v_y = vdupq_n_f32(y_);
                    float32x4_t v_yx = vmlaq_f32(v_m6, v_m3, v_y), v_yy = vmlaq_f32(v_m7, v_m4, v_y),
                                v_yw = vmlaq_f32(v_m8, v_m5, v_y);

                    for ( ; x + 4 <= blockWidth; x += 4)
                    {
                        float32x4_t v_src_xf = vmlaq_f32(v_yx, v_m0, v_x);
                        float32x4_t v_src_yf = vmlaq_f32(v_yy, v_m1, v_x);
                        float32x4_t v_wf = vrecpq_f32(vmlaq_f32(v_yw, v_m2, v_x));
                        v_src_xf = vmulq_f32(v_wf, v_src_xf);
                        v_src_yf = vmulq_f32(v_wf, v_src_yf);

                        int32x4_t v_src_x = vcvtq_s32_f32(v_src_xf);
                        int32x4_t v_src_y = vcvtq_s32_f32(v_src_yf);
                        // Lane is in range when 0 <= coord (tested on the
                        // float, before truncation) and coord <= size-1;
                        // out-of-range lanes are replaced with -1.
                        uint32x4_t v_mask = vandq_u32(vandq_u32(vcgeq_f32(v_src_xf, v_zero4), vcleq_s32(v_src_x, v_width4)),
                                                      vandq_u32(vcgeq_f32(v_src_yf, v_zero4), vcleq_s32(v_src_y, v_height4)));
                        int32x4_t v_src_index = vbslq_s32(v_mask, vmlaq_s32(v_src_x, v_src_y, v_step4), v_m1_4);
                        vst1q_s32(map_row + x, v_src_index);

                        v_x = vaddq_f32(v_x, v_4);
                    }

                    // Scalar tail, same in-range test as the vector body.
                    f32 yx = m[3] * y_ + m[6], yy = m[4] * y_ + m[7], yw = m[5] * y_ + m[8];
                    for (ptrdiff_t x_ = x + j; x < blockWidth; ++x, ++x_)
                    {
                        f32 w_f = 1.0f / (m[2] * x_ + yw);
                        f32 src_x_f = (m[0] * x_ + yx) * w_f;
                        f32 src_y_f = (m[1] * x_ + yy) * w_f;
                        s32 src_x = floorf(src_x_f), src_y = floorf(src_y_f);

                        map_row[x] = (src_x >= 0) && (src_x < (s32)ssize.width) &&
                                     (src_y >= 0) && (src_y < (s32)ssize.height) ? src_y * srcStride + src_x : -1;
                    }
                }

                // make remap
                remapNearestNeighborConst(Size2D(blockWidth, blockHeight), srcBase, &map[0],
                                          getRowPtr(dstBase, dstStride, i) + j, dstStride, borderValue);
            }
        }
    }
#else
    (void)ssize;
    (void)dsize;
    (void)srcBase;
    (void)srcStride;
    (void)m;
    (void)dstBase;
    (void)dstStride;
    (void)borderMode;
    (void)borderValue;
#endif
}
/*
 * Bilinear perspective warp of a single-channel 8-bit image.
 *
 * Parameters are identical to warpPerspectiveNearestNeighbor (same matrix
 * layout, border modes and strides).  For every destination pixel the tile
 * tables hold:
 *   map    - 4 source offsets: the 2x2 neighborhood (x0y0, x1y0, x0y1, x1y1)
 *   coeffs - 2 interpolation weights: the fractional x and y parts
 * The actual interpolation is performed by the remapLinear* helpers.
 */
void warpPerspectiveLinear(const Size2D &ssize, const Size2D &dsize,
                           const u8 * srcBase, ptrdiff_t srcStride,
                           const f32 * m,
                           u8 * dstBase, ptrdiff_t dstStride,
                           BORDER_MODE borderMode, u8 borderValue)
{
    internal::assertSupportedConfiguration(isWarpPerspectiveLinearSupported(ssize));
#ifdef CAROTENE_NEON
    using namespace internal;

    // Tile-local tables (4 offsets + 2 coefficients per pixel), each
    // over-allocated by 16 elements so the pointer can be 16-byte aligned.
    s32 _map[((BLOCK_SIZE * BLOCK_SIZE) << 2) + 16];
    f32 _coeffs[((BLOCK_SIZE * BLOCK_SIZE) << 1) + 16];
    s32 * map = alignPtr(_map, 16);
    f32 * coeffs = alignPtr(_coeffs, 16);

    // Broadcast constants: clamp limits, row stride, +1 for the second
    // neighbor, float 0/1 for the floor fixup and 4.0f for the x advance.
    int32x4_t v_width4 = vdupq_n_s32(ssize.width - 1), v_height4 = vdupq_n_s32(ssize.height - 1);
    int32x4_t v_step4 = vdupq_n_s32(srcStride), v_1 = vdupq_n_s32(1);
    float32x4_t v_zero4f = vdupq_n_f32(0.0f), v_one4f = vdupq_n_f32(1.0f);
    float32x4_t v_4 = vdupq_n_f32(4.0f);

    // The nine matrix coefficients, one broadcast register each.
    float32x4_t v_m0 = vdupq_n_f32(m[0]);
    float32x4_t v_m1 = vdupq_n_f32(m[1]);
    float32x4_t v_m2 = vdupq_n_f32(m[2]);
    float32x4_t v_m3 = vdupq_n_f32(m[3]);
    float32x4_t v_m4 = vdupq_n_f32(m[4]);
    float32x4_t v_m5 = vdupq_n_f32(m[5]);
    float32x4_t v_m6 = vdupq_n_f32(m[6]);
    float32x4_t v_m7 = vdupq_n_f32(m[7]);
    float32x4_t v_m8 = vdupq_n_f32(m[8]);

    if (borderMode == BORDER_MODE_REPLICATE)
    {
        int32x4_t v_zero4 = vdupq_n_s32(0);

        for (size_t i = 0; i < dsize.height; i += BLOCK_SIZE)
        {
            size_t blockHeight = std::min<size_t>(BLOCK_SIZE, dsize.height - i);
            for (size_t j = 0; j < dsize.width; j += BLOCK_SIZE)
            {
                size_t blockWidth = std::min<size_t>(BLOCK_SIZE, dsize.width - j);

                // compute table (offsets clamped into the image)
                for (size_t y = 0; y < blockHeight; ++y)
                {
                    s32 * map_row = getRowPtr(map, blockWidth * sizeof(s32) * 4, y);
                    f32 * coeff_row = getRowPtr(coeffs, blockWidth * sizeof(f32) * 2, y);

                    size_t x = 0, y_ = y + i;
                    f32 indeces[4] = { j + 0.0f, j + 1.0f, j + 2.0f, j + 3.0f };
                    float32x4_t v_x = vld1q_f32(indeces), v_y = vdupq_n_f32(y_);
                    // Row-invariant parts hoisted out of the inner x loop.
                    float32x4_t v_yx = vmlaq_f32(v_m6, v_m3, v_y), v_yy = vmlaq_f32(v_m7, v_m4, v_y),
                                v_yw = vmlaq_f32(v_m8, v_m5, v_y);

                    // Vector body: four destination columns per iteration.
                    for ( ; x + 4 <= blockWidth; x += 4)
                    {
                        float32x4_t v_src_xf = vmlaq_f32(v_yx, v_m0, v_x);
                        float32x4_t v_src_yf = vmlaq_f32(v_yy, v_m1, v_x);
                        // vrecpq_f32: carotene-internal approximate 1/w.
                        float32x4_t v_wf = vrecpq_f32(vmlaq_f32(v_yw, v_m2, v_x));
                        v_src_xf = vmulq_f32(v_wf, v_src_xf);
                        v_src_yf = vmulq_f32(v_wf, v_src_yf);

                        int32x4_t v_src_x = vcvtq_s32_f32(v_src_xf);
                        int32x4_t v_src_y = vcvtq_s32_f32(v_src_yf);
                        float32x4x2_t v_coeff;
                        v_coeff.val[0] = vsubq_f32(v_src_xf, vcvtq_f32_s32(v_src_x));
                        v_coeff.val[1] = vsubq_f32(v_src_yf, vcvtq_f32_s32(v_src_y));
                        // vcvt truncates toward zero, so for negative
                        // coordinates the fraction comes out negative: shift
                        // it into [0,1) and decrement the integer part,
                        // turning truncation into floor().
                        uint32x4_t v_maskx = vcltq_f32(v_coeff.val[0], v_zero4f);
                        uint32x4_t v_masky = vcltq_f32(v_coeff.val[1], v_zero4f);
                        v_coeff.val[0] = vbslq_f32(v_maskx, vaddq_f32(v_one4f, v_coeff.val[0]), v_coeff.val[0]);
                        v_coeff.val[1] = vbslq_f32(v_masky, vaddq_f32(v_one4f, v_coeff.val[1]), v_coeff.val[1]);
                        v_src_x = vbslq_s32(v_maskx, vsubq_s32(v_src_x, v_1), v_src_x);
                        v_src_y = vbslq_s32(v_masky, vsubq_s32(v_src_y, v_1), v_src_y);

                        // Clamp the 2x2 neighborhood to the image (replicate).
                        int32x4_t v_dst0_x = vmaxq_s32(v_zero4, vminq_s32(v_width4, v_src_x));
                        int32x4_t v_dst0_y = vmaxq_s32(v_zero4, vminq_s32(v_height4, v_src_y));
                        int32x4_t v_dst1_x = vmaxq_s32(v_zero4, vminq_s32(v_width4, vaddq_s32(v_1, v_src_x)));
                        int32x4_t v_dst1_y = vmaxq_s32(v_zero4, vminq_s32(v_height4, vaddq_s32(v_1, v_src_y)));

                        // Four interleaved offsets per pixel via vst4.
                        int32x4x4_t v_dst_index;
                        v_dst_index.val[0] = vmlaq_s32(v_dst0_x, v_dst0_y, v_step4);
                        v_dst_index.val[1] = vmlaq_s32(v_dst1_x, v_dst0_y, v_step4);
                        v_dst_index.val[2] = vmlaq_s32(v_dst0_x, v_dst1_y, v_step4);
                        v_dst_index.val[3] = vmlaq_s32(v_dst1_x, v_dst1_y, v_step4);
                        vst2q_f32(coeff_row + (x << 1), v_coeff);
                        vst4q_s32(map_row + (x << 2), v_dst_index);

                        v_x = vaddq_f32(v_x, v_4);
                    }

                    // Scalar tail for the remaining (< 4) columns.
                    f32 yx = m[3] * y_ + m[6], yy = m[4] * y_ + m[7], yw = m[5] * y_ + m[8];
                    for (ptrdiff_t x_ = x + j; x < blockWidth; ++x, ++x_)
                    {
                        f32 w_f = 1.0f / (m[2] * x_ + yw);
                        f32 src_x_f = (m[0] * x_ + yx) * w_f;
                        f32 src_y_f = (m[1] * x_ + yy) * w_f;

                        s32 src0_x = (s32)floorf(src_x_f);
                        s32 src0_y = (s32)floorf(src_y_f);

                        coeff_row[(x << 1) + 0] = src_x_f - src0_x;
                        coeff_row[(x << 1) + 1] = src_y_f - src0_y;

                        s32 src1_y = std::max(0, std::min<s32>(ssize.height - 1, src0_y + 1));
                        src0_y = std::max(0, std::min<s32>(ssize.height - 1, src0_y));
                        s32 src1_x = std::max(0, std::min<s32>(ssize.width - 1, src0_x + 1));
                        src0_x = std::max(0, std::min<s32>(ssize.width - 1, src0_x));

                        map_row[(x << 2) + 0] = src0_y * srcStride + src0_x;
                        map_row[(x << 2) + 1] = src0_y * srcStride + src1_x;
                        map_row[(x << 2) + 2] = src1_y * srcStride + src0_x;
                        map_row[(x << 2) + 3] = src1_y * srcStride + src1_x;
                    }
                }

                remapLinearReplicate(Size2D(blockWidth, blockHeight),
                                     srcBase, &map[0], &coeffs[0],
                                     getRowPtr(dstBase, dstStride, i) + j, dstStride);
            }
        }
    }
    else if (borderMode == BORDER_MODE_CONSTANT)
    {
        // Out-of-range neighbors get offset -1; the remap helper substitutes
        // borderValue for them.
        float32x4_t v_zero4 = vdupq_n_f32(0.0f);
        int32x4_t v_m1_4 = vdupq_n_s32(-1);

        for (size_t i = 0; i < dsize.height; i += BLOCK_SIZE)
        {
            size_t blockHeight = std::min<size_t>(BLOCK_SIZE, dsize.height - i);
            for (size_t j = 0; j < dsize.width; j += BLOCK_SIZE)
            {
                size_t blockWidth = std::min<size_t>(BLOCK_SIZE, dsize.width - j);

                // compute table
                for (size_t y = 0; y < blockHeight; ++y)
                {
                    s32 * map_row = getRowPtr(map, blockWidth * sizeof(s32) * 4, y);
                    f32 * coeff_row = getRowPtr(coeffs, blockWidth * sizeof(f32) * 2, y);

                    size_t x = 0, y_ = y + i;
                    f32 indeces[4] = { j + 0.0f, j + 1.0f, j + 2.0f, j + 3.0f };
                    float32x4_t v_x = vld1q_f32(indeces), v_y = vdupq_n_f32(y_);
                    float32x4_t v_yx = vmlaq_f32(v_m6, v_m3, v_y), v_yy = vmlaq_f32(v_m7, v_m4, v_y),
                                v_yw = vmlaq_f32(v_m8, v_m5, v_y);

                    for ( ; x + 4 <= blockWidth; x += 4)
                    {
                        float32x4_t v_src_xf = vmlaq_f32(v_yx, v_m0, v_x);
                        float32x4_t v_src_yf = vmlaq_f32(v_yy, v_m1, v_x);
                        float32x4_t v_wf = vrecpq_f32(vmlaq_f32(v_yw, v_m2, v_x));
                        v_src_xf = vmulq_f32(v_wf, v_src_xf);
                        v_src_yf = vmulq_f32(v_wf, v_src_yf);

                        int32x4_t v_src_x0 = vcvtq_s32_f32(v_src_xf);
                        int32x4_t v_src_y0 = vcvtq_s32_f32(v_src_yf);
                        float32x4x2_t v_coeff;
                        v_coeff.val[0] = vsubq_f32(v_src_xf, vcvtq_f32_s32(v_src_x0));
                        v_coeff.val[1] = vsubq_f32(v_src_yf, vcvtq_f32_s32(v_src_y0));
                        // Same truncation-to-floor fixup as in the replicate
                        // branch above.
                        uint32x4_t v_maskx = vcltq_f32(v_coeff.val[0], v_zero4f);
                        uint32x4_t v_masky = vcltq_f32(v_coeff.val[1], v_zero4f);
                        v_coeff.val[0] = vbslq_f32(v_maskx, vaddq_f32(v_one4f, v_coeff.val[0]), v_coeff.val[0]);
                        v_coeff.val[1] = vbslq_f32(v_masky, vaddq_f32(v_one4f, v_coeff.val[1]), v_coeff.val[1]);
                        v_src_x0 = vbslq_s32(v_maskx, vsubq_s32(v_src_x0, v_1), v_src_x0);
                        v_src_y0 = vbslq_s32(v_masky, vsubq_s32(v_src_y0, v_1), v_src_y0);

                        int32x4_t v_src_x1 = vaddq_s32(v_src_x0, v_1);
                        int32x4_t v_src_y1 = vaddq_s32(v_src_y0, v_1);
                        int32x4x4_t v_dst_index;
                        v_dst_index.val[0] = vmlaq_s32(v_src_x0, v_src_y0, v_step4);
                        v_dst_index.val[1] = vmlaq_s32(v_src_x1, v_src_y0, v_step4);
                        v_dst_index.val[2] = vmlaq_s32(v_src_x0, v_src_y1, v_step4);
                        v_dst_index.val[3] = vmlaq_s32(v_src_x1, v_src_y1, v_step4);

                        // Per-neighbor validity: the x1/y1 lane is valid when
                        // the float coordinate + 1 is still >= 0 (i.e. coord
                        // >= -1) and the integer coordinate is <= size-1.
                        uint32x4_t v_mask_x0 = vandq_u32(vcgeq_f32(v_src_xf, v_zero4), vcleq_s32(v_src_x0, v_width4));
                        uint32x4_t v_mask_x1 = vandq_u32(vcgeq_f32(vaddq_f32(v_src_xf, v_one4f), v_zero4), vcleq_s32(v_src_x1, v_width4));
                        uint32x4_t v_mask_y0 = vandq_u32(vcgeq_f32(v_src_yf, v_zero4), vcleq_s32(v_src_y0, v_height4));
                        uint32x4_t v_mask_y1 = vandq_u32(vcgeq_f32(vaddq_f32(v_src_yf, v_one4f), v_zero4), vcleq_s32(v_src_y1, v_height4));

                        v_dst_index.val[0] = vbslq_s32(vandq_u32(v_mask_x0, v_mask_y0), v_dst_index.val[0], v_m1_4);
                        v_dst_index.val[1] = vbslq_s32(vandq_u32(v_mask_x1, v_mask_y0), v_dst_index.val[1], v_m1_4);
                        v_dst_index.val[2] = vbslq_s32(vandq_u32(v_mask_x0, v_mask_y1), v_dst_index.val[2], v_m1_4);
                        v_dst_index.val[3] = vbslq_s32(vandq_u32(v_mask_x1, v_mask_y1), v_dst_index.val[3], v_m1_4);

                        vst2q_f32(coeff_row + (x << 1), v_coeff);
                        vst4q_s32(map_row + (x << 2), v_dst_index);

                        v_x = vaddq_f32(v_x, v_4);
                    }

                    // Scalar tail: each of the four neighbors is validated
                    // independently against the image bounds.
                    f32 yx = m[3] * y_ + m[6], yy = m[4] * y_ + m[7], yw = m[5] * y_ + m[8];
                    for (ptrdiff_t x_ = x + j; x < blockWidth; ++x, ++x_)
                    {
                        f32 w_f = 1.0f / (m[2] * x_ + yw);
                        f32 src_x_f = (m[0] * x_ + yx) * w_f;
                        f32 src_y_f = (m[1] * x_ + yy) * w_f;

                        s32 src0_x = (s32)floorf(src_x_f), src1_x = src0_x + 1;
                        s32 src0_y = (s32)floorf(src_y_f), src1_y = src0_y + 1;

                        coeff_row[(x << 1) + 0] = src_x_f - src0_x;
                        coeff_row[(x << 1) + 1] = src_y_f - src0_y;

                        map_row[(x << 2) + 0] = (src0_x >= 0) && (src0_x < (s32)ssize.width) &&
                                                (src0_y >= 0) && (src0_y < (s32)ssize.height) ? src0_y * srcStride + src0_x : -1;
                        map_row[(x << 2) + 1] = (src1_x >= 0) && (src1_x < (s32)ssize.width) &&
                                                (src0_y >= 0) && (src0_y < (s32)ssize.height) ? src0_y * srcStride + src1_x : -1;
                        map_row[(x << 2) + 2] = (src0_x >= 0) && (src0_x < (s32)ssize.width) &&
                                                (src1_y >= 0) && (src1_y < (s32)ssize.height) ? src1_y * srcStride + src0_x : -1;
                        map_row[(x << 2) + 3] = (src1_x >= 0) && (src1_x < (s32)ssize.width) &&
                                                (src1_y >= 0) && (src1_y < (s32)ssize.height) ? src1_y * srcStride + src1_x : -1;
                    }
                }

                remapLinearConst(Size2D(blockWidth, blockHeight),
                                 srcBase, &map[0], &coeffs[0],
                                 getRowPtr(dstBase, dstStride, i) + j, dstStride, borderValue);
            }
        }
    }
#else
    (void)ssize;
    (void)dsize;
    (void)srcBase;
    (void)srcStride;
    (void)m;
    (void)dstBase;
    (void)dstStride;
    (void)borderMode;
    (void)borderValue;
#endif
}
} // namespace CAROTENE_NS
# Build the Android NDK "cpufeatures" sources as an OpenCV 3rdparty static
# library (runtime CPU family / feature detection).
if(NOT ANDROID)
  message("cpufeatures is ANDROID project")
endif()

ocv_update(OPENCV_CPUFEATURES_TARGET_NAME libcpufeatures)

# Source location: defaults to this directory, but may be pointed at the
# NDK copy (<android-ndk>/sources/android/cpufeatures).
set(CPUFEATURES_ROOT "${CMAKE_CURRENT_SOURCE_DIR}" CACHE PATH "Android cpufeatures project sources (for example, <android-ndk>/sources/android/cpufeatures)")
set(CPUFEATURES_INCLUDE_DIRS ${CPUFEATURES_ROOT} CACHE INTERNAL "")
set(CPUFEATURES_LIBRARIES "${OPENCV_CPUFEATURES_TARGET_NAME}" CACHE INTERNAL "")

if(NOT DEFINED CPUFEATURES_SOURCES)
  set(CPUFEATURES_SOURCES ${CPUFEATURES_ROOT}/cpu-features.c ${CPUFEATURES_ROOT}/cpu-features.h)
endif()

include_directories(${CPUFEATURES_INCLUDE_DIRS})
add_library(${OPENCV_CPUFEATURES_TARGET_NAME} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${CPUFEATURES_SOURCES})
set_target_properties(${OPENCV_CPUFEATURES_TARGET_NAME}
    PROPERTIES OUTPUT_NAME cpufeatures
    DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
    COMPILE_PDB_NAME cpufeatures
    COMPILE_PDB_NAME_DEBUG "cpufeatures${OPENCV_DEBUG_POSTFIX}"
    ARCHIVE_OUTPUT_DIRECTORY ${3P_LIBRARY_OUTPUT_PATH}
    )

if(ENABLE_SOLUTION_FOLDERS)
  set_target_properties(${OPENCV_CPUFEATURES_TARGET_NAME} PROPERTIES FOLDER "3rdparty")
endif()

# Static OpenCV builds install the archive so user projects can link it.
if(NOT BUILD_SHARED_LIBS)
  ocv_install_target(${OPENCV_CPUFEATURES_TARGET_NAME} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
endif()

ocv_install_3rdparty_licenses(cpufeatures LICENSE README.md)
Copyright (C) 2016 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The Android NDK provides a small library named cpufeatures that your app can use at runtime to detect the target device's CPU family and the optional features it supports.
It is designed to work as-is on all official Android platform versions.
https://developer.android.com/ndk/guides/cpu-features.html
/*
* Copyright (C) 2010 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* ChangeLog for this library:
*
* NDK r10e?: Add MIPS MSA feature.
*
* NDK r10: Support for 64-bit CPUs (Intel, ARM & MIPS).
*
* NDK r8d: Add android_setCpu().
*
* NDK r8c: Add new ARM CPU features: VFPv2, VFP_D32, VFP_FP16,
* VFP_FMA, NEON_FMA, IDIV_ARM, IDIV_THUMB2 and iWMMXt.
*
* Rewrite the code to parse /proc/self/auxv instead of
* the "Features" field in /proc/cpuinfo.
*
* Dynamically allocate the buffer that hold the content
* of /proc/cpuinfo to deal with newer hardware.
*
* NDK r7c: Fix CPU count computation. The old method only reported the
* number of _active_ CPUs when the library was initialized,
* which could be less than the real total.
*
* NDK r5: Handle buggy kernels which report a CPU Architecture number of 7
* for an ARMv6 CPU (see below).
*
* Handle kernels that only report 'neon', and not 'vfpv3'
* (VFPv3 is mandated by the ARM architecture if Neon is implemented)
*
* Handle kernels that only report 'vfpv3d16', and not 'vfpv3'
*
* Fix x86 compilation. Report ANDROID_CPU_FAMILY_X86 in
* android_getCpuFamily().
*
* NDK r4: Initial release
*/
#include "cpu-features.h"

#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  /* strlen / memcmp / memchr / memmem / memcpy / strerror */
#include <sys/system_properties.h>
#include <unistd.h>
/* One-time initialization guard and the cached CPU information it fills
 * in (the init routine itself is outside this chunk). */
static pthread_once_t g_once;
static int g_inited;
static AndroidCpuFamily g_cpuFamily;
static uint64_t g_cpuFeatures;
static int g_cpuCount;
#ifdef __arm__
static uint32_t g_cpuIdArm;  /* raw ARM CPUID value */
#endif

/* Set to 1 to enable debug traces from this file. */
static const int android_cpufeatures_debug = 0;

/* Debug printf: expands to a flushed printf when the flag above is set,
 * otherwise compiles to a no-op. */
#define D(...) \
    do { \
        if (android_cpufeatures_debug) { \
            printf(__VA_ARGS__); fflush(stdout); \
        } \
    } while (0)
#ifdef __i386__
/* Execute the CPUID instruction for leaf 'func' and store the resulting
 * EAX..EDX registers into values[0..3].  EBX is saved/restored by hand
 * because it is the PIC base register on 32-bit x86. */
static __inline__ void x86_cpuid(int func, int values[4])
{
    int a, b, c, d;
    /* We need to preserve ebx since we're compiling PIC code */
    /* this means we can't use "=b" for the second output register */
    __asm__ __volatile__ ( \
        "push %%ebx\n"
        "cpuid\n" \
        "mov %%ebx, %1\n"
        "pop %%ebx\n"
        : "=a" (a), "=r" (b), "=c" (c), "=d" (d) \
        : "a" (func) \
    );
    values[0] = a;
    values[1] = b;
    values[2] = c;
    values[3] = d;
}
#elif defined(__x86_64__)
/* 64-bit variant of x86_cpuid(): same contract, but the temporaries are
 * 64-bit (RAX..RDX) and are truncated to int when stored into values[]. */
static __inline__ void x86_cpuid(int func, int values[4])
{
    int64_t a, b, c, d;
    /* We need to preserve ebx since we're compiling PIC code */
    /* this means we can't use "=b" for the second output register */
    __asm__ __volatile__ ( \
        "push %%rbx\n"
        "cpuid\n" \
        "mov %%rbx, %1\n"
        "pop %%rbx\n"
        : "=a" (a), "=r" (b), "=c" (c), "=d" (d) \
        : "a" (func) \
    );
    values[0] = a;
    values[1] = b;
    values[2] = c;
    values[3] = d;
}
#endif
/* Return the size of a file by reading it through to the end.  This is
 * needed because files under /proc do not always return a valid size via
 * fseek(0, SEEK_END) + ftell(), nor can they be mmap()-ed.
 * Returns -1 if the file cannot be opened. */
static int
get_file_size(const char* pathname)
{
    char scratch[256];
    int total = 0;

    int fd = open(pathname, O_RDONLY);
    if (fd < 0) {
        D("Can't open %s: %s\n", pathname, strerror(errno));
        return -1;
    }
    for (;;) {
        int n = read(fd, scratch, sizeof scratch);
        if (n == 0)   /* end of file */
            break;
        if (n < 0) {
            if (errno == EINTR)   /* interrupted - just retry */
                continue;
            D("Error while reading %s: %s\n", pathname, strerror(errno));
            break;
        }
        total += n;
    }
    close(fd);
    return total;
}
/* Read the content of a file (e.g. /proc/cpuinfo) into a user-provided
 * buffer.  Return the length of the data, or -1 on error.  Does *not*
 * zero-terminate the content.  Will not read more than 'buffsize' bytes.
 */
static int
read_file(const char* pathname, char* buffer, size_t buffsize)
{
    int fd, count;

    fd = open(pathname, O_RDONLY);
    if (fd < 0) {
        D("Could not open %s: %s\n", pathname, strerror(errno));
        return -1;
    }
    count = 0;
    while (count < (int)buffsize) {
        int ret = read(fd, buffer + count, buffsize - count);
        if (ret < 0) {
            if (errno == EINTR)   /* interrupted - retry */
                continue;
            D("Error while reading from %s: %s\n", pathname, strerror(errno));
            /* report the error only if nothing was read so far */
            if (count == 0)
                count = -1;
            break;
        }
        if (ret == 0)   /* end of file */
            break;
        count += ret;
    }
    close(fd);
    return count;
}
#ifdef __arm__
/* Extract the content of the first occurrence of a given field in
 * the content of /proc/cpuinfo and return it as a heap-allocated
 * string that must be freed by the caller.
 *
 * Return NULL if not found
 */
static char*
extract_cpuinfo_field(const char* buffer, int buflen, const char* field)
{
    int fieldlen = strlen(field);
    const char* bufend = buffer + buflen;
    char* result = NULL;
    int len;
    const char *p, *q;

    /* Look for the first field occurrence, and ensure it starts a line
     * (either the beginning of the buffer or right after a newline). */
    p = buffer;
    for (;;) {
        p = memmem(p, bufend-p, field, fieldlen);
        if (p == NULL)
            goto EXIT;

        if (p == buffer || p[-1] == '\n')
            break;

        p += fieldlen;
    }

    /* Skip to the first colon followed by a space */
    p += fieldlen;
    p = memchr(p, ':', bufend-p);
    if (p == NULL || p[1] != ' ')
        goto EXIT;

    /* Find the end of the line */
    p += 2;
    q = memchr(p, '\n', bufend-p);
    if (q == NULL)
        q = bufend;

    /* Copy the line into a heap-allocated buffer */
    len = q-p;
    result = malloc(len+1);
    if (result == NULL)
        goto EXIT;

    memcpy(result, p, len);
    result[len] = '\0';

EXIT:
    return result;
}
/* Return 1 if the space/tab-separated 'list' contains a token exactly
 * equal to 'item', 0 otherwise (including when 'list' is NULL). */
static int
has_list_item(const char* list, const char* item)
{
    if (list == NULL)
        return 0;

    const size_t want = strlen(item);
    const char* cursor = list;

    while (*cursor) {
        /* skip leading separators */
        while (*cursor == ' ' || *cursor == '\t')
            cursor++;

        /* measure the current token */
        const char* token = cursor;
        while (*cursor && *cursor != ' ' && *cursor != '\t')
            cursor++;

        if ((size_t)(cursor - token) == want && !memcmp(token, item, want))
            return 1;
    }
    return 0;
}
#endif /* __arm__ */
/* Parse an unsigned number in the given 'base' starting at 'input', but not
 * going further than 'limit'.  Digits a-f / A-F are accepted for bases
 * above ten.  The value is stored into '*result'.
 *
 * NOTE: Does not skip over leading spaces, or deal with sign characters.
 * NOTE: Ignores overflows.
 *
 * Returns the position just past the parsed number (always <= 'limit'),
 * or NULL if not even a single digit could be consumed.
 */
static const char*
parse_number(const char* input, const char* limit, int base, int* result)
{
    const char* cursor = input;
    int value = 0;

    for (; cursor < limit; ++cursor) {
        const char c = *cursor;
        int digit;

        if (c >= '0' && c <= '9')
            digit = c - '0';
        else if (c >= 'a' && c <= 'f')
            digit = c - 'a' + 10;
        else if (c >= 'A' && c <= 'F')
            digit = c - 'A' + 10;
        else
            break;

        if (digit >= base)
            break;
        value = value * base + digit;
    }

    if (cursor == input)
        return NULL;

    *result = value;
    return cursor;
}
/* Convenience wrapper around parse_number() for base-10 input. */
static const char*
parse_decimal(const char* begin, const char* end, int* out)
{
    return parse_number(begin, end, 10, out);
}
#ifdef __arm__
/* Base-16 counterpart of parse_decimal(); same contract as parse_number().
 * Callers are not visible in this chunk - presumably used for the hex
 * fields of /proc/cpuinfo on ARM. */
static const char*
parse_hexadecimal(const char* input, const char* limit, int* result)
{
    return parse_number(input, limit, 16, result);
}
#endif /* __arm__ */
/* This small data type is used to represent a CPU list / mask, as read
 * from sysfs on Linux. See http://www.kernel.org/doc/Documentation/cputopology.txt
 *
 * For now, we don't expect more than 32 cores on mobile devices, so keep
 * everything simple.
 */
typedef struct {
    uint32_t mask;   /* bit N set <=> cpu N belongs to the list */
} CpuList;
/* Reset 'list' to the empty CPU set. */
static __inline__ void
cpulist_init(CpuList* list) {
    list->mask = 0;
}
/* Intersect 'list1' with 'list2', storing the result in 'list1'. */
static __inline__ void
cpulist_and(CpuList* list1, CpuList* list2) {
    list1->mask &= list2->mask;
}
/* Add cpu 'index' to the set; indices outside [0,31] are silently ignored. */
static __inline__ void
cpulist_set(CpuList* list, int index) {
    if ((unsigned)index < 32) {
        list->mask |= (uint32_t)(1U << index);
    }
}
/* Return the number of cpus contained in the set. */
static __inline__ int
cpulist_count(CpuList* list) {
    return __builtin_popcount(list->mask);
}
/* Parse a textual list of cpus and store the result inside a CpuList object.
 * Input format is the following:
 * - comma-separated list of items (no spaces)
 * - each item is either a single decimal number (cpu index), or a range made
 *   of two numbers separated by a single dash (-). Ranges are inclusive.
 *
 * Examples:   0
 *             2,4-127,128-143
 *             0-1
 */
static void
cpulist_parse(CpuList* list, const char* line, int line_len)
{
    const char* p = line;
    const char* end = p + line_len;
    const char* q;

    /* NOTE: the input line coming from sysfs typically contains a
     * trailing newline, so take care of it in the code below
     */
    while (p < end && *p != '\n')
    {
        int val, start_value, end_value;

        /* Find the end of current item, and put it into 'q' */
        q = memchr(p, ',', end-p);
        if (q == NULL) {
            q = end;
        }

        /* Get first value */
        p = parse_decimal(p, q, &start_value);
        if (p == NULL)
            goto BAD_FORMAT;

        end_value = start_value;

        /* If we're not at the end of the item, expect a dash followed
         * by an integer; extract the end value.
         */
        if (p < q && *p == '-') {
            p = parse_decimal(p+1, q, &end_value);
            if (p == NULL)
                goto BAD_FORMAT;
        }

        /* Set the bits for the inclusive [start_value, end_value] range */
        for (val = start_value; val <= end_value; val++) {
            cpulist_set(list, val);
        }

        /* Jump to next item */
        p = q;
        if (p < end)
            p++;
    }

BAD_FORMAT:
    ;   /* on malformed input, keep whatever bits were already set */
}
/* Initialize 'list' from the contents of the sysfs file 'filename';
 * on a read failure the list is simply left empty. */
static void
cpulist_read_from(CpuList* list, const char* filename)
{
    char buffer[64];

    cpulist_init(list);

    int len = read_file(filename, buffer, sizeof buffer);
    if (len < 0) {
        D("Could not read %s: %s\n", filename, strerror(errno));
        return;
    }

    cpulist_parse(list, buffer, len);
}
#if defined(__aarch64__)
// AArch64 ELF hwcap bits - see <uapi/asm/hwcap.h> kernel header
#define HWCAP_FP (1 << 0)
#define HWCAP_ASIMD (1 << 1)
#define HWCAP_AES (1 << 3)
#define HWCAP_PMULL (1 << 4)
#define HWCAP_SHA1 (1 << 5)
#define HWCAP_SHA2 (1 << 6)
#define HWCAP_CRC32 (1 << 7)
#endif

#if defined(__arm__)
// 32-bit ARM ELF hwcap bits - see <asm/hwcap.h> kernel header.
#define HWCAP_VFP (1 << 6)
#define HWCAP_IWMMXT (1 << 9)
#define HWCAP_NEON (1 << 12)
#define HWCAP_VFPv3 (1 << 13)
#define HWCAP_VFPv3D16 (1 << 14)
#define HWCAP_VFPv4 (1 << 16)
#define HWCAP_IDIVA (1 << 17)
#define HWCAP_IDIVT (1 << 18)
// AT_HWCAP2 bits - see <uapi/asm/hwcap.h> kernel header
#define HWCAP2_AES (1 << 0)
#define HWCAP2_PMULL (1 << 1)
#define HWCAP2_SHA1 (1 << 2)
#define HWCAP2_SHA2 (1 << 3)
#define HWCAP2_CRC32 (1 << 4)

// This is the list of 32-bit ARMv7 optional features that are _always_
// supported by ARMv8 CPUs, as mandated by the ARM Architecture Reference
// Manual.
#define HWCAP_SET_FOR_ARMV8 \
  ( HWCAP_VFP | \
    HWCAP_NEON | \
    HWCAP_VFPv3 | \
    HWCAP_VFPv4 | \
    HWCAP_IDIVA | \
    HWCAP_IDIVT )
#endif

#if defined(__mips__)
// MIPS ELF hwcap bits - see <uapi/asm/hwcap.h> kernel header
#define HWCAP_MIPS_R6 (1 << 0)
#define HWCAP_MIPS_MSA (1 << 1)
#endif
#if defined(__arm__) || defined(__aarch64__) || defined(__mips__)

/* ELF auxiliary-vector tags for the HW capability bitmaps. */
#define AT_HWCAP 16
#define AT_HWCAP2 26

// Probe the system's C library for a 'getauxval' function and call it if
// it exists, or return 0 for failure. This function is available since API
// level 20.
//
// This code does *NOT* check for '__ANDROID_API__ >= 20' to support the
// edge case where some NDK developers use headers for a platform that is
// newer than the one really targeted by their application.
// This is typically done to use newer native APIs only when running on more
// recent Android versions, and requires careful symbol management.
//
// Note that getauxval() can't really be re-implemented here, because
// its implementation does not parse /proc/self/auxv. Instead it depends
// on values that are passed by the kernel at process-init time to the
// C runtime initialization layer.
#if 1
// OpenCV calls CPU features check during library initialization stage
// (under other dlopen() call).
// Unfortunately, calling dlopen() recursively is not supported on some old
// Android versions. Android fix is here:
// - https://android-review.googlesource.com/#/c/32951/
// - GitHub mirror: https://github.com/android/platform_bionic/commit/e19d702b8e330cef87e0983733c427b5f7842144
__attribute__((weak)) unsigned long getauxval(unsigned long); // Let the linker handle this symbol (NULL when absent)

static uint32_t
get_elf_hwcap_from_getauxval(int hwcap_type) {
    uint32_t ret = 0;
    if(getauxval != 0) {   /* weak symbol: non-NULL only if libc provides it */
        ret = (uint32_t)getauxval(hwcap_type);
    } else {
        D("getauxval() is not available\n");
    }
    return ret;
}
#else
/* Disabled fallback: locate getauxval at runtime via dlopen/dlsym. */
static uint32_t
get_elf_hwcap_from_getauxval(int hwcap_type) {
    typedef unsigned long getauxval_func_t(unsigned long);

    dlerror();   /* clear any stale dl error state */
    void* libc_handle = dlopen("libc.so", RTLD_NOW);
    if (!libc_handle) {
        D("Could not dlopen() C library: %s\n", dlerror());
        return 0;
    }

    uint32_t ret = 0;
    getauxval_func_t* func = (getauxval_func_t*)
            dlsym(libc_handle, "getauxval");
    if (!func) {
        D("Could not find getauxval() in C library\n");
    } else {
        // Note: getauxval() returns 0 on failure. Doesn't touch errno.
        ret = (uint32_t)(*func)(hwcap_type);
    }
    dlclose(libc_handle);
    return ret;
}
#endif
#endif
#if defined(__arm__)
// Parse /proc/self/auxv to extract the ELF HW capabilities bitmap for the
// current CPU. Note that this file is not accessible from regular
// application processes on some Android platform releases.
// On success, return new ELF hwcaps, or 0 on failure.
static uint32_t
get_elf_hwcap_from_proc_self_auxv(void) {
    const char auxv_path[] = "/proc/self/auxv";
    // Each auxv record on 32-bit targets is a (tag, value) pair of words.
    struct { uint32_t tag; uint32_t value; } aux_entry;
    uint32_t hwcaps = 0;
    int fd = TEMP_FAILURE_RETRY(open(auxv_path, O_RDONLY));
    if (fd < 0) {
        D("Could not open %s: %s\n", auxv_path, strerror(errno));
        return 0;
    }
    int nread;
    while ((nread = TEMP_FAILURE_RETRY(
                read(fd, (char*)&aux_entry, sizeof aux_entry))) != 0) {
        if (nread < 0) {
            D("Error while reading %s: %s\n", auxv_path, strerror(errno));
            break;
        }
        // An all-zero record (AT_NULL) terminates the vector.
        if (aux_entry.tag == 0 && aux_entry.value == 0)
            break;
        if (aux_entry.tag == AT_HWCAP) {
            hwcaps = aux_entry.value;
            break;
        }
    }
    close(fd);
    return hwcaps;
}
/* Compute the ELF HWCAP flags from the content of /proc/cpuinfo.
 * This works by parsing the 'Features' line, which lists which optional
 * features the device's CPU supports, on top of its reference
 * architecture.
 */
static uint32_t
get_elf_hwcap_from_proc_cpuinfo(const char* cpuinfo, int cpuinfo_len) {
    uint32_t hwcaps = 0;
    char* cpuArch = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "CPU architecture");
    if (cpuArch) {
        long architecture = strtol(cpuArch, NULL, 10);
        free(cpuArch);
        if (architecture >= 8L) {
            // This is a 32-bit ARM binary running on a 64-bit ARM64 kernel.
            // The 'Features' line only lists the optional features that the
            // device's CPU supports, compared to its reference architecture
            // which are of no use for this process.
            D("Faking 32-bit ARM HWCaps on ARMv%ld CPU\n", architecture);
            return HWCAP_SET_FOR_ARMV8;
        }
    }
    char* cpuFeatures = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "Features");
    if (cpuFeatures != NULL) {
        // Map each 'Features' token onto its HWCAP bit(s). Note that the
        // combined "idiv" token implies both the ARM and Thumb-2 flavors.
        static const struct {
            const char* name;
            uint32_t    flags;
        } kFeatureMap[] = {
            { "vfp",      HWCAP_VFP },
            { "vfpv3",    HWCAP_VFPv3 },
            { "vfpv3d16", HWCAP_VFPv3D16 },
            { "vfpv4",    HWCAP_VFPv4 },
            { "neon",     HWCAP_NEON },
            { "idiva",    HWCAP_IDIVA },
            { "idivt",    HWCAP_IDIVT },
            { "idiv",     HWCAP_IDIVA | HWCAP_IDIVT },
            { "iwmmxt",   HWCAP_IWMMXT },
        };
        size_t i;
        D("Found cpuFeatures = '%s'\n", cpuFeatures);
        for (i = 0; i < sizeof(kFeatureMap)/sizeof(kFeatureMap[0]); ++i) {
            if (has_list_item(cpuFeatures, kFeatureMap[i].name))
                hwcaps |= kFeatureMap[i].flags;
        }
        free(cpuFeatures);
    }
    return hwcaps;
}
#endif /* __arm__ */
/* Return the number of cpus present on a given device.
 *
 * To handle all weird kernel configurations, we need to compute the
 * intersection of the 'present' and 'possible' CPU lists and count
 * the result.
 */
static int
get_cpu_count(void)
{
    CpuList present_set[1];
    CpuList possible_set[1];
    cpulist_read_from(present_set, "/sys/devices/system/cpu/present");
    cpulist_read_from(possible_set, "/sys/devices/system/cpu/possible");
    /* Only CPUs listed in both sets can actually be used by the kernel,
     * so intersect before counting. */
    cpulist_and(present_set, possible_set);
    return cpulist_count(present_set);
}
/* Set g_cpuFamily from the compile-time target architecture. This reflects
 * the bitness of the current process, not of the underlying hardware
 * (a 32-bit binary on an ARM64 kernel reports ANDROID_CPU_FAMILY_ARM). */
static void
android_cpuInitFamily(void)
{
#if defined(__arm__)
    g_cpuFamily = ANDROID_CPU_FAMILY_ARM;
#elif defined(__i386__)
    g_cpuFamily = ANDROID_CPU_FAMILY_X86;
#elif defined(__mips64)
    /* Needs to be before __mips__ since the compiler defines both */
    g_cpuFamily = ANDROID_CPU_FAMILY_MIPS64;
#elif defined(__mips__)
    g_cpuFamily = ANDROID_CPU_FAMILY_MIPS;
#elif defined(__aarch64__)
    g_cpuFamily = ANDROID_CPU_FAMILY_ARM64;
#elif defined(__x86_64__)
    g_cpuFamily = ANDROID_CPU_FAMILY_X86_64;
#else
    g_cpuFamily = ANDROID_CPU_FAMILY_UNKNOWN;
#endif
}
/* One-time initializer (run through pthread_once()): fills in g_cpuFamily,
 * g_cpuFeatures, g_cpuCount and, on 32-bit ARM, g_cpuIdArm, by probing
 * getauxval(), /proc/self/auxv and /proc/cpuinfo.
 *
 * Fix vs. original: the Goldfish emulator special case below tested
 * (g_cpuFamily & ANDROID_CPU_ARM_FEATURE_ARMv7), i.e. the CPU *family enum*
 * against a *feature bit* — it only worked by accident because
 * ANDROID_CPU_FAMILY_ARM == 1 == (1 << 0). The intended mask is
 * g_cpuFeatures.
 */
static void
android_cpuInit(void)
{
    char* cpuinfo = NULL;
    int cpuinfo_len;
    android_cpuInitFamily();
    /* Safe defaults in case /proc/cpuinfo cannot be read. */
    g_cpuFeatures = 0;
    g_cpuCount = 1;
    g_inited = 1;
    cpuinfo_len = get_file_size("/proc/cpuinfo");
    if (cpuinfo_len < 0) {
        D("cpuinfo_len cannot be computed!");
        return;
    }
    cpuinfo = malloc(cpuinfo_len);
    if (cpuinfo == NULL) {
        D("cpuinfo buffer could not be allocated");
        return;
    }
    cpuinfo_len = read_file("/proc/cpuinfo", cpuinfo, cpuinfo_len);
    D("cpuinfo_len is (%d):\n%.*s\n", cpuinfo_len,
      cpuinfo_len >= 0 ? cpuinfo_len : 0, cpuinfo);
    if (cpuinfo_len < 0) /* should not happen */ {
        free(cpuinfo);
        return;
    }
    /* Count the CPU cores, the value may be 0 for single-core CPUs */
    g_cpuCount = get_cpu_count();
    if (g_cpuCount == 0) {
        g_cpuCount = 1;
    }
    D("found cpuCount = %d\n", g_cpuCount);
#ifdef __arm__
    {
        /* Extract architecture from the "CPU Architecture" field.
         * The list is well-known, unlike the output of
         * the 'Processor' field which can vary greatly.
         *
         * See the definition of the 'proc_arch' array in
         * $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in
         * same file.
         */
        char* cpuArch = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "CPU architecture");
        if (cpuArch != NULL) {
            char* end;
            long archNumber;
            int hasARMv7 = 0;
            D("found cpuArch = '%s'\n", cpuArch);
            /* read the initial decimal number, ignore the rest */
            archNumber = strtol(cpuArch, &end, 10);
            /* Note that ARMv8 is upwards compatible with ARMv7. */
            if (end > cpuArch && archNumber >= 7) {
                hasARMv7 = 1;
            }
            /* Unfortunately, it seems that certain ARMv6-based CPUs
             * report an incorrect architecture number of 7!
             *
             * See http://code.google.com/p/android/issues/detail?id=10812
             *
             * We try to correct this by looking at the 'elf_format'
             * field reported by the 'Processor' field, which is of the
             * form of "(v7l)" for an ARMv7-based CPU, and "(v6l)" for
             * an ARMv6-one.
             */
            if (hasARMv7) {
                char* cpuProc = extract_cpuinfo_field(cpuinfo, cpuinfo_len,
                                                      "Processor");
                if (cpuProc != NULL) {
                    D("found cpuProc = '%s'\n", cpuProc);
                    if (has_list_item(cpuProc, "(v6l)")) {
                        D("CPU processor and architecture mismatch!!\n");
                        hasARMv7 = 0;
                    }
                    free(cpuProc);
                }
            }
            if (hasARMv7) {
                g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_ARMv7;
            }
            /* The LDREX / STREX instructions are available from ARMv6 */
            if (archNumber >= 6) {
                g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_LDREX_STREX;
            }
            free(cpuArch);
        }
        /* Extract the list of CPU features from ELF hwcaps */
        uint32_t hwcaps = 0;
        hwcaps = get_elf_hwcap_from_getauxval(AT_HWCAP);
        if (!hwcaps) {
            D("Parsing /proc/self/auxv to extract ELF hwcaps!\n");
            hwcaps = get_elf_hwcap_from_proc_self_auxv();
        }
        if (!hwcaps) {
            // Parsing /proc/self/auxv will fail from regular application
            // processes on some Android platform versions, when this happens
            // parse proc/cpuinfo instead.
            D("Parsing /proc/cpuinfo to extract ELF hwcaps!\n");
            hwcaps = get_elf_hwcap_from_proc_cpuinfo(cpuinfo, cpuinfo_len);
        }
        if (hwcaps != 0) {
            int has_vfp = (hwcaps & HWCAP_VFP);
            int has_vfpv3 = (hwcaps & HWCAP_VFPv3);
            int has_vfpv3d16 = (hwcaps & HWCAP_VFPv3D16);
            int has_vfpv4 = (hwcaps & HWCAP_VFPv4);
            int has_neon = (hwcaps & HWCAP_NEON);
            int has_idiva = (hwcaps & HWCAP_IDIVA);
            int has_idivt = (hwcaps & HWCAP_IDIVT);
            int has_iwmmxt = (hwcaps & HWCAP_IWMMXT);
            // The kernel does a poor job at ensuring consistency when
            // describing CPU features. So lots of guessing is needed.
            // 'vfpv4' implies VFPv3|VFP_FMA|FP16
            if (has_vfpv4)
                g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_VFPv3 |
                                 ANDROID_CPU_ARM_FEATURE_VFP_FP16 |
                                 ANDROID_CPU_ARM_FEATURE_VFP_FMA;
            // 'vfpv3' or 'vfpv3d16' imply VFPv3. Note that unlike GCC,
            // a value of 'vfpv3' doesn't necessarily mean that the D32
            // feature is present, so be conservative. All CPUs in the
            // field that support D32 also support NEON, so this should
            // not be a problem in practice.
            if (has_vfpv3 || has_vfpv3d16)
                g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_VFPv3;
            // 'vfp' is super ambiguous. Depending on the kernel, it can
            // either mean VFPv2 or VFPv3. Make it depend on ARMv7.
            if (has_vfp) {
                if (g_cpuFeatures & ANDROID_CPU_ARM_FEATURE_ARMv7)
                    g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_VFPv3;
                else
                    g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_VFPv2;
            }
            // Neon implies VFPv3|D32, and if vfpv4 is detected, NEON_FMA
            if (has_neon) {
                g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_VFPv3 |
                                 ANDROID_CPU_ARM_FEATURE_NEON |
                                 ANDROID_CPU_ARM_FEATURE_VFP_D32;
                if (has_vfpv4)
                    g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_NEON_FMA;
            }
            // VFPv3 implies VFPv2 and ARMv7
            if (g_cpuFeatures & ANDROID_CPU_ARM_FEATURE_VFPv3)
                g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_VFPv2 |
                                 ANDROID_CPU_ARM_FEATURE_ARMv7;
            if (has_idiva)
                g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_IDIV_ARM;
            if (has_idivt)
                g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_IDIV_THUMB2;
            if (has_iwmmxt)
                g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_iWMMXt;
        }
        /* Extract the list of CPU features from ELF hwcaps2 */
        uint32_t hwcaps2 = 0;
        hwcaps2 = get_elf_hwcap_from_getauxval(AT_HWCAP2);
        if (hwcaps2 != 0) {
            int has_aes = (hwcaps2 & HWCAP2_AES);
            int has_pmull = (hwcaps2 & HWCAP2_PMULL);
            int has_sha1 = (hwcaps2 & HWCAP2_SHA1);
            int has_sha2 = (hwcaps2 & HWCAP2_SHA2);
            int has_crc32 = (hwcaps2 & HWCAP2_CRC32);
            if (has_aes)
                g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_AES;
            if (has_pmull)
                g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_PMULL;
            if (has_sha1)
                g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_SHA1;
            if (has_sha2)
                g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_SHA2;
            if (has_crc32)
                g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_CRC32;
        }
        /* Extract the cpuid value from various fields */
        // The CPUID value is broken up in several entries in /proc/cpuinfo.
        // This table is used to rebuild it from the entries.
        static const struct CpuIdEntry {
            const char* field;
            char        format;
            char        bit_lshift;
            char        bit_length;
        } cpu_id_entries[] = {
            { "CPU implementer", 'x', 24, 8 },
            { "CPU variant", 'x', 20, 4 },
            { "CPU part", 'x', 4, 12 },
            { "CPU revision", 'd', 0, 4 },
        };
        size_t i;
        D("Parsing /proc/cpuinfo to recover CPUID\n");
        for (i = 0;
             i < sizeof(cpu_id_entries)/sizeof(cpu_id_entries[0]);
             ++i) {
            const struct CpuIdEntry* entry = &cpu_id_entries[i];
            char* value = extract_cpuinfo_field(cpuinfo,
                                                cpuinfo_len,
                                                entry->field);
            if (value == NULL)
                continue;
            D("field=%s value='%s'\n", entry->field, value);
            char* value_end = value + strlen(value);
            int val = 0;
            const char* start = value;
            const char* p;
            /* Accept both "0x..." and bare values; the table says whether a
             * bare value is hexadecimal or decimal. */
            if (value[0] == '0' && (value[1] == 'x' || value[1] == 'X')) {
                start += 2;
                p = parse_hexadecimal(start, value_end, &val);
            } else if (entry->format == 'x')
                p = parse_hexadecimal(value, value_end, &val);
            else
                p = parse_decimal(value, value_end, &val);
            /* Only merge the field if at least one digit was parsed. */
            if (p > (const char*)start) {
                val &= ((1 << entry->bit_length)-1);
                val <<= entry->bit_lshift;
                g_cpuIdArm |= (uint32_t) val;
            }
            free(value);
        }
        // Handle kernel configuration bugs that prevent the correct
        // reporting of CPU features.
        static const struct CpuFix {
            uint32_t cpuid;
            uint64_t or_flags;
        } cpu_fixes[] = {
            /* The Nexus 4 (Qualcomm Krait) kernel configuration
             * forgets to report IDIV support. */
            { 0x510006f2, ANDROID_CPU_ARM_FEATURE_IDIV_ARM |
                          ANDROID_CPU_ARM_FEATURE_IDIV_THUMB2 },
            { 0x510006f3, ANDROID_CPU_ARM_FEATURE_IDIV_ARM |
                          ANDROID_CPU_ARM_FEATURE_IDIV_THUMB2 },
        };
        size_t n;
        for (n = 0; n < sizeof(cpu_fixes)/sizeof(cpu_fixes[0]); ++n) {
            const struct CpuFix* entry = &cpu_fixes[n];
            if (g_cpuIdArm == entry->cpuid)
                g_cpuFeatures |= entry->or_flags;
        }
        // Special case: The emulator-specific Android 4.2 kernel fails
        // to report support for the 32-bit ARM IDIV instruction.
        // Technically, this is a feature of the virtual CPU implemented
        // by the emulator. Note that it could also support Thumb IDIV
        // in the future, and this will have to be slightly updated.
        char* hardware = extract_cpuinfo_field(cpuinfo,
                                               cpuinfo_len,
                                               "Hardware");
        if (hardware) {
            /* BUGFIX: was 'g_cpuFamily & ...' — see function comment. */
            if (!strcmp(hardware, "Goldfish") &&
                g_cpuIdArm == 0x4100c080 &&
                (g_cpuFeatures & ANDROID_CPU_ARM_FEATURE_ARMv7) != 0) {
                g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_IDIV_ARM;
            }
            free(hardware);
        }
    }
#endif /* __arm__ */
#ifdef __aarch64__
    {
        /* Extract the list of CPU features from ELF hwcaps */
        uint32_t hwcaps = 0;
        hwcaps = get_elf_hwcap_from_getauxval(AT_HWCAP);
        if (hwcaps != 0) {
            int has_fp = (hwcaps & HWCAP_FP);
            int has_asimd = (hwcaps & HWCAP_ASIMD);
            int has_aes = (hwcaps & HWCAP_AES);
            int has_pmull = (hwcaps & HWCAP_PMULL);
            int has_sha1 = (hwcaps & HWCAP_SHA1);
            int has_sha2 = (hwcaps & HWCAP_SHA2);
            int has_crc32 = (hwcaps & HWCAP_CRC32);
            if (has_fp == 0) {
                D("ERROR: Floating-point unit missing, but is required by Android on AArch64 CPUs\n");
            }
            if (has_asimd == 0) {
                D("ERROR: ASIMD unit missing, but is required by Android on AArch64 CPUs\n");
            }
            if (has_fp)
                g_cpuFeatures |= ANDROID_CPU_ARM64_FEATURE_FP;
            if (has_asimd)
                g_cpuFeatures |= ANDROID_CPU_ARM64_FEATURE_ASIMD;
            if (has_aes)
                g_cpuFeatures |= ANDROID_CPU_ARM64_FEATURE_AES;
            if (has_pmull)
                g_cpuFeatures |= ANDROID_CPU_ARM64_FEATURE_PMULL;
            if (has_sha1)
                g_cpuFeatures |= ANDROID_CPU_ARM64_FEATURE_SHA1;
            if (has_sha2)
                g_cpuFeatures |= ANDROID_CPU_ARM64_FEATURE_SHA2;
            if (has_crc32)
                g_cpuFeatures |= ANDROID_CPU_ARM64_FEATURE_CRC32;
        }
    }
#endif /* __aarch64__ */
#if defined(__i386__) || defined(__x86_64__)
    int regs[4];
    /* According to http://en.wikipedia.org/wiki/CPUID */
#define VENDOR_INTEL_b 0x756e6547
#define VENDOR_INTEL_c 0x6c65746e
#define VENDOR_INTEL_d 0x49656e69
    x86_cpuid(0, regs);
    int vendorIsIntel = (regs[1] == VENDOR_INTEL_b &&
                         regs[2] == VENDOR_INTEL_c &&
                         regs[3] == VENDOR_INTEL_d);
    x86_cpuid(1, regs);
    /* Feature bits per the CPUID leaf 1 ECX layout. */
    if ((regs[2] & (1 << 9)) != 0) {
        g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_SSSE3;
    }
    if ((regs[2] & (1 << 23)) != 0) {
        g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_POPCNT;
    }
    if ((regs[2] & (1 << 19)) != 0) {
        g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_SSE4_1;
    }
    if ((regs[2] & (1 << 20)) != 0) {
        g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_SSE4_2;
    }
    if (vendorIsIntel && (regs[2] & (1 << 22)) != 0) {
        g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_MOVBE;
    }
    if ((regs[2] & (1 << 25)) != 0) {
        g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_AES_NI;
    }
    if ((regs[2] & (1 << 28)) != 0) {
        g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_AVX;
    }
    if ((regs[2] & (1 << 30)) != 0) {
        g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_RDRAND;
    }
    /* Leaf 7 EBX: structured extended features. */
    x86_cpuid(7, regs);
    if ((regs[1] & (1 << 5)) != 0) {
        g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_AVX2;
    }
    if ((regs[1] & (1 << 29)) != 0) {
        g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_SHA_NI;
    }
#endif
#if defined( __mips__)
    { /* MIPS and MIPS64 */
        /* Extract the list of CPU features from ELF hwcaps */
        uint32_t hwcaps = 0;
        hwcaps = get_elf_hwcap_from_getauxval(AT_HWCAP);
        if (hwcaps != 0) {
            int has_r6 = (hwcaps & HWCAP_MIPS_R6);
            int has_msa = (hwcaps & HWCAP_MIPS_MSA);
            if (has_r6)
                g_cpuFeatures |= ANDROID_CPU_MIPS_FEATURE_R6;
            if (has_msa)
                g_cpuFeatures |= ANDROID_CPU_MIPS_FEATURE_MSA;
        }
    }
#endif /* __mips__ */
    free(cpuinfo);
}
/* Return the CPU family of the current process (matches process bitness).
 * Thread-safe: initialization runs at most once via pthread_once(). */
AndroidCpuFamily
android_getCpuFamily(void)
{
    pthread_once(&g_once, android_cpuInit);
    return g_cpuFamily;
}
/* Return the feature bitmap for the current CPU; interpretation of the
 * bits depends on the value returned by android_getCpuFamily(). */
uint64_t
android_getCpuFeatures(void)
{
    pthread_once(&g_once, android_cpuInit);
    return g_cpuFeatures;
}
/* Return the number of usable CPU cores (always >= 1). */
int
android_getCpuCount(void)
{
    pthread_once(&g_once, android_cpuInit);
    return g_cpuCount;
}
/* No-op initializer used by android_setCpu() to consume the pthread_once()
 * slot so that the real android_cpuInit() probe never runs. */
static void
android_cpuInitDummy(void)
{
    g_inited = 1;
}
/* Force the CPU count and feature mask (useful in sandboxed processes that
 * cannot read /proc). Must be called before any android_getCpuXXX()
 * function. Returns 1 on success, 0 if already initialized. */
int
android_setCpu(int cpu_count, uint64_t cpu_features)
{
    /* Fail if the library was already initialized. */
    if (g_inited)
        return 0;
    android_cpuInitFamily();
    /* Clamp non-positive counts to a single core. */
    g_cpuCount = (cpu_count <= 0 ? 1 : cpu_count);
    g_cpuFeatures = cpu_features;
    /* Mark initialization done so android_cpuInit() never runs. */
    pthread_once(&g_once, android_cpuInitDummy);
    return 1;
}
#ifdef __arm__
/* Return the 32-bit ARM CPUID rebuilt from /proc/cpuinfo fields.
 * Cannot work from sandboxed processes on Android 4.1+ unless
 * android_setCpuArm() was called first. */
uint32_t
android_getCpuIdArm(void)
{
    pthread_once(&g_once, android_cpuInit);
    return g_cpuIdArm;
}
/* ARM-specific variant of android_setCpu() that also forces the CPUID
 * value. Same preconditions/return contract as android_setCpu(). */
int
android_setCpuArm(int cpu_count, uint64_t cpu_features, uint32_t cpu_id)
{
    if (!android_setCpu(cpu_count, cpu_features))
        return 0;
    g_cpuIdArm = cpu_id;
    return 1;
}
#endif /* __arm__ */
/*
* Technical note: Making sense of ARM's FPU architecture versions.
*
* FPA was ARM's first attempt at an FPU architecture. There is no Android
* device that actually uses it since this technology was already obsolete
* when the project started. If you see references to FPA instructions
* somewhere, you can be sure that this doesn't apply to Android at all.
*
* FPA was followed by "VFP", soon renamed "VFPv1" due to the emergence of
* new versions / additions to it. ARM considers this obsolete right now,
* and no known Android device implements it either.
*
* VFPv2 added a few instructions to VFPv1, and is an *optional* extension
* supported by some ARMv5TE, ARMv6 and ARMv6T2 CPUs. Note that a device
* supporting the 'armeabi' ABI doesn't necessarily support these.
*
* VFPv3-D16 adds a few instructions on top of VFPv2 and is typically used
* on ARMv7-A CPUs which implement a FPU. Note that it is also mandated
* by the Android 'armeabi-v7a' ABI. The -D16 suffix in its name means
* that it provides 16 double-precision FPU registers (d0-d15) and 32
* single-precision ones (s0-s31) which happen to be mapped to the same
* register banks.
*
* VFPv3-D32 is the name of an extension to VFPv3-D16 that provides 16
* additional double precision registers (d16-d31). Note that there are
* still only 32 single precision registers.
*
* VFPv3xD is a *subset* of VFPv3-D16 that only provides single-precision
* registers. It is only used on ARMv7-M (i.e. on micro-controllers) which
* are not supported by Android. Note that it is not compatible with VFPv2.
*
* NOTE: The term 'VFPv3' usually designate either VFPv3-D16 or VFPv3-D32
* depending on context. For example GCC uses it for VFPv3-D32, but
* the Linux kernel code uses it for VFPv3-D16 (especially in
* /proc/cpuinfo). Always try to use the full designation when
* possible.
*
* NEON, a.k.a. "ARM Advanced SIMD" is an extension that provides
* instructions to perform parallel computations on vectors of 8, 16,
* 32, 64 and 128 bit quantities. NEON requires VFPv3-D32 since all
* NEON registers are also mapped to the same register banks.
*
* VFPv4-D16, adds a few instructions on top of VFPv3-D16 in order to
* perform fused multiply-accumulate on VFP registers, as well as
* half-precision (16-bit) conversion operations.
*
* VFPv4-D32 is VFPv4-D16 with 32, instead of 16, FPU double precision
* registers.
*
* VFPv4-NEON is VFPv4-D32 with NEON instructions. It also adds fused
* multiply-accumulate instructions that work on the NEON registers.
*
* NOTE: Similarly, "VFPv4" might either reference VFPv4-D16 or VFPv4-D32
* depending on context.
*
* The following information was determined by scanning the binutils-2.22
* sources:
*
* Basic VFP instruction subsets:
*
* #define FPU_VFP_EXT_V1xD 0x08000000 // Base VFP instruction set.
* #define FPU_VFP_EXT_V1 0x04000000 // Double-precision insns.
* #define FPU_VFP_EXT_V2 0x02000000 // ARM10E VFPr1.
* #define FPU_VFP_EXT_V3xD 0x01000000 // VFPv3 single-precision.
* #define FPU_VFP_EXT_V3 0x00800000 // VFPv3 double-precision.
* #define FPU_NEON_EXT_V1 0x00400000 // Neon (SIMD) insns.
* #define FPU_VFP_EXT_D32 0x00200000 // Registers D16-D31.
* #define FPU_VFP_EXT_FP16 0x00100000 // Half-precision extensions.
* #define FPU_NEON_EXT_FMA 0x00080000 // Neon fused multiply-add
* #define FPU_VFP_EXT_FMA 0x00040000 // VFP fused multiply-add
*
* FPU types (excluding NEON)
*
* FPU_VFP_V1xD (EXT_V1xD)
* |
* +--------------------------+
* | |
* FPU_VFP_V1 (+EXT_V1) FPU_VFP_V3xD (+EXT_V2+EXT_V3xD)
* | |
* | |
* FPU_VFP_V2 (+EXT_V2) FPU_VFP_V4_SP_D16 (+EXT_FP16+EXT_FMA)
* |
* FPU_VFP_V3D16 (+EXT_V3xD+EXT_V3)
* |
* +--------------------------+
* | |
* FPU_VFP_V3 (+EXT_D32) FPU_VFP_V4D16 (+EXT_FP16+EXT_FMA)
* | |
* | FPU_VFP_V4 (+EXT_D32)
* |
* FPU_VFP_HARD (+EXT_FMA+NEON_EXT_FMA)
*
* VFP architectures:
*
* ARCH_VFP_V1xD (EXT_V1xD)
* |
* +------------------+
* | |
* | ARCH_VFP_V3xD (+EXT_V2+EXT_V3xD)
* | |
* | ARCH_VFP_V3xD_FP16 (+EXT_FP16)
* | |
* | ARCH_VFP_V4_SP_D16 (+EXT_FMA)
* |
* ARCH_VFP_V1 (+EXT_V1)
* |
* ARCH_VFP_V2 (+EXT_V2)
* |
* ARCH_VFP_V3D16 (+EXT_V3xD+EXT_V3)
* |
* +-------------------+
* | |
* | ARCH_VFP_V3D16_FP16 (+EXT_FP16)
* |
* +-------------------+
* | |
* | ARCH_VFP_V4_D16 (+EXT_FP16+EXT_FMA)
* | |
* | ARCH_VFP_V4 (+EXT_D32)
* | |
* | ARCH_NEON_VFP_V4 (+EXT_NEON+EXT_NEON_FMA)
* |
* ARCH_VFP_V3 (+EXT_D32)
* |
* +-------------------+
* | |
* | ARCH_VFP_V3_FP16 (+EXT_FP16)
* |
* ARCH_VFP_V3_PLUS_NEON_V1 (+EXT_NEON)
* |
* ARCH_NEON_FP16 (+EXT_FP16)
*
* -fpu=<name> values and their correspondence with FPU architectures above:
*
* {"vfp", FPU_ARCH_VFP_V2},
* {"vfp9", FPU_ARCH_VFP_V2},
* {"vfp3", FPU_ARCH_VFP_V3}, // For backwards compatibility.
* {"vfp10", FPU_ARCH_VFP_V2},
* {"vfp10-r0", FPU_ARCH_VFP_V1},
* {"vfpxd", FPU_ARCH_VFP_V1xD},
* {"vfpv2", FPU_ARCH_VFP_V2},
* {"vfpv3", FPU_ARCH_VFP_V3},
* {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16},
* {"vfpv3-d16", FPU_ARCH_VFP_V3D16},
* {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16},
* {"vfpv3xd", FPU_ARCH_VFP_V3xD},
* {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16},
* {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
* {"neon-fp16", FPU_ARCH_NEON_FP16},
* {"vfpv4", FPU_ARCH_VFP_V4},
* {"vfpv4-d16", FPU_ARCH_VFP_V4D16},
* {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16},
* {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4},
*
*
* Simplified diagram that only includes FPUs supported by Android:
* Only ARCH_VFP_V3D16 is actually mandated by the armeabi-v7a ABI,
* all others are optional and must be probed at runtime.
*
* ARCH_VFP_V3D16 (EXT_V1xD+EXT_V1+EXT_V2+EXT_V3xD+EXT_V3)
* |
* +-------------------+
* | |
* | ARCH_VFP_V3D16_FP16 (+EXT_FP16)
* |
* +-------------------+
* | |
* | ARCH_VFP_V4_D16 (+EXT_FP16+EXT_FMA)
* | |
* | ARCH_VFP_V4 (+EXT_D32)
* | |
* | ARCH_NEON_VFP_V4 (+EXT_NEON+EXT_NEON_FMA)
* |
* ARCH_VFP_V3 (+EXT_D32)
* |
* +-------------------+
* | |
* | ARCH_VFP_V3_FP16 (+EXT_FP16)
* |
* ARCH_VFP_V3_PLUS_NEON_V1 (+EXT_NEON)
* |
* ARCH_NEON_FP16 (+EXT_FP16)
*
*/
/*
* Copyright (C) 2010 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef CPU_FEATURES_H
#define CPU_FEATURES_H
#include <sys/cdefs.h>
#include <stdint.h>
#include <string.h>
__BEGIN_DECLS
/* A list of valid values returned by android_getCpuFamily().
* They describe the CPU Architecture of the current process.
*/
/* CPU architecture of the current process (NOT of the hardware: a 32-bit
 * binary on a 64-bit CPU reports the 32-bit family value). */
typedef enum {
    ANDROID_CPU_FAMILY_UNKNOWN = 0,
    ANDROID_CPU_FAMILY_ARM,      /* 32-bit ARM */
    ANDROID_CPU_FAMILY_X86,      /* 32-bit x86 */
    ANDROID_CPU_FAMILY_MIPS,     /* 32-bit MIPS */
    ANDROID_CPU_FAMILY_ARM64,    /* AArch64 */
    ANDROID_CPU_FAMILY_X86_64,
    ANDROID_CPU_FAMILY_MIPS64,
    ANDROID_CPU_FAMILY_MAX /* do not remove */
} AndroidCpuFamily;
/* Return the CPU family of the current process.
*
* Note that this matches the bitness of the current process. I.e. when
* running a 32-bit binary on a 64-bit capable CPU, this will return the
* 32-bit CPU family value.
*/
extern AndroidCpuFamily android_getCpuFamily(void);
/* Return a bitmap describing a set of optional CPU features that are
* supported by the current device's CPU. The exact bit-flags returned
* depend on the value returned by android_getCpuFamily(). See the
* documentation for the ANDROID_CPU_*_FEATURE_* flags below for details.
*/
extern uint64_t android_getCpuFeatures(void);
/* The list of feature flags for ANDROID_CPU_FAMILY_ARM that can be
* recognized by the library (see note below for 64-bit ARM). Value details
* are:
*
* VFPv2:
* CPU supports the VFPv2 instruction set. Many, but not all, ARMv6 CPUs
* support these instructions. VFPv2 is a subset of VFPv3 so this will
* be set whenever VFPv3 is set too.
*
* ARMv7:
* CPU supports the ARMv7-A basic instruction set.
* This feature is mandated by the 'armeabi-v7a' ABI.
*
* VFPv3:
* CPU supports the VFPv3-D16 instruction set, providing hardware FPU
* support for single and double precision floating point registers.
* Note that only 16 FPU registers are available by default, unless
* the D32 bit is set too. This feature is also mandated by the
* 'armeabi-v7a' ABI.
*
* VFP_D32:
* CPU VFP optional extension that provides 32 FPU registers,
* instead of 16. Note that ARM mandates this feature if the 'NEON'
* feature is implemented by the CPU.
*
* NEON:
* CPU FPU supports "ARM Advanced SIMD" instructions, also known as
* NEON. Note that this mandates the VFP_D32 feature as well, per the
* ARM Architecture specification.
*
* VFP_FP16:
* Half-width floating precision VFP extension. If set, the CPU
* supports instructions to perform floating-point operations on
* 16-bit registers. This is part of the VFPv4 specification, but
* not mandated by any Android ABI.
*
* VFP_FMA:
* Fused multiply-accumulate VFP instructions extension. Also part of
* the VFPv4 specification, but not mandated by any Android ABI.
*
* NEON_FMA:
* Fused multiply-accumulate NEON instructions extension. Optional
* extension from the VFPv4 specification, but not mandated by any
* Android ABI.
*
* IDIV_ARM:
* Integer division available in ARM mode. Only available
* on recent CPUs (e.g. Cortex-A15).
*
* IDIV_THUMB2:
* Integer division available in Thumb-2 mode. Only available
* on recent CPUs (e.g. Cortex-A15).
*
* iWMMXt:
* Optional extension that adds MMX registers and operations to an
* ARM CPU. This is only available on a few XScale-based CPU designs
* sold by Marvell. Pretty rare in practice.
*
* AES:
* CPU supports AES instructions. These instructions are only
* available for 32-bit applications running on ARMv8 CPU.
*
* CRC32:
* CPU supports CRC32 instructions. These instructions are only
* available for 32-bit applications running on ARMv8 CPU.
*
* SHA2:
* CPU supports SHA2 instructions. These instructions are only
* available for 32-bit applications running on ARMv8 CPU.
*
* SHA1:
* CPU supports SHA1 instructions. These instructions are only
* available for 32-bit applications running on ARMv8 CPU.
*
* PMULL:
* CPU supports 64-bit PMULL and PMULL2 instructions. These
* instructions are only available for 32-bit applications
* running on ARMv8 CPU.
*
* If you want to tell the compiler to generate code that targets one of
* the feature set above, you should probably use one of the following
* flags (for more details, see technical note at the end of this file):
*
* -mfpu=vfp
* -mfpu=vfpv2
* These are equivalent and tell GCC to use VFPv2 instructions for
* floating-point operations. Use this if you want your code to
* run on *some* ARMv6 devices, and any ARMv7-A device supported
* by Android.
*
* Generated code requires VFPv2 feature.
*
* -mfpu=vfpv3-d16
* Tell GCC to use VFPv3 instructions (using only 16 FPU registers).
* This should be generic code that runs on any CPU that supports the
* 'armeabi-v7a' Android ABI. Note that no ARMv6 CPU supports this.
*
* Generated code requires VFPv3 feature.
*
* -mfpu=vfpv3
* Tell GCC to use VFPv3 instructions with 32 FPU registers.
* Generated code requires VFPv3|VFP_D32 features.
*
* -mfpu=neon
* Tell GCC to use VFPv3 instructions with 32 FPU registers, and
* also support NEON intrinsics (see <arm_neon.h>).
* Generated code requires VFPv3|VFP_D32|NEON features.
*
* -mfpu=vfpv4-d16
* Generated code requires VFPv3|VFP_FP16|VFP_FMA features.
*
* -mfpu=vfpv4
* Generated code requires VFPv3|VFP_FP16|VFP_FMA|VFP_D32 features.
*
* -mfpu=neon-vfpv4
* Generated code requires VFPv3|VFP_FP16|VFP_FMA|VFP_D32|NEON|NEON_FMA
* features.
*
* -mcpu=cortex-a7
* -mcpu=cortex-a15
* Generated code requires VFPv3|VFP_FP16|VFP_FMA|VFP_D32|
* NEON|NEON_FMA|IDIV_ARM|IDIV_THUMB2
* This flag implies -mfpu=neon-vfpv4.
*
* -mcpu=iwmmxt
* Allows the use of iWMMXt intrinsics with GCC.
*
* IMPORTANT NOTE: These flags should only be tested when
* android_getCpuFamily() returns ANDROID_CPU_FAMILY_ARM, i.e. this is a
* 32-bit process.
*
* When running a 64-bit ARM process on an ARMv8 CPU,
* android_getCpuFeatures() will return a different set of bitflags
*/
/* Feature bits reported for ANDROID_CPU_FAMILY_ARM (32-bit ARM).
 * See the long comment block above for the meaning of each flag. */
enum {
    ANDROID_CPU_ARM_FEATURE_ARMv7       = (1 << 0),
    ANDROID_CPU_ARM_FEATURE_VFPv3       = (1 << 1),
    ANDROID_CPU_ARM_FEATURE_NEON        = (1 << 2),
    ANDROID_CPU_ARM_FEATURE_LDREX_STREX = (1 << 3),
    ANDROID_CPU_ARM_FEATURE_VFPv2       = (1 << 4),
    ANDROID_CPU_ARM_FEATURE_VFP_D32     = (1 << 5),
    ANDROID_CPU_ARM_FEATURE_VFP_FP16    = (1 << 6),
    ANDROID_CPU_ARM_FEATURE_VFP_FMA     = (1 << 7),
    ANDROID_CPU_ARM_FEATURE_NEON_FMA    = (1 << 8),
    ANDROID_CPU_ARM_FEATURE_IDIV_ARM    = (1 << 9),
    ANDROID_CPU_ARM_FEATURE_IDIV_THUMB2 = (1 << 10),
    ANDROID_CPU_ARM_FEATURE_iWMMXt      = (1 << 11),
    /* ARMv8 crypto/CRC extensions as seen by a 32-bit process. */
    ANDROID_CPU_ARM_FEATURE_AES         = (1 << 12),
    ANDROID_CPU_ARM_FEATURE_PMULL       = (1 << 13),
    ANDROID_CPU_ARM_FEATURE_SHA1        = (1 << 14),
    ANDROID_CPU_ARM_FEATURE_SHA2        = (1 << 15),
    ANDROID_CPU_ARM_FEATURE_CRC32       = (1 << 16),
};
/* The bit flags corresponding to the output of android_getCpuFeatures()
* when android_getCpuFamily() returns ANDROID_CPU_FAMILY_ARM64. Value details
* are:
*
* FP:
* CPU has Floating-point unit.
*
* ASIMD:
* CPU has Advanced SIMD unit.
*
* AES:
* CPU supports AES instructions.
*
* CRC32:
* CPU supports CRC32 instructions.
*
* SHA2:
* CPU supports SHA2 instructions.
*
* SHA1:
* CPU supports SHA1 instructions.
*
* PMULL:
* CPU supports 64-bit PMULL and PMULL2 instructions.
*/
/* Feature bits reported for ANDROID_CPU_FAMILY_ARM64 (see the comment
 * block above for details on each flag). */
enum {
    ANDROID_CPU_ARM64_FEATURE_FP    = (1 << 0),
    ANDROID_CPU_ARM64_FEATURE_ASIMD = (1 << 1),
    ANDROID_CPU_ARM64_FEATURE_AES   = (1 << 2),
    ANDROID_CPU_ARM64_FEATURE_PMULL = (1 << 3),
    ANDROID_CPU_ARM64_FEATURE_SHA1  = (1 << 4),
    ANDROID_CPU_ARM64_FEATURE_SHA2  = (1 << 5),
    ANDROID_CPU_ARM64_FEATURE_CRC32 = (1 << 6),
};
/* The bit flags corresponding to the output of android_getCpuFeatures()
* when android_getCpuFamily() returns ANDROID_CPU_FAMILY_X86 or
* ANDROID_CPU_FAMILY_X86_64.
*/
/* Feature bits reported for ANDROID_CPU_FAMILY_X86 and
 * ANDROID_CPU_FAMILY_X86_64. */
enum {
    ANDROID_CPU_X86_FEATURE_SSSE3  = (1 << 0),
    ANDROID_CPU_X86_FEATURE_POPCNT = (1 << 1),
    ANDROID_CPU_X86_FEATURE_MOVBE  = (1 << 2),
    ANDROID_CPU_X86_FEATURE_SSE4_1 = (1 << 3),
    ANDROID_CPU_X86_FEATURE_SSE4_2 = (1 << 4),
    ANDROID_CPU_X86_FEATURE_AES_NI = (1 << 5),
    ANDROID_CPU_X86_FEATURE_AVX    = (1 << 6),
    ANDROID_CPU_X86_FEATURE_RDRAND = (1 << 7),
    ANDROID_CPU_X86_FEATURE_AVX2   = (1 << 8),
    ANDROID_CPU_X86_FEATURE_SHA_NI = (1 << 9),
};
/* The bit flags corresponding to the output of android_getCpuFeatures()
* when android_getCpuFamily() returns ANDROID_CPU_FAMILY_MIPS
* or ANDROID_CPU_FAMILY_MIPS64. Values are:
*
* R6:
* CPU executes MIPS Release 6 instructions natively, and
* supports obsoleted R1..R5 instructions only via kernel traps.
*
* MSA:
* CPU supports Mips SIMD Architecture instructions.
*/
/* Feature bits reported for ANDROID_CPU_FAMILY_MIPS / MIPS64. */
enum {
    ANDROID_CPU_MIPS_FEATURE_R6  = (1 << 0), /* MIPS Release 6 ISA */
    ANDROID_CPU_MIPS_FEATURE_MSA = (1 << 1), /* MIPS SIMD Architecture */
};
/* Return the number of CPU cores detected on this device. */
extern int android_getCpuCount(void);
/* The following is used to force the CPU count and features
* mask in sandboxed processes. Under 4.1 and higher, these processes
* cannot access /proc, which is the only way to get information from
* the kernel about the current hardware (at least on ARM).
*
* It _must_ be called only once, and before any android_getCpuXXX
* function, any other case will fail.
*
* This function return 1 on success, and 0 on failure.
*/
extern int android_setCpu(int cpu_count,
uint64_t cpu_features);
#ifdef __arm__
/* The declarations below are only available when compiling for 32-bit ARM. */
/* Retrieve the ARM 32-bit CPUID value from the kernel.
 * Note that this cannot work on sandboxed processes under 4.1 and
 * higher, unless you called android_setCpuArm() before.
 */
extern uint32_t android_getCpuIdArm(void);
/* An ARM-specific variant of android_setCpu() that also allows you
 * to set the ARM CPUID field.
 */
extern int android_setCpuArm(int cpu_count,
                             uint64_t cpu_features,
                             uint32_t cpu_id);
#endif
__END_DECLS
#endif /* CPU_FEATURES_H */
# Download the prebuilt OpenCV FFmpeg wrapper DLL and verify its MD5 hash.
# The @..@ placeholders are substituted by CMake configure_file() at build time.
$url = "https://raw.githubusercontent.com/opencv/opencv_3rdparty/@FFMPEG_BINARIES_COMMIT@/ffmpeg/opencv_videoio_ffmpeg_64.dll"
$expected_md5 = "@FFMPEG_FILE_HASH_BIN64@"
$output = "$PSScriptRoot\@OPENCV_BIN_INSTALL_PATH@\opencv_videoio_ffmpeg@OPENCV_DLLVERSION@_64.dll"

# Display the FFmpeg readme / licensing notes before downloading anything.
Write-Output ("=" * 120)
try {
    Get-content -Path "$PSScriptRoot\@OPENCV_LICENSES_INSTALL_PATH@\ffmpeg-readme.txt" -ErrorAction 'Stop'
} catch {
    Write-Output "Refer to OpenCV FFmpeg wrapper readme notes about library usage / licensing details."
}
Write-Output ("=" * 120)
Write-Output ""

if(![System.IO.File]::Exists($output)) {
    # Probe for write access to the destination first; if it fails, relaunch
    # the script with elevated privileges (once) or give up.
    try {
        [io.file]::OpenWrite($output).close()
    } catch {
        Write-Warning "Unable to write: $output"
        if (!([Security.Principal.WindowsPrincipal][Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole([Security.Principal.WindowsBuiltInRole] "Administrator")) {
            Write-Warning "Launching with 'Administrator' elevated privileges..."
            Pause
            Start-Process powershell.exe "-NoProfile -ExecutionPolicy Bypass -File `"$PSCommandPath`"" -Verb RunAs
            exit
        } else {
            Write-Output "FATAL: Unable to write with elevated privileges: $output"
            Pause
            exit 1
        }
    }
    # First attempt uses BITS; on any failure fall back to Invoke-WebRequest.
    try {
        Write-Output ("Downloading: " + $output)
        Import-Module BitsTransfer
        $start_time = Get-Date
        Start-BitsTransfer -Source $url -Destination $output -ErrorAction 'Stop'
        Write-Output "Downloaded in $((Get-Date).Subtract($start_time).Seconds) seconds"
    } catch {
        $_ # Dump error
        try {
            Write-Output ("Downloading (second attempt): " + $output)
            $start_time = Get-Date
            Invoke-WebRequest -Uri $url -OutFile $output
            Write-Output "Downloaded in $((Get-Date).Subtract($start_time).Seconds) seconds"
        } catch {
            Write-Output ("Can't download file: " + $output)
            Write-Output ("URL: " + $url)
            Write-Output "You need to download this file manually. Stop"
            Pause
            # Exit non-zero so callers/CI can detect the failure (matches the
            # 'exit 1' convention used above for the elevated-write failure).
            Exit 1
        }
    }
} else {
    Write-Output ("File exists: " + $output)
    Write-Output ("Downloading is skipped. Remove this file and re-run this script to force downloading.")
}

# Sanity check: the file must exist at this point, whether freshly
# downloaded or pre-existing.
if(![System.IO.File]::Exists($output)) {
    Write-Output ("Destination file not found: " + $output)
    Write-Output "Stop"
    Pause
    Exit 1
}

# Verify the MD5 hash against the value baked in at configure time.
# A mismatch is reported but intentionally does not abort (best-effort check).
try {
    $hash = Get-FileHash $output -Algorithm MD5 -ErrorAction 'Stop'
    if($hash.Hash -eq $expected_md5) {
        Write-Output "MD5 check passed"
    } else {
        Write-Output ("MD5 : " + $hash.Hash.toLower())
        Write-Output ("Expected: " + $expected_md5)
        Write-Output "MD5 hash mismatch"
    }
} catch {
    $_ # Dump error
    Write-Output "Can't check MD5 hash (requires PowerShell 4+)"
}
Pause
Write-Output "Exit"
# Binaries branch name: ffmpeg/4.x_20230622
# Binaries were created for OpenCV: 61d48dd0f8d1cc1a115d26998705a61478f64a3c
# Pinned opencv_3rdparty commit plus the MD5 hashes of the prebuilt FFmpeg
# wrapper artifacts; all four values must be updated together when the
# FFmpeg binaries are bumped.
ocv_update(FFMPEG_BINARIES_COMMIT "7da61f0695eabf8972a2c302bf1632a3d99fb0d5")
ocv_update(FFMPEG_FILE_HASH_BIN32 "4aaef1456e282e5ef665d65555f47f56")  # opencv_videoio_ffmpeg.dll (32-bit)
ocv_update(FFMPEG_FILE_HASH_BIN64 "38a638851e064c591ce812e27ed43f1f")  # opencv_videoio_ffmpeg_64.dll (64-bit)
ocv_update(FFMPEG_FILE_HASH_CMAKE "8862c87496e2e8c375965e1277dee1c7")  # ffmpeg_version.cmake
# Fetch the prebuilt Windows FFmpeg wrapper binaries and the matching
# ffmpeg_version.cmake. On success, ${script_var} is set (in the caller's
# scope) to the path of the downloaded ffmpeg_version.cmake; on any
# download failure it is left empty.
function(download_win_ffmpeg script_var)
  set(${script_var} "" PARENT_SCOPE)

  # Artifact ids and their corresponding file names.
  set(ids BIN32 BIN64 CMAKE)
  set(name_BIN32 "opencv_videoio_ffmpeg.dll")
  set(name_BIN64 "opencv_videoio_ffmpeg_64.dll")
  set(name_CMAKE "ffmpeg_version.cmake")

  set(FFMPEG_DOWNLOAD_DIR "${OpenCV_BINARY_DIR}/3rdparty/ffmpeg")

  # Download every artifact; remember whether all of them succeeded.
  set(all_ok TRUE)
  foreach(item IN LISTS ids)
    ocv_download(FILENAME ${name_${item}}
                 HASH ${FFMPEG_FILE_HASH_${item}}
                 URL
                   "$ENV{OPENCV_FFMPEG_URL}"
                   "${OPENCV_FFMPEG_URL}"
                   "https://raw.githubusercontent.com/opencv/opencv_3rdparty/${FFMPEG_BINARIES_COMMIT}/ffmpeg/"
                 DESTINATION_DIR ${FFMPEG_DOWNLOAD_DIR}
                 ID FFMPEG
                 RELATIVE_URL
                 STATUS res)
    if(NOT res)
      set(all_ok FALSE)
    endif()
  endforeach()

  # Report the version file only when the complete set is available.
  if(all_ok)
    set(${script_var} "${FFMPEG_DOWNLOAD_DIR}/ffmpeg_version.cmake" PARENT_SCOPE)
  endif()
endfunction()
# Optionally install a self-service PowerShell script that end users can run
# to (re)download the FFmpeg wrapper DLL; @ONLY substitutes the pinned
# commit/hash placeholders into the .ps1 template.
if(OPENCV_INSTALL_FFMPEG_DOWNLOAD_SCRIPT)
configure_file("${CMAKE_CURRENT_LIST_DIR}/ffmpeg-download.ps1.in" "${CMAKE_BINARY_DIR}/win-install/ffmpeg-download.ps1" @ONLY)
install(FILES "${CMAKE_BINARY_DIR}/win-install/ffmpeg-download.ps1" DESTINATION "." COMPONENT libs)
endif()
# Ship the FFmpeg license/readme files alongside the other 3rdparty licenses.
ocv_install_3rdparty_licenses(ffmpeg license.txt readme.txt)
Copyright (C) 2001 Fabrice Bellard
FFmpeg is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
FFmpeg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with FFmpeg; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
==================================================================================
GNU LESSER GENERAL PUBLIC LICENSE
Version 2.1, February 1999
Copyright (C) 1991, 1999 Free Software Foundation, Inc.
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
[This is the first released version of the Lesser GPL. It also counts
as the successor of the GNU Library Public License, version 2, hence
the version number 2.1.]
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
Licenses are intended to guarantee your freedom to share and change
free software--to make sure the software is free for all its users.
This license, the Lesser General Public License, applies to some
specially designated software packages--typically libraries--of the
Free Software Foundation and other authors who decide to use it. You
can use it too, but we suggest you first think carefully about whether
this license or the ordinary General Public License is the better
strategy to use in any particular case, based on the explanations below.
When we speak of free software, we are referring to freedom of use,
not price. Our General Public Licenses are designed to make sure that
you have the freedom to distribute copies of free software (and charge
for this service if you wish); that you receive source code or can get
it if you want it; that you can change the software and use pieces of
it in new free programs; and that you are informed that you can do
these things.
To protect your rights, we need to make restrictions that forbid
distributors to deny you these rights or to ask you to surrender these
rights. These restrictions translate to certain responsibilities for
you if you distribute copies of the library or if you modify it.
For example, if you distribute copies of the library, whether gratis
or for a fee, you must give the recipients all the rights that we gave
you. You must make sure that they, too, receive or can get the source
code. If you link other code with the library, you must provide
complete object files to the recipients, so that they can relink them
with the library after making changes to the library and recompiling
it. And you must show them these terms so they know their rights.
We protect your rights with a two-step method: (1) we copyright the
library, and (2) we offer you this license, which gives you legal
permission to copy, distribute and/or modify the library.
To protect each distributor, we want to make it very clear that
there is no warranty for the free library. Also, if the library is
modified by someone else and passed on, the recipients should know
that what they have is not the original version, so that the original
author's reputation will not be affected by problems that might be
introduced by others.
Finally, software patents pose a constant threat to the existence of
any free program. We wish to make sure that a company cannot
effectively restrict the users of a free program by obtaining a
restrictive license from a patent holder. Therefore, we insist that
any patent license obtained for a version of the library must be
consistent with the full freedom of use specified in this license.
Most GNU software, including some libraries, is covered by the
ordinary GNU General Public License. This license, the GNU Lesser
General Public License, applies to certain designated libraries, and
is quite different from the ordinary General Public License. We use
this license for certain libraries in order to permit linking those
libraries into non-free programs.
When a program is linked with a library, whether statically or using
a shared library, the combination of the two is legally speaking a
combined work, a derivative of the original library. The ordinary
General Public License therefore permits such linking only if the
entire combination fits its criteria of freedom. The Lesser General
Public License permits more lax criteria for linking other code with
the library.
We call this license the "Lesser" General Public License because it
does Less to protect the user's freedom than the ordinary General
Public License. It also provides other free software developers Less
of an advantage over competing non-free programs. These disadvantages
are the reason we use the ordinary General Public License for many
libraries. However, the Lesser license provides advantages in certain
special circumstances.
For example, on rare occasions, there may be a special need to
encourage the widest possible use of a certain library, so that it becomes
a de-facto standard. To achieve this, non-free programs must be
allowed to use the library. A more frequent case is that a free
library does the same job as widely used non-free libraries. In this
case, there is little to gain by limiting the free library to free
software only, so we use the Lesser General Public License.
In other cases, permission to use a particular library in non-free
programs enables a greater number of people to use a large body of
free software. For example, permission to use the GNU C Library in
non-free programs enables many more people to use the whole GNU
operating system, as well as its variant, the GNU/Linux operating
system.
Although the Lesser General Public License is Less protective of the
users' freedom, it does ensure that the user of a program that is
linked with the Library has the freedom and the wherewithal to run
that program using a modified version of the Library.
The precise terms and conditions for copying, distribution and
modification follow. Pay close attention to the difference between a
"work based on the library" and a "work that uses the library". The
former contains code derived from the library, whereas the latter must
be combined with the library in order to run.
GNU LESSER GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License Agreement applies to any software library or other
program which contains a notice placed by the copyright holder or
other authorized party saying it may be distributed under the terms of
this Lesser General Public License (also called "this License").
Each licensee is addressed as "you".
A "library" means a collection of software functions and/or data
prepared so as to be conveniently linked with application programs
(which use some of those functions and data) to form executables.
The "Library", below, refers to any such software library or work
which has been distributed under these terms. A "work based on the
Library" means either the Library or any derivative work under
copyright law: that is to say, a work containing the Library or a
portion of it, either verbatim or with modifications and/or translated
straightforwardly into another language. (Hereinafter, translation is
included without limitation in the term "modification".)
"Source code" for a work means the preferred form of the work for
making modifications to it. For a library, complete source code means
all the source code for all modules it contains, plus any associated
interface definition files, plus the scripts used to control compilation
and installation of the library.
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running a program using the Library is not restricted, and output from
such a program is covered only if its contents constitute a work based
on the Library (independent of the use of the Library in a tool for
writing it). Whether that is true depends on what the Library does
and what the program that uses the Library does.
1. You may copy and distribute verbatim copies of the Library's
complete source code as you receive it, in any medium, provided that
you conspicuously and appropriately publish on each copy an
appropriate copyright notice and disclaimer of warranty; keep intact
all the notices that refer to this License and to the absence of any
warranty; and distribute a copy of this License along with the
Library.
You may charge a fee for the physical act of transferring a copy,
and you may at your option offer warranty protection in exchange for a
fee.
2. You may modify your copy or copies of the Library or any portion
of it, thus forming a work based on the Library, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) The modified work must itself be a software library.
b) You must cause the files modified to carry prominent notices
stating that you changed the files and the date of any change.
c) You must cause the whole of the work to be licensed at no
charge to all third parties under the terms of this License.
d) If a facility in the modified Library refers to a function or a
table of data to be supplied by an application program that uses
the facility, other than as an argument passed when the facility
is invoked, then you must make a good faith effort to ensure that,
in the event an application does not supply such function or
table, the facility still operates, and performs whatever part of
its purpose remains meaningful.
(For example, a function in a library to compute square roots has
a purpose that is entirely well-defined independent of the
application. Therefore, Subsection 2d requires that any
application-supplied function or table used by this function must
be optional: if the application does not supply it, the square
root function must still compute square roots.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Library,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Library, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote
it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Library.
In addition, mere aggregation of another work not based on the Library
with the Library (or with a work based on the Library) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may opt to apply the terms of the ordinary GNU General Public
License instead of this License to a given copy of the Library. To do
this, you must alter all the notices that refer to this License, so
that they refer to the ordinary GNU General Public License, version 2,
instead of to this License. (If a newer version than version 2 of the
ordinary GNU General Public License has appeared, then you can specify
that version instead if you wish.) Do not make any other change in
these notices.
Once this change is made in a given copy, it is irreversible for
that copy, so the ordinary GNU General Public License applies to all
subsequent copies and derivative works made from that copy.
This option is useful when you wish to copy part of the code of
the Library into a program that is not a library.
4. You may copy and distribute the Library (or a portion or
derivative of it, under Section 2) in object code or executable form
under the terms of Sections 1 and 2 above provided that you accompany
it with the complete corresponding machine-readable source code, which
must be distributed under the terms of Sections 1 and 2 above on a
medium customarily used for software interchange.
If distribution of object code is made by offering access to copy
from a designated place, then offering equivalent access to copy the
source code from the same place satisfies the requirement to
distribute the source code, even though third parties are not
compelled to copy the source along with the object code.
5. A program that contains no derivative of any portion of the
Library, but is designed to work with the Library by being compiled or
linked with it, is called a "work that uses the Library". Such a
work, in isolation, is not a derivative work of the Library, and
therefore falls outside the scope of this License.
However, linking a "work that uses the Library" with the Library
creates an executable that is a derivative of the Library (because it
contains portions of the Library), rather than a "work that uses the
library". The executable is therefore covered by this License.
Section 6 states terms for distribution of such executables.
When a "work that uses the Library" uses material from a header file
that is part of the Library, the object code for the work may be a
derivative work of the Library even though the source code is not.
Whether this is true is especially significant if the work can be
linked without the Library, or if the work is itself a library. The
threshold for this to be true is not precisely defined by law.
If such an object file uses only numerical parameters, data
structure layouts and accessors, and small macros and small inline
functions (ten lines or less in length), then the use of the object
file is unrestricted, regardless of whether it is legally a derivative
work. (Executables containing this object code plus portions of the
Library will still fall under Section 6.)
Otherwise, if the work is a derivative of the Library, you may
distribute the object code for the work under the terms of Section 6.
Any executables containing that work also fall under Section 6,
whether or not they are linked directly with the Library itself.
6. As an exception to the Sections above, you may also combine or
link a "work that uses the Library" with the Library to produce a
work containing portions of the Library, and distribute that work
under terms of your choice, provided that the terms permit
modification of the work for the customer's own use and reverse
engineering for debugging such modifications.
You must give prominent notice with each copy of the work that the
Library is used in it and that the Library and its use are covered by
this License. You must supply a copy of this License. If the work
during execution displays copyright notices, you must include the
copyright notice for the Library among them, as well as a reference
directing the user to the copy of this License. Also, you must do one
of these things:
a) Accompany the work with the complete corresponding
machine-readable source code for the Library including whatever
changes were used in the work (which must be distributed under
Sections 1 and 2 above); and, if the work is an executable linked
with the Library, with the complete machine-readable "work that
uses the Library", as object code and/or source code, so that the
user can modify the Library and then relink to produce a modified
executable containing the modified Library. (It is understood
that the user who changes the contents of definitions files in the
Library will not necessarily be able to recompile the application
to use the modified definitions.)
b) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (1) uses at run time a
copy of the library already present on the user's computer system,
rather than copying library functions into the executable, and (2)
will operate properly with a modified version of the library, if
the user installs one, as long as the modified version is
interface-compatible with the version that the work was made with.
c) Accompany the work with a written offer, valid for at
least three years, to give the same user the materials
specified in Subsection 6a, above, for a charge no more
than the cost of performing this distribution.
d) If distribution of the work is made by offering access to copy
from a designated place, offer equivalent access to copy the above
specified materials from the same place.
e) Verify that the user has already received a copy of these
materials or that you have already sent this user a copy.
For an executable, the required form of the "work that uses the
Library" must include any data and utility programs needed for
reproducing the executable from it. However, as a special exception,
the materials to be distributed need not include anything that is
normally distributed (in either source or binary form) with the major
components (compiler, kernel, and so on) of the operating system on
which the executable runs, unless that component itself accompanies
the executable.
It may happen that this requirement contradicts the license
restrictions of other proprietary libraries that do not normally
accompany the operating system. Such a contradiction means you cannot
use both them and the Library together in an executable that you
distribute.
7. You may place library facilities that are a work based on the
Library side-by-side in a single library together with other library
facilities not covered by this License, and distribute such a combined
library, provided that the separate distribution of the work based on
the Library and of the other library facilities is otherwise
permitted, and provided that you do these two things:
a) Accompany the combined library with a copy of the same work
based on the Library, uncombined with any other library
facilities. This must be distributed under the terms of the
Sections above.
b) Give prominent notice with the combined library of the fact
that part of it is a work based on the Library, and explaining
where to find the accompanying uncombined form of the same work.
8. You may not copy, modify, sublicense, link with, or distribute
the Library except as expressly provided under this License. Any
attempt otherwise to copy, modify, sublicense, link with, or
distribute the Library is void, and will automatically terminate your
rights under this License. However, parties who have received copies,
or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.
9. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Library or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Library (or any work based on the
Library), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Library or works based on it.
10. Each time you redistribute the Library (or any work based on the
Library), the recipient automatically receives a license from the
original licensor to copy, distribute, link with or modify the Library
subject to these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties with
this License.
11. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Library at all. For example, if a patent
license would not permit royalty-free redistribution of the Library by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Library.
If any portion of this section is held invalid or unenforceable under any
particular circumstance, the balance of the section is intended to apply,
and the section as a whole is intended to apply in other circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
12. If the distribution and/or use of the Library is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Library under this License may add
an explicit geographical distribution limitation excluding those countries,
so that distribution is permitted only in or among countries not thus
excluded. In such case, this License incorporates the limitation as if
written in the body of this License.
13. The Free Software Foundation may publish revised and/or new
versions of the Lesser General Public License from time to time.
Such new versions will be similar in spirit to the present version,
but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Library
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by
the Free Software Foundation. If the Library does not specify a
license version number, you may choose any version ever published by
the Free Software Foundation.
14. If you wish to incorporate parts of the Library into other free
programs whose distribution conditions are incompatible with these,
write to the author to ask for permission. For software which is
copyrighted by the Free Software Foundation, write to the Free
Software Foundation; we sometimes make exceptions for this. Our
decision will be guided by the two goals of preserving the free status
of all derivatives of our free software and of promoting the sharing
and reuse of software generally.
NO WARRANTY
15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Libraries
If you develop a new library, and you want it to be of the greatest
possible use to the public, we recommend making it free software that
everyone can redistribute and change. You can do so by permitting
redistribution under these terms (or, alternatively, under the terms of the
ordinary General Public License).
To apply these terms, attach the following notices to the library. It is
safest to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least the
"copyright" line and a pointer to where the full notice is found.
<one line to give the library's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Also add information on how to contact you by electronic and paper mail.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the library, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the
library `Frob' (a library for tweaking knobs) written by James Random Hacker.
<signature of Ty Coon>, 1 April 1990
Ty Coon, President of Vice
That's all there is to it!
* On Linux and other Unix flavors OpenCV uses default or user-built ffmpeg/libav libraries.
If user builds ffmpeg/libav from source and wants OpenCV to stay BSD library, not GPL/LGPL,
he/she should use --enable-shared configure flag and make sure that no GPL components are
enabled (some notable examples are x264 (H264 encoder) and libac3 (Dolby AC3 audio codec)).
See https://www.ffmpeg.org/legal.html for details.
If you want to play very safe and do not want to use FFMPEG at all, regardless of whether it's installed on
your system or not, configure and build OpenCV using CMake with WITH_FFMPEG=OFF flag. OpenCV will then use
AVFoundation (OSX), GStreamer (Linux) or other available backends supported by opencv_videoio module.
There is also our self-contained motion jpeg codec, which you can use without any worries.
It handles CV_FOURCC('M', 'J', 'P', 'G') streams within an AVI container (".avi").
* On Windows OpenCV uses pre-built ffmpeg binaries, built with proper flags (without GPL components) and
wrapped with simple, stable OpenCV-compatible API.
The binaries are opencv_videoio_ffmpeg.dll (version for 32-bit Windows) and
opencv_videoio_ffmpeg_64.dll (version for 64-bit Windows).
The pre-built opencv_videoio_ffmpeg*.dll is:
* LGPL library, not BSD libraries.
* Loaded at runtime by opencv_videoio module.
If it succeeds, ffmpeg can be used to decode/encode videos;
otherwise, other API is used.
FFMPEG build includes support for H264 encoder based on the OpenH264 library.
OpenH264 Video Codec provided by Cisco Systems, Inc.
See https://github.com/cisco/openh264/releases for details and OpenH264 license.
OpenH264 library should be installed separately. Downloaded binary file can be placed into global system path
(System32 or SysWOW64) or near application binaries (check documentation of "LoadLibrary" Win32 function from MSDN).
Or you can specify location of binary file via OPENH264_LIBRARY environment variable.
If LGPL/GPL software can not be supplied with your OpenCV-based product, simply exclude
opencv_videoio_ffmpeg*.dll from your distribution; OpenCV will stay fully functional except for the ability to
decode/encode videos using FFMPEG (though, it may still be able to do that using other API,
such as Video for Windows, Windows Media Foundation or our self-contained motion jpeg codec).
See license.txt for the FFMPEG copyright notice and the licensing terms.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Origin: https://github.com/google/flatbuffers/tree/v23.5.9
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_ALLOCATOR_H_
#define FLATBUFFERS_ALLOCATOR_H_
#include "flatbuffers/base.h"
namespace flatbuffers {
// Pluggable memory-allocation interface. This is flatbuffers-specific and
// intended solely to back `vector_downward`; it is not a general-purpose
// allocator abstraction.
class Allocator {
 public:
  virtual ~Allocator() {}

  // Obtain a block of `size` bytes.
  virtual uint8_t *allocate(size_t size) = 0;

  // Release the `size`-byte block at `p` previously obtained from this
  // allocator.
  virtual void deallocate(uint8_t *p, size_t size) = 0;

  // Grow the region at `old_p` from `old_size` to `new_size` bytes. Unlike a
  // normal realloc, the buffer grows downward; this is intended specifically
  // for `vector_downward` use. `in_use_back` and `in_use_front` say how many
  // bytes at each end of the old region are live and must be preserved.
  virtual uint8_t *reallocate_downward(uint8_t *old_p, size_t old_size,
                                       size_t new_size, size_t in_use_back,
                                       size_t in_use_front) {
    // vector_downward only ever grows its buffer.
    FLATBUFFERS_ASSERT(new_size > old_size);
    uint8_t *const grown = allocate(new_size);
    memcpy_downward(old_p, old_size, grown, new_size, in_use_back,
                    in_use_front);
    deallocate(old_p, old_size);
    return grown;
  }

 protected:
  // Helper for `reallocate_downward`: carries the live bytes across from
  // `old_p`/`old_size` to `new_p`/`new_size`. Only `in_use_back` bytes at the
  // tail and `in_use_front` bytes at the head are copied.
  void memcpy_downward(uint8_t *old_p, size_t old_size, uint8_t *new_p,
                       size_t new_size, size_t in_use_back,
                       size_t in_use_front) {
    // The back region stays anchored to the END of the buffer, so it lands at
    // a new offset in the larger allocation.
    memcpy(new_p + new_size - in_use_back, old_p + old_size - in_use_back,
           in_use_back);
    // The front region stays anchored to the start.
    memcpy(new_p, old_p, in_use_front);
  }
};
} // namespace flatbuffers
#endif // FLATBUFFERS_ALLOCATOR_H_
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_ARRAY_H_
#define FLATBUFFERS_ARRAY_H_
#include <cstdint>
#include <memory>
#include "flatbuffers/base.h"
#include "flatbuffers/stl_emulation.h"
#include "flatbuffers/vector.h"
namespace flatbuffers {
// This is used as a helper type for accessing arrays.
//
// An Array is a fixed-length inline sequence of POD elements stored as raw
// little-endian bytes (see `data_` below). Instances only ever alias
// pre-existing buffer memory; they are never constructed directly (the
// constructor is declared but deliberately unimplemented).
template<typename T, uint16_t length> class Array {
  // Array<T> can carry only POD data types (scalars or structs).
  // true_type for scalars, false_type for structs; used for tag dispatch.
  typedef typename flatbuffers::bool_constant<flatbuffers::is_scalar<T>::value>
      scalar_tag;
  // Scalars are read back by value; structs are accessed through a pointer.
  typedef
      typename flatbuffers::conditional<scalar_tag::value, T, const T *>::type
          IndirectHelperType;

 public:
  typedef uint16_t size_type;
  typedef typename IndirectHelper<IndirectHelperType>::return_type return_type;
  typedef VectorConstIterator<T, return_type, uoffset_t> const_iterator;
  typedef VectorReverseIterator<const_iterator> const_reverse_iterator;

  // If T is a LE-scalar or a struct (!scalar_tag::value).
  // When true, the stored bytes match T's native representation exactly, so
  // the array may be exposed through a span without endian conversion.
  static FLATBUFFERS_CONSTEXPR bool is_span_observable =
      (scalar_tag::value && (FLATBUFFERS_LITTLEENDIAN || sizeof(T) == 1)) ||
      !scalar_tag::value;

  // Element count; fixed at compile time by the `length` template parameter.
  FLATBUFFERS_CONSTEXPR uint16_t size() const { return length; }

  // Read element `i`. Asserts (debug builds) on out-of-range indices.
  return_type Get(uoffset_t i) const {
    FLATBUFFERS_ASSERT(i < size());
    return IndirectHelper<IndirectHelperType>::Read(Data(), i);
  }

  return_type operator[](uoffset_t i) const { return Get(i); }

  // If this is a Vector of enums, T will be its storage type, not the enum
  // type. This function makes it convenient to retrieve value with enum
  // type E.
  template<typename E> E GetEnum(uoffset_t i) const {
    return static_cast<E>(Get(i));
  }

  const_iterator begin() const { return const_iterator(Data(), 0); }
  const_iterator end() const { return const_iterator(Data(), size()); }

  const_reverse_iterator rbegin() const {
    return const_reverse_iterator(end());
  }
  const_reverse_iterator rend() const {
    return const_reverse_iterator(begin());
  }

  const_iterator cbegin() const { return begin(); }
  const_iterator cend() const { return end(); }

  const_reverse_iterator crbegin() const { return rbegin(); }
  const_reverse_iterator crend() const { return rend(); }

  // Get a mutable pointer to elements inside this array.
  // This method used to mutate arrays of structs followed by a @p Mutate
  // operation. For primitive types use @p Mutate directly.
  // @warning Assignments and reads to/from the dereferenced pointer are not
  // automatically converted to the correct endianness.
  typename flatbuffers::conditional<scalar_tag::value, void, T *>::type
  GetMutablePointer(uoffset_t i) const {
    FLATBUFFERS_ASSERT(i < size());
    return const_cast<T *>(&data()[i]);
  }

  // Change elements if you have a non-const pointer to this object.
  // Dispatches on scalar_tag: scalars get endian-converted, structs do not.
  void Mutate(uoffset_t i, const T &val) { MutateImpl(scalar_tag(), i, val); }

  // The raw data in little endian format. Use with care.
  const uint8_t *Data() const { return data_; }

  uint8_t *Data() { return data_; }

  // Similarly, but typed, much like std::vector::data
  const T *data() const { return reinterpret_cast<const T *>(Data()); }
  T *data() { return reinterpret_cast<T *>(Data()); }

  // Copy data from a span with endian conversion.
  // If this Array and the span overlap, the behavior is undefined.
  void CopyFromSpan(flatbuffers::span<const T, length> src) {
    // Debug-only overlap check between source and destination.
    // NOTE(review): the check uses `length` (element count) rather than
    // `length * sizeof(T)` bytes — confirm against upstream flatbuffers.
    const auto p1 = reinterpret_cast<const uint8_t *>(src.data());
    const auto p2 = Data();
    FLATBUFFERS_ASSERT(!(p1 >= p2 && p1 < (p2 + length)) &&
                       !(p2 >= p1 && p2 < (p1 + length)));
    (void)p1;
    (void)p2;
    CopyFromSpanImpl(flatbuffers::bool_constant<is_span_observable>(), src);
  }

 protected:
  // Scalar path of Mutate(): writes `val` with endian conversion.
  void MutateImpl(flatbuffers::true_type, uoffset_t i, const T &val) {
    FLATBUFFERS_ASSERT(i < size());
    WriteScalar(data() + i, val);
  }

  // Struct path of Mutate(): plain assignment, no conversion performed.
  void MutateImpl(flatbuffers::false_type, uoffset_t i, const T &val) {
    *(GetMutablePointer(i)) = val;
  }

  // Fast path: representations match (is_span_observable), copy raw bytes.
  void CopyFromSpanImpl(flatbuffers::true_type,
                        flatbuffers::span<const T, length> src) {
    // Use std::memcpy() instead of std::copy() to avoid performance degradation
    // due to aliasing if T is char or unsigned char.
    // The size is known at compile time, so memcpy would be inlined.
    std::memcpy(data(), src.data(), length * sizeof(T));
  }

  // Copy data from flatbuffers::span with endian conversion.
  // Slow path: element-by-element Mutate() so scalars get converted.
  void CopyFromSpanImpl(flatbuffers::false_type,
                        flatbuffers::span<const T, length> src) {
    for (size_type k = 0; k < length; k++) { Mutate(k, src[k]); }
  }

  // This class is only used to access pre-existing data. Don't ever
  // try to construct these manually.
  // 'constexpr' allows us to use 'size()' at compile time.
  // @note Must not use 'FLATBUFFERS_CONSTEXPR' here, as const is not allowed on
  // a constructor.
#if defined(__cpp_constexpr)
  constexpr Array();
#else
  Array();
#endif

  // Backing storage: raw little-endian bytes for `length` elements of T.
  uint8_t data_[length * sizeof(T)];

 private:
  // This class is a pointer. Copying will therefore create an invalid object.
  // Private and unimplemented copy constructor.
  Array(const Array &);
  Array &operator=(const Array &);
};
// Specialization for Array[struct] with access using Offset<void> pointer.
// This specialization used by idl_gen_text.cpp.
template<typename T, uint16_t length, template<typename> class OffsetT>
class Array<OffsetT<T>, length> {
  // Only Offset<void> is supported here.
  static_assert(flatbuffers::is_same<T, void>::value, "unexpected type T");

 public:
  typedef const void *return_type;
  typedef uint16_t size_type;

  // Raw bytes of the array inside the enclosing buffer.
  const uint8_t *Data() const { return data_; }

  // Make idl_gen_text.cpp::PrintContainer happy.
  // Element access is not meaningful in this specialization: it asserts in
  // debug builds and yields nullptr.
  return_type operator[](uoffset_t) const {
    FLATBUFFERS_ASSERT(false);
    return nullptr;
  }

 private:
  // This class is only used to access pre-existing data.
  Array();
  Array(const Array &);
  Array &operator=(const Array &);

  // NOTE(review): declared as a single byte; the real extent presumably comes
  // from the enclosing buffer layout — confirm against upstream flatbuffers.
  uint8_t data_[1];
};
// Creates a mutable typed span over the elements of `arr`.
// Statically rejected unless the stored bytes match U's native representation
// (plain struct, LE-scalar, or 1-byte type), since the span bypasses endian
// conversion.
template<class U, uint16_t N>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<U, N> make_span(Array<U, N> &arr)
    FLATBUFFERS_NOEXCEPT {
  static_assert(
      Array<U, N>::is_span_observable,
      "wrong type U, only plain struct, LE-scalar, or byte types are allowed");
  return span<U, N>(arr.data(), N);
}
// Creates a read-only typed span over the elements of `arr`.
// Same is_span_observable restriction as the mutable overload: the span reads
// raw storage with no endian conversion.
template<class U, uint16_t N>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const U, N> make_span(
    const Array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
  static_assert(
      Array<U, N>::is_span_observable,
      "wrong type U, only plain struct, LE-scalar, or byte types are allowed");
  return span<const U, N>(arr.data(), N);
}
// Views the storage of `arr` as a mutable span of raw bytes
// (sizeof(U) * N bytes total).
template<class U, uint16_t N>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<uint8_t, sizeof(U) * N>
make_bytes_span(Array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
  static_assert(Array<U, N>::is_span_observable,
                "internal error, Array<T> might hold only scalars or structs");
  return span<uint8_t, sizeof(U) * N>(arr.Data(), sizeof(U) * N);
}
// Views the storage of `arr` as a read-only span of raw bytes
// (sizeof(U) * N bytes total).
template<class U, uint16_t N>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const uint8_t, sizeof(U) * N>
make_bytes_span(const Array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
  static_assert(Array<U, N>::is_span_observable,
                "internal error, Array<T> might hold only scalars or structs");
  return span<const uint8_t, sizeof(U) * N>(arr.Data(), sizeof(U) * N);
}
// Cast a raw T[length] to a raw flatbuffers::Array<T, length>
// without endian conversion. Use with care.
// TODO: move these Cast-methods to `internal` namespace.
// Relies on Array<T, length> holding exactly `length * sizeof(T)` bytes with
// no additional members (see Array::data_).
template<typename T, uint16_t length>
Array<T, length> &CastToArray(T (&arr)[length]) {
  return *reinterpret_cast<Array<T, length> *>(arr);
}
// Const overload of CastToArray: views a raw const T[length] as a
// const flatbuffers::Array<T, length> without endian conversion.
template<typename T, uint16_t length>
const Array<T, length> &CastToArray(const T (&arr)[length]) {
  return *reinterpret_cast<const Array<T, length> *>(arr);
}
// As CastToArray, but reinterprets the element type as enum type E.
// E must be the same size as the underlying storage type T.
template<typename E, typename T, uint16_t length>
Array<E, length> &CastToArrayOfEnum(T (&arr)[length]) {
  static_assert(sizeof(E) == sizeof(T), "invalid enum type E");
  return *reinterpret_cast<Array<E, length> *>(arr);
}
// Const overload of CastToArrayOfEnum: views a raw const T[length] as a
// const Array of enum type E. E must be the same size as T.
template<typename E, typename T, uint16_t length>
const Array<E, length> &CastToArrayOfEnum(const T (&arr)[length]) {
  static_assert(sizeof(E) == sizeof(T), "invalid enum type E");
  return *reinterpret_cast<const Array<E, length> *>(arr);
}
// Equality for Array: two arrays are equal when they are the same object, or
// when their sizes match and their raw storage bytes compare equal.
template<typename T, uint16_t length>
bool operator==(const Array<T, length> &lhs,
                const Array<T, length> &rhs) noexcept {
  // Identity fast path: an array always equals itself.
  if (std::addressof(lhs) == std::addressof(rhs)) return true;
  // Raw-byte comparison of the little-endian storage.
  return lhs.size() == rhs.size() &&
         std::memcmp(lhs.Data(), rhs.Data(), rhs.size() * sizeof(T)) == 0;
}
} // namespace flatbuffers
#endif // FLATBUFFERS_ARRAY_H_
#ifndef FLATBUFFERS_BASE_H_
#define FLATBUFFERS_BASE_H_
// clang-format off
// If activated, this should be declared and included first.
#if defined(FLATBUFFERS_MEMORY_LEAK_TRACKING) && \
defined(_MSC_VER) && defined(_DEBUG)
// The _CRTDBG_MAP_ALLOC inside <crtdbg.h> will replace
// calloc/free (etc) to its debug version using #define directives.
#define _CRTDBG_MAP_ALLOC
#include <stdlib.h>
#include <crtdbg.h>
// Replace operator new by trace-enabled version.
#define DEBUG_NEW new(_NORMAL_BLOCK, __FILE__, __LINE__)
#define new DEBUG_NEW
#endif
#if !defined(FLATBUFFERS_ASSERT)
#include <assert.h>
#define FLATBUFFERS_ASSERT assert
#elif defined(FLATBUFFERS_ASSERT_INCLUDE)
// Include file with forward declaration
#include FLATBUFFERS_ASSERT_INCLUDE
#endif
#ifndef ARDUINO
#include <cstdint>
#endif
#include <cstddef>
#include <cstdlib>
#include <cstring>
#if defined(ARDUINO) && !defined(ARDUINOSTL_M_H) && defined(__AVR__)
#include <utility.h>
#else
#include <utility>
#endif
#include <string>
#include <type_traits>
#include <vector>
#include <set>
#include <algorithm>
#include <limits>
#include <iterator>
#include <memory>
#if defined(__unix__) && !defined(FLATBUFFERS_LOCALE_INDEPENDENT)
#include <unistd.h>
#endif
#ifdef __ANDROID__
#include <android/api-level.h>
#endif
#if defined(__ICCARM__)
#include <intrinsics.h>
#endif
// Note the __clang__ check is needed, because clang presents itself
// as an older GNUC compiler (4.2).
// Clang 3.3 and later implement all of the ISO C++ 2011 standard.
// Clang 3.4 and later implement all of the ISO C++ 2014 standard.
// http://clang.llvm.org/cxx_status.html
// Note the MSVC value '__cplusplus' may be incorrect:
// The '__cplusplus' predefined macro in the MSVC stuck at the value 199711L,
// indicating (erroneously!) that the compiler conformed to the C++98 Standard.
// This value should be correct starting from MSVC2017-15.7-Preview-3.
// The '__cplusplus' will be valid only if MSVC2017-15.7-P3 and the `/Zc:__cplusplus` switch is set.
// Workaround (for details see MSDN):
// Use the _MSC_VER and _MSVC_LANG definition instead of the __cplusplus for compatibility.
// The _MSVC_LANG macro reports the Standard version regardless of the '/Zc:__cplusplus' switch.
#if defined(__GNUC__) && !defined(__clang__)
#define FLATBUFFERS_GCC (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#else
#define FLATBUFFERS_GCC 0
#endif
#if defined(__clang__)
#define FLATBUFFERS_CLANG (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)
#else
#define FLATBUFFERS_CLANG 0
#endif
/// @cond FLATBUFFERS_INTERNAL
#if __cplusplus <= 199711L && \
(!defined(_MSC_VER) || _MSC_VER < 1600) && \
(!defined(__GNUC__) || \
(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40400))
#error A C++11 compatible compiler with support for the auto typing is \
required for FlatBuffers.
#error __cplusplus _MSC_VER __GNUC__ __GNUC_MINOR__ __GNUC_PATCHLEVEL__
#endif
#if !defined(__clang__) && \
defined(__GNUC__) && \
(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40600)
// Backwards compatibility for g++ 4.4, and 4.5 which don't have the nullptr
// and constexpr keywords. Note the __clang__ check is needed, because clang
// presents itself as an older GNUC compiler.
#ifndef nullptr_t
// Minimal emulation of C++11 nullptr for pre-4.6 g++: a const object
// implicitly convertible to any object pointer type, always yielding 0.
const class nullptr_t {
public:
// Convert to any pointer type as a null pointer value.
template<class T> inline operator T*() const { return 0; }
private:
// Taking the address of "nullptr" is forbidden (private, undefined).
void operator&() const;
} nullptr = {};
#endif
#ifndef constexpr
#define constexpr const
#endif
#endif
// The wire format uses a little endian encoding (since that's efficient for
// the common platforms).
#if defined(__s390x__)
#define FLATBUFFERS_LITTLEENDIAN 0
#endif // __s390x__
#if !defined(FLATBUFFERS_LITTLEENDIAN)
#if defined(__GNUC__) || defined(__clang__) || defined(__ICCARM__)
#if (defined(__BIG_ENDIAN__) || \
(defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
#define FLATBUFFERS_LITTLEENDIAN 0
#else
#define FLATBUFFERS_LITTLEENDIAN 1
#endif // __BIG_ENDIAN__
#elif defined(_MSC_VER)
#if defined(_M_PPC)
#define FLATBUFFERS_LITTLEENDIAN 0
#else
#define FLATBUFFERS_LITTLEENDIAN 1
#endif
#else
#error Unable to determine endianness, define FLATBUFFERS_LITTLEENDIAN.
#endif
#endif // !defined(FLATBUFFERS_LITTLEENDIAN)
#define FLATBUFFERS_VERSION_MAJOR 23
#define FLATBUFFERS_VERSION_MINOR 5
#define FLATBUFFERS_VERSION_REVISION 9
#define FLATBUFFERS_STRING_EXPAND(X) #X
#define FLATBUFFERS_STRING(X) FLATBUFFERS_STRING_EXPAND(X)
namespace flatbuffers {
// Returns version as string "MAJOR.MINOR.REVISION".
const char* FLATBUFFERS_VERSION();
}
#if (!defined(_MSC_VER) || _MSC_VER > 1600) && \
(!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 407)) || \
defined(__clang__)
#define FLATBUFFERS_FINAL_CLASS final
#define FLATBUFFERS_OVERRIDE override
#define FLATBUFFERS_EXPLICIT_CPP11 explicit
#define FLATBUFFERS_VTABLE_UNDERLYING_TYPE : flatbuffers::voffset_t
#else
#define FLATBUFFERS_FINAL_CLASS
#define FLATBUFFERS_OVERRIDE
#define FLATBUFFERS_EXPLICIT_CPP11
#define FLATBUFFERS_VTABLE_UNDERLYING_TYPE
#endif
#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && \
(!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 406)) || \
(defined(__cpp_constexpr) && __cpp_constexpr >= 200704)
#define FLATBUFFERS_CONSTEXPR constexpr
#define FLATBUFFERS_CONSTEXPR_CPP11 constexpr
#define FLATBUFFERS_CONSTEXPR_DEFINED
#else
#define FLATBUFFERS_CONSTEXPR const
#define FLATBUFFERS_CONSTEXPR_CPP11
#endif
#if (defined(__cplusplus) && __cplusplus >= 201402L) || \
(defined(__cpp_constexpr) && __cpp_constexpr >= 201304)
#define FLATBUFFERS_CONSTEXPR_CPP14 FLATBUFFERS_CONSTEXPR_CPP11
#else
#define FLATBUFFERS_CONSTEXPR_CPP14
#endif
#if (defined(__GXX_EXPERIMENTAL_CXX0X__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 406)) || \
(defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 190023026)) || \
defined(__clang__)
#define FLATBUFFERS_NOEXCEPT noexcept
#else
#define FLATBUFFERS_NOEXCEPT
#endif
// NOTE: the FLATBUFFERS_DELETE_FUNC macro may change the access mode to
// private, so be sure to put it at the end or reset access mode explicitly.
#if (!defined(_MSC_VER) || _MSC_FULL_VER >= 180020827) && \
(!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 404)) || \
defined(__clang__)
#define FLATBUFFERS_DELETE_FUNC(func) func = delete
#else
#define FLATBUFFERS_DELETE_FUNC(func) private: func
#endif
#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && \
(!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)) || \
defined(__clang__)
#define FLATBUFFERS_DEFAULT_DECLARATION
#endif
// Check if we can use template aliases
// Not possible if Microsoft Compiler before 2012
// Possible is the language feature __cpp_alias_templates is defined well
// Or possible if the C++ std is C++11 or newer
#if (defined(_MSC_VER) && _MSC_VER > 1700 /* MSVC2012 */) \
|| (defined(__cpp_alias_templates) && __cpp_alias_templates >= 200704) \
|| (defined(__cplusplus) && __cplusplus >= 201103L)
#define FLATBUFFERS_TEMPLATES_ALIASES
#endif
#ifndef FLATBUFFERS_HAS_STRING_VIEW
// Only provide flatbuffers::string_view if __has_include can be used
// to detect a header that provides an implementation
#if defined(__has_include)
// Check for std::string_view (in c++17)
#if __has_include(<string_view>) && (__cplusplus >= 201606 || (defined(_HAS_CXX17) && _HAS_CXX17))
#include <string_view>
namespace flatbuffers {
typedef std::string_view string_view;
}
#define FLATBUFFERS_HAS_STRING_VIEW 1
// Check for std::experimental::string_view (in c++14, compiler-dependent)
#elif __has_include(<experimental/string_view>) && (__cplusplus >= 201411)
#include <experimental/string_view>
namespace flatbuffers {
typedef std::experimental::string_view string_view;
}
#define FLATBUFFERS_HAS_STRING_VIEW 1
// Check for absl::string_view
#elif __has_include("absl/strings/string_view.h") && \
__has_include("absl/base/config.h") && \
(__cplusplus >= 201411)
#include "absl/base/config.h"
#if !defined(ABSL_USES_STD_STRING_VIEW)
#include "absl/strings/string_view.h"
namespace flatbuffers {
typedef absl::string_view string_view;
}
#define FLATBUFFERS_HAS_STRING_VIEW 1
#endif
#endif
#endif // __has_include
#endif // !FLATBUFFERS_HAS_STRING_VIEW
#ifndef FLATBUFFERS_GENERAL_HEAP_ALLOC_OK
// Allow heap allocations to be used
#define FLATBUFFERS_GENERAL_HEAP_ALLOC_OK 1
#endif // !FLATBUFFERS_GENERAL_HEAP_ALLOC_OK
#ifndef FLATBUFFERS_HAS_NEW_STRTOD
// Modern (C++11) strtod and strtof functions are available for use.
// 1) nan/inf strings as argument of strtod;
// 2) hex-float as argument of strtod/strtof.
#if (defined(_MSC_VER) && _MSC_VER >= 1900) || \
(defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)) || \
(defined(__clang__))
#define FLATBUFFERS_HAS_NEW_STRTOD 1
#endif
#endif // !FLATBUFFERS_HAS_NEW_STRTOD
#ifndef FLATBUFFERS_LOCALE_INDEPENDENT
// Enable locale independent functions {strtof_l, strtod_l,strtoll_l,
// strtoull_l}.
#if (defined(_MSC_VER) && _MSC_VER >= 1800) || \
(defined(__ANDROID_API__) && __ANDROID_API__>= 21) || \
(defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700)) && \
(!defined(__Fuchsia__) && !defined(__ANDROID_API__))
#define FLATBUFFERS_LOCALE_INDEPENDENT 1
#else
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
#endif
#endif // !FLATBUFFERS_LOCALE_INDEPENDENT
// Suppress Undefined Behavior Sanitizer (recoverable only). Usage:
// - __suppress_ubsan__("undefined")
// - __suppress_ubsan__("signed-integer-overflow")
#if defined(__clang__) && (__clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >=7))
#define __suppress_ubsan__(type) __attribute__((no_sanitize(type)))
#elif defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)
#define __suppress_ubsan__(type) __attribute__((no_sanitize_undefined))
#else
#define __suppress_ubsan__(type)
#endif
// This is constexpr function used for checking compile-time constants.
// Avoid `#pragma warning(disable: 4127) // C4127: expression is constant`.
template<typename T> FLATBUFFERS_CONSTEXPR inline bool IsConstTrue(T t) {
  return t ? true : false;
}
// Enable C++ attribute [[]] if std:c++17 or higher.
#if ((__cplusplus >= 201703L) \
|| (defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L)))
// All attributes unknown to an implementation are ignored without causing an error.
#define FLATBUFFERS_ATTRIBUTE(attr) attr
#define FLATBUFFERS_FALLTHROUGH() [[fallthrough]]
#else
#define FLATBUFFERS_ATTRIBUTE(attr)
#if FLATBUFFERS_CLANG >= 30800
#define FLATBUFFERS_FALLTHROUGH() [[clang::fallthrough]]
#elif FLATBUFFERS_GCC >= 70300
#define FLATBUFFERS_FALLTHROUGH() [[gnu::fallthrough]]
#else
#define FLATBUFFERS_FALLTHROUGH()
#endif
#endif
/// @endcond
/// @file
namespace flatbuffers {
/// @cond FLATBUFFERS_INTERNAL
// Our default offset / size type, 32bit on purpose on 64bit systems.
// Also, using a consistent offset type maintains compatibility of serialized
// offset values between 32bit and 64bit systems.
typedef uint32_t uoffset_t;
typedef uint64_t uoffset64_t;
// Signed offsets for references that can go in both directions.
typedef int32_t soffset_t;
typedef int64_t soffset64_t;
// Offset/index used in v-tables, can be changed to uint8_t in
// format forks to save a bit of space if desired.
typedef uint16_t voffset_t;
typedef uintmax_t largest_scalar_t;
// In 32bits, this evaluates to 2GB - 1
#define FLATBUFFERS_MAX_BUFFER_SIZE std::numeric_limits<::flatbuffers::soffset_t>::max()
#define FLATBUFFERS_MAX_64_BUFFER_SIZE std::numeric_limits<::flatbuffers::soffset64_t>::max()
// The minimum size buffer that can be a valid flatbuffer.
// Includes the offset to the root table (uoffset_t), the offset to the vtable
// of the root table (soffset_t), the size of the vtable (uint16_t), and the
// size of the referring table (uint16_t).
#define FLATBUFFERS_MIN_BUFFER_SIZE sizeof(uoffset_t) + sizeof(soffset_t) + \
sizeof(uint16_t) + sizeof(uint16_t)
// We support aligning the contents of buffers up to this size.
#ifndef FLATBUFFERS_MAX_ALIGNMENT
#define FLATBUFFERS_MAX_ALIGNMENT 32
#endif
/// @brief The length of a FlatBuffer file header.
static const size_t kFileIdentifierLength = 4;
// True iff `align` is a power of two within
// [min_align, FLATBUFFERS_MAX_ALIGNMENT].
inline bool VerifyAlignmentRequirements(size_t align, size_t min_align = 1) {
  if (align < min_align) return false;
  if (align > (FLATBUFFERS_MAX_ALIGNMENT)) return false;
  return (align & (align - 1)) == 0;  // must be power of 2
}
#if defined(_MSC_VER)
#pragma warning(disable: 4351) // C4351: new behavior: elements of array ... will be default initialized
#pragma warning(push)
#pragma warning(disable: 4127) // C4127: conditional expression is constant
#endif
// Returns `t` with its byte order reversed. Dispatches on sizeof(T)
// (resolved at compile time) and type-puns through a union so the swap is
// done on a same-width unsigned integer. The FLATBUFFERS_BYTESWAP* macros
// select the fastest intrinsic for the current compiler.
template<typename T> T EndianSwap(T t) {
#if defined(_MSC_VER)
#define FLATBUFFERS_BYTESWAP16 _byteswap_ushort
#define FLATBUFFERS_BYTESWAP32 _byteswap_ulong
#define FLATBUFFERS_BYTESWAP64 _byteswap_uint64
#elif defined(__ICCARM__)
#define FLATBUFFERS_BYTESWAP16 __REV16
#define FLATBUFFERS_BYTESWAP32 __REV
// IAR has no 64-bit byte-reverse; compose one from two 32-bit __REVs.
#define FLATBUFFERS_BYTESWAP64(x) \
((__REV(static_cast<uint32_t>(x >> 32U))) | (static_cast<uint64_t>(__REV(static_cast<uint32_t>(x)))) << 32U)
#else
#if defined(__GNUC__) && __GNUC__ * 100 + __GNUC_MINOR__ < 408 && !defined(__clang__)
// __builtin_bswap16 was missing prior to GCC 4.8.
#define FLATBUFFERS_BYTESWAP16(x) \
static_cast<uint16_t>(__builtin_bswap32(static_cast<uint32_t>(x) << 16))
#else
#define FLATBUFFERS_BYTESWAP16 __builtin_bswap16
#endif
#define FLATBUFFERS_BYTESWAP32 __builtin_bswap32
#define FLATBUFFERS_BYTESWAP64 __builtin_bswap64
#endif
if (sizeof(T) == 1) { // Compile-time if-then's.
// Single byte: nothing to swap.
return t;
} else if (sizeof(T) == 2) {
// Union type-punning: write as T, swap as the same-width integer.
union { T t; uint16_t i; } u = { t };
u.i = FLATBUFFERS_BYTESWAP16(u.i);
return u.t;
} else if (sizeof(T) == 4) {
union { T t; uint32_t i; } u = { t };
u.i = FLATBUFFERS_BYTESWAP32(u.i);
return u.t;
} else if (sizeof(T) == 8) {
union { T t; uint64_t i; } u = { t };
u.i = FLATBUFFERS_BYTESWAP64(u.i);
return u.t;
} else {
// Unsupported scalar width.
FLATBUFFERS_ASSERT(0);
return t;
}
}
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
// Converts `t` between native and wire (little-endian) byte order: a no-op
// on little-endian targets, a byte swap otherwise.
template<typename T> T EndianScalar(T t) {
#if FLATBUFFERS_LITTLEENDIAN
return t;
#else
return EndianSwap(t);
#endif
}
// Reads a little-endian scalar of type T from `p` (which may be
// unaligned — hence the sanitizer suppression).
template<typename T>
// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details.
__suppress_ubsan__("alignment")
T ReadScalar(const void *p) {
return EndianScalar(*reinterpret_cast<const T *>(p));
}
// See https://github.com/google/flatbuffers/issues/5950
#if (FLATBUFFERS_GCC >= 100000) && (FLATBUFFERS_GCC < 110000)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#endif
// Writes scalar `t` to possibly-unaligned memory `p` in wire
// (little-endian) byte order.
template<typename T>
// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details.
__suppress_ubsan__("alignment")
void WriteScalar(void *p, T t) {
*reinterpret_cast<T *>(p) = EndianScalar(t);
}
// Forward declaration so the Offset overload below can precede the full
// definition of Offset.
template<typename T> struct Offset;
// Overload for Offset: stores its raw uoffset_t value, little-endian.
template<typename T> __suppress_ubsan__("alignment") void WriteScalar(void *p, Offset<T> t) {
*reinterpret_cast<uoffset_t *>(p) = EndianScalar(t.o);
}
#if (FLATBUFFERS_GCC >= 100000) && (FLATBUFFERS_GCC < 110000)
#pragma GCC diagnostic pop
#endif
// Computes how many bytes you'd have to pad to be able to write an
// "scalar_size" scalar if the buffer had grown to "buf_size" (downwards in
// memory).
__suppress_ubsan__("unsigned-integer-overflow")
inline size_t PaddingBytes(size_t buf_size, size_t scalar_size) {
// (~buf_size) + 1 is the two's-complement negation of buf_size, so this
// evaluates (-buf_size) mod scalar_size; scalar_size must be a power of
// two for the mask to be valid.
return ((~buf_size) + 1) & (scalar_size - 1);
}
// Generic 'operator==' with conditional specialisations.
// T e - new value of a scalar field.
// T def - default of scalar (is known at compile-time).
template<typename T> inline bool IsTheSameAs(T e, T def) {
  const bool is_default = (e == def);
  return is_default;
}
#if defined(FLATBUFFERS_NAN_DEFAULTS) && \
defined(FLATBUFFERS_HAS_NEW_STRTOD) && (FLATBUFFERS_HAS_NEW_STRTOD > 0)
// Like `operator==(e, def)` with weak NaN if T=(float|double).
template<typename T> inline bool IsFloatTheSameAs(T e, T def) {
// (x != x) is true only for NaN, so two NaNs count as "the same" here.
return (e == def) || ((def != def) && (e != e));
}
// Specialise the generic IsTheSameAs for floats to get NaN-aware matching.
template<> inline bool IsTheSameAs<float>(float e, float def) {
return IsFloatTheSameAs(e, def);
}
template<> inline bool IsTheSameAs<double>(double e, double def) {
return IsFloatTheSameAs(e, def);
}
#endif
// Check 'v' is out of closed range [low; high].
// Written using only operator< comparisons (never subtraction or >=) to
// dodge GCC's [-Werror=type-limits] "comparison is always true" warning
// on types whose range makes one side trivially false.
template<typename T>
inline bool IsOutRange(const T &v, const T &low, const T &high) {
  if (v < low) return true;
  return high < v;
}
// Check 'v' is in closed range [low; high]: true iff low <= v <= high,
// expressed purely with operator< for the same warning-avoidance reasons
// as IsOutRange.
template<typename T>
inline bool IsInRange(const T &v, const T &low, const T &high) {
  return !(v < low) && !(high < v);
}
} // namespace flatbuffers
#endif // FLATBUFFERS_BASE_H_
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_BUFFER_H_
#define FLATBUFFERS_BUFFER_H_
#include <algorithm>
#include "flatbuffers/base.h"
namespace flatbuffers {
// Wrapper for uoffset_t to allow safe template specialization.
// Value is allowed to be 0 to indicate a null object (see e.g. AddOffset).
template<typename T = void> struct Offset {
// The type of offset to use.
typedef uoffset_t offset_type;
// Raw 32-bit offset value; 0 means "null object".
offset_type o;
Offset() : o(0) {}
Offset(const offset_type _o) : o(_o) {}
// Reinterpret as an untyped offset (used for union fields).
Offset<> Union() const { return o; }
bool IsNull() const { return !o; }
};
// Wrapper for uoffset64_t Offsets.
template<typename T = void> struct Offset64 {
// The type of offset to use.
typedef uoffset64_t offset_type;
// Raw 64-bit offset value; 0 means "null object".
offset_type o;
Offset64() : o(0) {}
Offset64(const offset_type offset) : o(offset) {}
// Reinterpret as an untyped 64-bit offset (used for union fields).
Offset64<> Union() const { return o; }
bool IsNull() const { return !o; }
};
// Litmus check for ensuring the Offsets are the expected size.
static_assert(sizeof(Offset<>) == 4, "Offset has wrong size");
static_assert(sizeof(Offset64<>) == 8, "Offset64 has wrong size");
// Asserts at runtime that the target's actual endianness matches the
// compile-time FLATBUFFERS_LITTLEENDIAN configuration, by inspecting the
// lowest byte of an int set to 1.
inline void EndianCheck() {
int endiantest = 1;
// If this fails, see FLATBUFFERS_LITTLEENDIAN above.
FLATBUFFERS_ASSERT(*reinterpret_cast<char *>(&endiantest) ==
FLATBUFFERS_LITTLEENDIAN);
(void)endiantest;
}
// Portable alignment query: MSVC's __alignof, GCC/Clang's __alignof__,
// or the standard alignof operator where it exists.
template<typename T> FLATBUFFERS_CONSTEXPR size_t AlignOf() {
// clang-format off
#ifdef _MSC_VER
return __alignof(T);
#else
#ifndef alignof
return __alignof__(T);
#else
return alignof(T);
#endif
#endif
// clang-format on
}
// Lexicographically compare two strings (possibly containing nulls), and
// return true if the first is less than the second.
static inline bool StringLessThan(const char *a_data, uoffset_t a_size,
                                  const char *b_data, uoffset_t b_size) {
  // Compare the shared prefix first; (std::min) is parenthesized to dodge
  // Windows' min() macro.
  const int prefix_cmp = memcmp(a_data, b_data, (std::min)(a_size, b_size));
  if (prefix_cmp != 0) return prefix_cmp < 0;
  // Equal prefixes: the shorter string sorts first.
  return a_size < b_size;
}
// When we read serialized data from memory, in the case of most scalars,
// we want to just read T, but in the case of Offset, we want to actually
// perform the indirection and return a pointer.
// The template specialization below does just that.
// It is wrapped in a struct since function templates can't overload on the
// return type like this.
// The typedef is for the convenience of callers of this function
// (avoiding the need for a trailing return decltype)
template<typename T> struct IndirectHelper {
typedef T return_type;
typedef T mutable_return_type;
static const size_t element_stride = sizeof(T);
// Reads element i as a plain scalar, converting from wire
// (little-endian) byte order.
static return_type Read(const uint8_t *p, const size_t i) {
return EndianScalar((reinterpret_cast<const T *>(p))[i]);
}
// Mutable variant; scalars come back by value, so this just forwards to
// the const overload.
static mutable_return_type Read(uint8_t *p, const size_t i) {
return reinterpret_cast<mutable_return_type>(
Read(const_cast<const uint8_t *>(p), i));
}
};
// For vector of Offsets.
template<typename T, template<typename> class OffsetT>
struct IndirectHelper<OffsetT<T>> {
typedef const T *return_type;
typedef T *mutable_return_type;
typedef typename OffsetT<T>::offset_type offset_type;
static const offset_type element_stride = sizeof(offset_type);
// Follows the i-th offset: reads the stored self-relative offset and
// returns a pointer to the object it refers to.
static return_type Read(const uint8_t *const p, const offset_type i) {
// Offsets are relative to themselves, so first update the pointer to
// point to the offset location.
const uint8_t *const offset_location = p + i * element_stride;
// Then read the scalar value of the offset (which may be 32 or 64-bits) and
// then determine the relative location from the offset location.
return reinterpret_cast<return_type>(
offset_location + ReadScalar<offset_type>(offset_location));
}
// Mutable variant of the above.
static mutable_return_type Read(uint8_t *const p, const offset_type i) {
// Offsets are relative to themselves, so first update the pointer to
// point to the offset location.
uint8_t *const offset_location = p + i * element_stride;
// Then read the scalar value of the offset (which may be 32 or 64-bits) and
// then determine the relative location from the offset location.
return reinterpret_cast<mutable_return_type>(
offset_location + ReadScalar<offset_type>(offset_location));
}
};
// For vector of structs.
template<typename T> struct IndirectHelper<const T *> {
typedef const T *return_type;
typedef T *mutable_return_type;
static const size_t element_stride = sizeof(T);
// Returns a pointer to the i-th inline struct; no indirection involved.
static return_type Read(const uint8_t *const p, const size_t i) {
// Structs are stored inline, relative to the first struct pointer.
return reinterpret_cast<return_type>(p + i * element_stride);
}
// Mutable variant of the above.
static mutable_return_type Read(uint8_t *const p, const size_t i) {
// Structs are stored inline, relative to the first struct pointer.
return reinterpret_cast<mutable_return_type>(p + i * element_stride);
}
};
/// @brief Get a pointer to the file_identifier section of the buffer.
/// @return Returns a const char pointer to the start of the file_identifier
/// characters in the buffer. The returned char * has length
/// 'flatbuffers::FlatBufferBuilder::kFileIdentifierLength'.
/// This function is UNDEFINED for FlatBuffers whose schema does not include
/// a file_identifier (likely points at padding or the start of the root
/// vtable).
inline const char *GetBufferIdentifier(const void *buf,
                                       bool size_prefixed = false) {
  // The identifier sits right after the root offset; size-prefixed buffers
  // carry an extra length field of the same width in front of it.
  const char *const bytes = reinterpret_cast<const char *>(buf);
  const size_t skip =
      size_prefixed ? 2 * sizeof(uoffset_t) : sizeof(uoffset_t);
  return bytes + skip;
}
// Helper to see if the identifier in a buffer has the expected value.
inline bool BufferHasIdentifier(const void *buf, const char *identifier,
                                bool size_prefixed = false) {
  const char *const id = GetBufferIdentifier(buf, size_prefixed);
  return strncmp(id, identifier, flatbuffers::kFileIdentifierLength) == 0;
}
/// @cond FLATBUFFERS_INTERNAL
// Helpers to get a typed pointer to the root object contained in the buffer.
template<typename T> T *GetMutableRoot(void *buf) {
if (!buf) return nullptr;
EndianCheck();
return reinterpret_cast<T *>(
reinterpret_cast<uint8_t *>(buf) +
EndianScalar(*reinterpret_cast<uoffset_t *>(buf)));
}
// Size-prefixed variant: skips the leading size field before reading the
// root offset.
template<typename T, typename SizeT = uoffset_t>
T *GetMutableSizePrefixedRoot(void *buf) {
return GetMutableRoot<T>(reinterpret_cast<uint8_t *>(buf) + sizeof(SizeT));
}
// Const variant of GetMutableRoot.
template<typename T> const T *GetRoot(const void *buf) {
return GetMutableRoot<T>(const_cast<void *>(buf));
}
// Const variant of GetMutableSizePrefixedRoot.
template<typename T, typename SizeT = uoffset_t>
const T *GetSizePrefixedRoot(const void *buf) {
return GetRoot<T>(reinterpret_cast<const uint8_t *>(buf) + sizeof(SizeT));
}
} // namespace flatbuffers
#endif // FLATBUFFERS_BUFFER_H_
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_BUFFER_REF_H_
#define FLATBUFFERS_BUFFER_REF_H_
#include "flatbuffers/base.h"
#include "flatbuffers/verifier.h"
namespace flatbuffers {
// Convenient way to bundle a buffer and its length, to pass it around
// typed by its root.
// A BufferRef does not own its buffer.
struct BufferRefBase {}; // for std::is_base_of
template<typename T> struct BufferRef : BufferRefBase {
BufferRef() : buf(nullptr), len(0), must_free(false) {}
BufferRef(uint8_t *_buf, uoffset_t _len)
: buf(_buf), len(_len), must_free(false) {}
// NOTE(review): releases with free(), so a buffer handed over with
// must_free == true must come from a malloc-compatible allocation.
// Copying a BufferRef whose must_free is set would double-free on
// destruction (copy ctor/assignment are not deleted here) — verify
// callers never do that.
~BufferRef() {
if (must_free) free(buf);
}
// Typed pointer to the root table of the wrapped buffer.
const T *GetRoot() const { return flatbuffers::GetRoot<T>(buf); }
// Verifies the wrapped buffer contains a valid flatbuffer of root type T.
bool Verify() {
Verifier verifier(buf, len);
return verifier.VerifyBuffer<T>(nullptr);
}
uint8_t *buf;
uoffset_t len;
// When true, the destructor releases `buf` with free().
bool must_free;
};
} // namespace flatbuffers
#endif // FLATBUFFERS_BUFFER_REF_H_
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_DEFAULT_ALLOCATOR_H_
#define FLATBUFFERS_DEFAULT_ALLOCATOR_H_
#include "flatbuffers/allocator.h"
#include "flatbuffers/base.h"
namespace flatbuffers {
// DefaultAllocator uses new/delete to allocate memory regions
class DefaultAllocator : public Allocator {
public:
uint8_t *allocate(size_t size) FLATBUFFERS_OVERRIDE {
return new uint8_t[size];
}
void deallocate(uint8_t *p, size_t) FLATBUFFERS_OVERRIDE { delete[] p; }
// Static convenience for releasing a region obtained from allocate()
// without needing an allocator instance.
static void dealloc(void *p, size_t) { delete[] static_cast<uint8_t *>(p); }
};
// These functions allow for a null allocator to mean use the default allocator,
// as used by DetachedBuffer and vector_downward below.
// This is to avoid having a statically or dynamically allocated default
// allocator, or having to move it between the classes that may own it.
inline uint8_t *Allocate(Allocator *allocator, size_t size) {
return allocator ? allocator->allocate(size)
: DefaultAllocator().allocate(size);
}
// Counterpart to Allocate(): routes to `allocator` or a temporary default.
inline void Deallocate(Allocator *allocator, uint8_t *p, size_t size) {
if (allocator)
allocator->deallocate(p, size);
else
DefaultAllocator().deallocate(p, size);
}
// Reallocates a downward-growing region (see vector_downward), preserving
// the in-use parts at the back and front of the old allocation.
inline uint8_t *ReallocateDownward(Allocator *allocator, uint8_t *old_p,
size_t old_size, size_t new_size,
size_t in_use_back, size_t in_use_front) {
return allocator ? allocator->reallocate_downward(old_p, old_size, new_size,
in_use_back, in_use_front)
: DefaultAllocator().reallocate_downward(
old_p, old_size, new_size, in_use_back, in_use_front);
}
} // namespace flatbuffers
#endif // FLATBUFFERS_DEFAULT_ALLOCATOR_H_
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_DETACHED_BUFFER_H_
#define FLATBUFFERS_DETACHED_BUFFER_H_
#include "flatbuffers/allocator.h"
#include "flatbuffers/base.h"
#include "flatbuffers/default_allocator.h"
namespace flatbuffers {
// DetachedBuffer is a finished flatbuffer memory region, detached from its
// builder. The original memory region and allocator are also stored so that
// the DetachedBuffer can manage the memory lifetime.
class DetachedBuffer {
public:
// Empty buffer: no allocation, data() == nullptr, size() == 0.
DetachedBuffer()
: allocator_(nullptr),
own_allocator_(false),
buf_(nullptr),
reserved_(0),
cur_(nullptr),
size_(0) {}
// Takes over a region: `buf`/`reserved` describe the whole allocation,
// `cur`/`sz` the finished flatbuffer inside it (cur may point into the
// middle of buf, since buffers are built downwards).
DetachedBuffer(Allocator *allocator, bool own_allocator, uint8_t *buf,
size_t reserved, uint8_t *cur, size_t sz)
: allocator_(allocator),
own_allocator_(own_allocator),
buf_(buf),
reserved_(reserved),
cur_(cur),
size_(sz) {}
// Move constructor: steals other's state, leaving it empty via reset().
DetachedBuffer(DetachedBuffer &&other) noexcept
: allocator_(other.allocator_),
own_allocator_(other.own_allocator_),
buf_(other.buf_),
reserved_(other.reserved_),
cur_(other.cur_),
size_(other.size_) {
other.reset();
}
// Move assignment: frees the currently-held region, then steals other's.
DetachedBuffer &operator=(DetachedBuffer &&other) noexcept {
if (this == &other) return *this;
destroy();
allocator_ = other.allocator_;
own_allocator_ = other.own_allocator_;
buf_ = other.buf_;
reserved_ = other.reserved_;
cur_ = other.cur_;
size_ = other.size_;
other.reset();
return *this;
}
~DetachedBuffer() { destroy(); }
// Start of the finished flatbuffer (not of the raw allocation).
const uint8_t *data() const { return cur_; }
uint8_t *data() { return cur_; }
size_t size() const { return size_; }
// These may change access mode, leave these at end of public section
FLATBUFFERS_DELETE_FUNC(DetachedBuffer(const DetachedBuffer &other));
FLATBUFFERS_DELETE_FUNC(
DetachedBuffer &operator=(const DetachedBuffer &other));
protected:
Allocator *allocator_;
bool own_allocator_;
uint8_t *buf_;
size_t reserved_;
uint8_t *cur_;
size_t size_;
// Releases the buffer (and the allocator if owned), then clears members.
inline void destroy() {
if (buf_) Deallocate(allocator_, buf_, reserved_);
if (own_allocator_ && allocator_) { delete allocator_; }
reset();
}
// Clears all members without freeing anything (used after a move-from).
inline void reset() {
allocator_ = nullptr;
own_allocator_ = false;
buf_ = nullptr;
reserved_ = 0;
cur_ = nullptr;
size_ = 0;
}
};
} // namespace flatbuffers
#endif // FLATBUFFERS_DETACHED_BUFFER_H_
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_FLATBUFFER_BUILDER_H_
#define FLATBUFFERS_FLATBUFFER_BUILDER_H_
#include <algorithm>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <type_traits>
#include "flatbuffers/allocator.h"
#include "flatbuffers/array.h"
#include "flatbuffers/base.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/buffer_ref.h"
#include "flatbuffers/default_allocator.h"
#include "flatbuffers/detached_buffer.h"
#include "flatbuffers/stl_emulation.h"
#include "flatbuffers/string.h"
#include "flatbuffers/struct.h"
#include "flatbuffers/table.h"
#include "flatbuffers/vector.h"
#include "flatbuffers/vector_downward.h"
#include "flatbuffers/verifier.h"
namespace flatbuffers {
// Converts a Field ID to a virtual table offset.
inline voffset_t FieldIndexToOffset(voffset_t field_id) {
  // A vtable begins with two voffset_t entries (its own size and the
  // object's size); field slots follow, one voffset_t each. Must match
  // what EndTable() builds up.
  const voffset_t header_fields = 2 * sizeof(voffset_t);
  return header_fields + field_id * sizeof(voffset_t);
}
// Returns a pointer to the vector's contents suitable for memcpy.
// memcpy with a null pointer is UB even for zero bytes, so empty vectors
// yield a dummy non-null address instead of nullptr.
template<typename T, typename Alloc = std::allocator<T>>
const T *data(const std::vector<T, Alloc> &v) {
  static uint8_t t;
  if (v.empty()) return reinterpret_cast<const T *>(&t);
  return &v.front();
}
// Mutable counterpart of data() above: non-null even for empty vectors,
// because the result eventually feeds memcpy.
template<typename T, typename Alloc = std::allocator<T>>
T *data(std::vector<T, Alloc> &v) {
  static uint8_t t;
  if (v.empty()) return reinterpret_cast<T *>(&t);
  return &v.front();
}
/// @addtogroup flatbuffers_cpp_api
/// @{
/// @class FlatBufferBuilder
/// @brief Helper class to hold data needed in creation of a FlatBuffer.
/// To serialize data, you typically call one of the `Create*()` functions in
/// the generated code, which in turn call a sequence of `StartTable`/
/// `PushElement`/`AddElement`/`EndTable`, or the builtin `CreateString`/
/// `CreateVector` functions. Do this in depth-first order to build up a tree to
/// the root. `Finish()` wraps up the buffer ready for transport.
template<bool Is64Aware = false> class FlatBufferBuilderImpl {
public:
// This switches the size type of the builder, based on if its 64-bit aware
// (uoffset64_t) or not (uoffset_t).
typedef
typename std::conditional<Is64Aware, uoffset64_t, uoffset_t>::type SizeT;
/// @brief Default constructor for FlatBufferBuilder.
/// @param[in] initial_size The initial size of the buffer, in bytes. Defaults
/// to `1024`.
/// @param[in] allocator An `Allocator` to use. If null will use
/// `DefaultAllocator`.
/// @param[in] own_allocator Whether the builder/vector should own the
/// allocator. Defaults to / `false`.
/// @param[in] buffer_minalign Force the buffer to be aligned to the given
/// minimum alignment upon reallocation. Only needed if you intend to store
/// types with custom alignment AND you wish to read the buffer in-place
/// directly after creation.
  explicit FlatBufferBuilderImpl(
      size_t initial_size = 1024, Allocator *allocator = nullptr,
      bool own_allocator = false,
      size_t buffer_minalign = AlignOf<largest_scalar_t>())
      // The buffer's maximum size depends on whether 64-bit offsets are in
      // play; everything else starts in the "empty, not building" state.
      : buf_(initial_size, allocator, own_allocator, buffer_minalign,
             static_cast<SizeT>(Is64Aware ? FLATBUFFERS_MAX_64_BUFFER_SIZE
                                          : FLATBUFFERS_MAX_BUFFER_SIZE)),
        num_field_loc(0),
        max_voffset_(0),
        length_of_64_bit_region_(0),
        nested(false),
        finished(false),
        minalign_(1),
        force_defaults_(false),
        dedup_vtables_(true),
        string_pool(nullptr) {
    // Verify at runtime that the host endianness matches what the library
    // was compiled for.
    EndianCheck();
  }
  /// @brief Move constructor for FlatBufferBuilder.
  FlatBufferBuilderImpl(FlatBufferBuilderImpl &&other) noexcept
      : buf_(1024, nullptr, false, AlignOf<largest_scalar_t>(),
             static_cast<SizeT>(Is64Aware ? FLATBUFFERS_MAX_64_BUFFER_SIZE
                                          : FLATBUFFERS_MAX_BUFFER_SIZE)),
        num_field_loc(0),
        max_voffset_(0),
        length_of_64_bit_region_(0),
        nested(false),
        finished(false),
        minalign_(1),
        force_defaults_(false),
        dedup_vtables_(true),
        string_pool(nullptr) {
    EndianCheck();
    // Default construct and swap idiom.
    // Lack of delegating constructors in vs2010 makes it more verbose than
    // needed.
    Swap(other);
  }
  /// @brief Move assignment operator for FlatBufferBuilder.
  /// Leaves `other` in the default-constructed (empty) state.
  FlatBufferBuilderImpl &operator=(FlatBufferBuilderImpl &&other) noexcept {
    // Move construct a temporary and swap idiom
    FlatBufferBuilderImpl temp(std::move(other));
    Swap(temp);
    return *this;
  }
  /// @brief Exchange the entire builder state (buffer, scratch-tracked
  /// fields, flags, and the shared-string pool) with `other`.
  void Swap(FlatBufferBuilderImpl &other) {
    // `using std::swap` enables ADL so member types may supply custom swaps.
    using std::swap;
    buf_.swap(other.buf_);
    swap(num_field_loc, other.num_field_loc);
    swap(max_voffset_, other.max_voffset_);
    swap(length_of_64_bit_region_, other.length_of_64_bit_region_);
    swap(nested, other.nested);
    swap(finished, other.finished);
    swap(minalign_, other.minalign_);
    swap(force_defaults_, other.force_defaults_);
    swap(dedup_vtables_, other.dedup_vtables_);
    swap(string_pool, other.string_pool);
  }
~FlatBufferBuilderImpl() {
if (string_pool) delete string_pool;
}
  /// @brief Clear the builder state AND release the underlying buffer's
  /// memory back to the allocator.
  void Reset() {
    Clear();       // clear builder state
    buf_.reset();  // deallocate buffer
  }
  /// @brief Reset all the state in this FlatBufferBuilder so it can be reused
  /// to construct another buffer.
  void Clear() {
    ClearOffsets();
    buf_.clear();
    nested = false;
    finished = false;
    minalign_ = 1;
    length_of_64_bit_region_ = 0;
    // Keep the pool allocation itself for reuse; just drop its entries.
    if (string_pool) string_pool->clear();
  }
  /// @brief The current size of the serialized buffer, counting from the end.
  /// @return Returns an `SizeT` with the current size of the buffer.
  SizeT GetSize() const { return buf_.size(); }
  /// @brief The current size of the serialized buffer relative to the end of
  /// the 32-bit region.
  /// @return Returns an `uoffset_t` with the current size of the buffer.
  template<bool is_64 = Is64Aware>
  // Only enable this method for the 64-bit builder, as only that builder is
  // concerned with the 32/64-bit boundary, and should be the one to bare any
  // run time costs.
  typename std::enable_if<is_64, uoffset_t>::type GetSizeRelative32BitRegion()
      const {
    //[32-bit region][64-bit region]
    //         [XXXXXXXXXXXXXXXXXXX] GetSize()
    //               [YYYYYYYYYYYYY] length_of_64_bit_region_
    //         [ZZZZ]                return size
    return static_cast<uoffset_t>(GetSize() - length_of_64_bit_region_);
  }
  template<bool is_64 = Is64Aware>
  // Only enable this method for the 32-bit builder.
  // With no 64-bit region, the whole buffer IS the 32-bit region.
  typename std::enable_if<!is_64, uoffset_t>::type GetSizeRelative32BitRegion()
      const {
    return static_cast<uoffset_t>(GetSize());
  }
  /// @brief Get the serialized buffer (after you call `Finish()`).
  /// @return Returns an `uint8_t` pointer to the FlatBuffer data inside the
  /// buffer.
  uint8_t *GetBufferPointer() const {
    Finished();  // Asserts that Finish() has been called.
    return buf_.data();
  }
  /// @brief Get the serialized buffer (after you call `Finish()`) as a span.
  /// @return Returns a constructed flatbuffers::span that is a view over the
  /// FlatBuffer data inside the buffer.
  flatbuffers::span<uint8_t> GetBufferSpan() const {
    Finished();
    return flatbuffers::span<uint8_t>(buf_.data(), buf_.size());
  }
  /// @brief Get a pointer to an unfinished buffer.
  /// @return Returns a `uint8_t` pointer to the unfinished buffer.
  uint8_t *GetCurrentBufferPointer() const { return buf_.data(); }
  /// @brief Get the released pointer to the serialized buffer.
  /// @warning Do NOT attempt to use this FlatBufferBuilder afterwards!
  /// @return A `FlatBuffer` that owns the buffer and its allocator and
  /// behaves similar to a `unique_ptr` with a deleter.
  FLATBUFFERS_ATTRIBUTE([[deprecated("use Release() instead")]])
  DetachedBuffer ReleaseBufferPointer() {
    Finished();
    return buf_.release();
  }
  /// @brief Get the released DetachedBuffer.
  /// @return A `DetachedBuffer` that owns the buffer and its allocator.
  DetachedBuffer Release() {
    Finished();
    return buf_.release();
  }
  /// @brief Get the released pointer to the serialized buffer.
  /// @param size The size of the memory block containing
  /// the serialized `FlatBuffer`.
  /// @param offset The offset from the released pointer where the finished
  /// `FlatBuffer` starts.
  /// @return A raw pointer to the start of the memory block containing
  /// the serialized `FlatBuffer`.
  /// @remark If the allocator is owned, it gets deleted when the destructor is
  /// called..
  uint8_t *ReleaseRaw(size_t &size, size_t &offset) {
    Finished();
    return buf_.release_raw(size, offset);
  }
  /// @brief get the minimum alignment this buffer needs to be accessed
  /// properly. This is only known once all elements have been written (after
  /// you call Finish()). You can use this information if you need to embed
  /// a FlatBuffer in some other buffer, such that you can later read it
  /// without first having to copy it into its own buffer.
  size_t GetBufferMinAlignment() const {
    Finished();
    return minalign_;
  }
  /// @cond FLATBUFFERS_INTERNAL
  void Finished() const {
    // If you get this assert, you're attempting to get access a buffer
    // which hasn't been finished yet. Be sure to call
    // FlatBufferBuilder::Finish with your root table.
    // If you really need to access an unfinished buffer, call
    // GetCurrentBufferPointer instead.
    FLATBUFFERS_ASSERT(finished);
  }
  /// @endcond
  /// @brief In order to save space, fields that are set to their default value
  /// don't get serialized into the buffer.
  /// @param[in] fd When set to `true`, always serializes default values that
  /// are set. Optional fields which are not set explicitly, will still not be
  /// serialized.
  void ForceDefaults(bool fd) { force_defaults_ = fd; }
  /// @brief By default vtables are deduped in order to save space.
  /// @param[in] dedup When set to `true`, dedup vtables.
  void DedupVtables(bool dedup) { dedup_vtables_ = dedup; }
  /// @cond FLATBUFFERS_INTERNAL
  // Write `num_bytes` of zero padding at the current write position.
  void Pad(size_t num_bytes) { buf_.fill(num_bytes); }
  // Record the largest alignment seen so far (reported by
  // GetBufferMinAlignment() once finished).
  void TrackMinAlign(size_t elem_size) {
    if (elem_size > minalign_) minalign_ = elem_size;
  }
  // Pad so the current buffer size is a multiple of `elem_size`.
  void Align(size_t elem_size) {
    TrackMinAlign(elem_size);
    buf_.fill(PaddingBytes(buf_.size(), elem_size));
  }
  // Append an already-finished FlatBuffer wholesale and mark this builder
  // finished, so the accessors above become usable.
  void PushFlatBuffer(const uint8_t *bytes, size_t size) {
    PushBytes(bytes, size);
    finished = true;
  }
  void PushBytes(const uint8_t *bytes, size_t size) { buf_.push(bytes, size); }
  void PopBytes(size_t amount) { buf_.pop(amount); }
  template<typename T> void AssertScalarT() {
    // The code assumes power of 2 sizes and endian-swap-ability.
    static_assert(flatbuffers::is_scalar<T>::value, "T must be a scalar type");
  }
  // Write a single aligned scalar to the buffer
  template<typename T, typename ReturnT = uoffset_t>
  ReturnT PushElement(T element) {
    AssertScalarT<T>();
    Align(sizeof(T));
    // Scalars are stored little-endian; EndianScalar swaps on BE hosts.
    buf_.push_small(EndianScalar(element));
    return CalculateOffset<ReturnT>();
  }
  template<typename T, template<typename> class OffsetT = Offset>
  uoffset_t PushElement(OffsetT<T> off) {
    // Special case for offsets: see ReferTo below.
    return PushElement(ReferTo(off.o));
  }
  // When writing fields, we track where they are, so we can create correct
  // vtables later.
  void TrackField(voffset_t field, uoffset_t off) {
    // Field locations accumulate in the scratch area until EndTable().
    FieldLoc fl = { off, field };
    buf_.scratch_push_small(fl);
    num_field_loc++;
    if (field > max_voffset_) { max_voffset_ = field; }
  }
  // Like PushElement, but additionally tracks the field this represents.
  template<typename T> void AddElement(voffset_t field, T e, T def) {
    // We don't serialize values equal to the default.
    if (IsTheSameAs(e, def) && !force_defaults_) return;
    TrackField(field, PushElement(e));
  }
  // Variant without a default: the value is always written.
  template<typename T> void AddElement(voffset_t field, T e) {
    TrackField(field, PushElement(e));
  }
  template<typename T> void AddOffset(voffset_t field, Offset<T> off) {
    if (off.IsNull()) return;  // Don't store.
    AddElement(field, ReferTo(off.o), static_cast<uoffset_t>(0));
  }
  template<typename T> void AddOffset(voffset_t field, Offset64<T> off) {
    if (off.IsNull()) return;  // Don't store.
    AddElement(field, ReferTo(off.o), static_cast<uoffset64_t>(0));
  }
  // Structs are stored inline in the parent table, not as a separate object.
  template<typename T> void AddStruct(voffset_t field, const T *structptr) {
    if (!structptr) return;  // Default, don't store.
    Align(AlignOf<T>());
    buf_.push_small(*structptr);
    TrackField(field, CalculateOffset<uoffset_t>());
  }
  void AddStructOffset(voffset_t field, uoffset_t off) {
    TrackField(field, off);
  }
  // Offsets initially are relative to the end of the buffer (downwards).
  // This function converts them to be relative to the current location
  // in the buffer (when stored here), pointing upwards.
  uoffset_t ReferTo(uoffset_t off) {
    // Align to ensure GetSizeRelative32BitRegion() below is correct.
    Align(sizeof(uoffset_t));
    // 32-bit offsets are relative to the tail of the 32-bit region of the
    // buffer. For most cases (without 64-bit entities) this is equivalent to
    // size of the whole buffer (e.g. GetSize())
    return ReferTo(off, GetSizeRelative32BitRegion());
  }
  uoffset64_t ReferTo(uoffset64_t off) {
    // Align to ensure GetSize() below is correct.
    Align(sizeof(uoffset64_t));
    // 64-bit offsets are relative to tail of the whole buffer
    return ReferTo(off, GetSize());
  }
  template<typename T, typename T2> T ReferTo(const T off, const T2 size) {
    FLATBUFFERS_ASSERT(off && off <= size);
    // +sizeof(T) accounts for the offset field itself, about to be written.
    return size - off + static_cast<T>(sizeof(T));
  }
  // Same-type overload; more specialized than the <T, T2> template above, so
  // it is selected when offset and size share a type.
  template<typename T> T ReferTo(const T off, const T size) {
    FLATBUFFERS_ASSERT(off && off <= size);
    return size - off + static_cast<T>(sizeof(T));
  }
  void NotNested() {
    // If you hit this, you're trying to construct a Table/Vector/String
    // during the construction of its parent table (between the MyTableBuilder
    // and table.Finish().
    // Move the creation of these sub-objects to above the MyTableBuilder to
    // not get this assert.
    // Ignoring this assert may appear to work in simple cases, but the reason
    // it is here is that storing objects in-line may cause vtable offsets
    // to not fit anymore. It also leads to vtable duplication.
    FLATBUFFERS_ASSERT(!nested);
    // If you hit this, fields were added outside the scope of a table.
    FLATBUFFERS_ASSERT(!num_field_loc);
  }
  // From generated code (or from the parser), we call StartTable/EndTable
  // with a sequence of AddElement calls in between.
  uoffset_t StartTable() {
    NotNested();
    nested = true;
    // The returned value is later passed to EndTable() as `start`.
    return GetSizeRelative32BitRegion();
  }
  // This finishes one serialized object by generating the vtable if it's a
  // table, comparing it against existing vtables, and writing the
  // resulting vtable offset.
  uoffset_t EndTable(uoffset_t start) {
    // If you get this assert, a corresponding StartTable wasn't called.
    FLATBUFFERS_ASSERT(nested);
    // Write the vtable offset, which is the start of any Table.
    // We fill its value later.
    // This is relative to the end of the 32-bit region.
    const uoffset_t vtable_offset_loc =
        static_cast<uoffset_t>(PushElement<soffset_t>(0));
    // Write a vtable, which consists entirely of voffset_t elements.
    // It starts with the number of offsets, followed by a type id, followed
    // by the offsets themselves. In reverse:
    // Include space for the last offset and ensure empty tables have a
    // minimum size.
    max_voffset_ =
        (std::max)(static_cast<voffset_t>(max_voffset_ + sizeof(voffset_t)),
                   FieldIndexToOffset(0));
    // Reserve the vtable, zero-initialized (so unset slots read as 0).
    buf_.fill_big(max_voffset_);
    const uoffset_t table_object_size = vtable_offset_loc - start;
    // Vtable use 16bit offsets.
    FLATBUFFERS_ASSERT(table_object_size < 0x10000);
    // Header entry 2: the table's inline object size.
    WriteScalar<voffset_t>(buf_.data() + sizeof(voffset_t),
                           static_cast<voffset_t>(table_object_size));
    // Header entry 1: the vtable's own byte length.
    WriteScalar<voffset_t>(buf_.data(), max_voffset_);
    // Write the offsets into the table
    for (auto it = buf_.scratch_end() - num_field_loc * sizeof(FieldLoc);
         it < buf_.scratch_end(); it += sizeof(FieldLoc)) {
      auto field_location = reinterpret_cast<FieldLoc *>(it);
      const voffset_t pos =
          static_cast<voffset_t>(vtable_offset_loc - field_location->off);
      // If this asserts, it means you've set a field twice.
      FLATBUFFERS_ASSERT(
          !ReadScalar<voffset_t>(buf_.data() + field_location->id));
      WriteScalar<voffset_t>(buf_.data() + field_location->id, pos);
    }
    ClearOffsets();
    auto vt1 = reinterpret_cast<voffset_t *>(buf_.data());
    auto vt1_size = ReadScalar<voffset_t>(vt1);
    auto vt_use = GetSizeRelative32BitRegion();
    // See if we already have generated a vtable with this exact same
    // layout before. If so, make it point to the old one, remove this one.
    if (dedup_vtables_) {
      // The scratch area holds the offsets of all previously kept vtables.
      for (auto it = buf_.scratch_data(); it < buf_.scratch_end();
           it += sizeof(uoffset_t)) {
        auto vt_offset_ptr = reinterpret_cast<uoffset_t *>(it);
        auto vt2 = reinterpret_cast<voffset_t *>(buf_.data_at(*vt_offset_ptr));
        auto vt2_size = ReadScalar<voffset_t>(vt2);
        if (vt1_size != vt2_size || 0 != memcmp(vt2, vt1, vt1_size)) continue;
        vt_use = *vt_offset_ptr;
        buf_.pop(GetSizeRelative32BitRegion() - vtable_offset_loc);
        break;
      }
    }
    // If this is a new vtable, remember it.
    if (vt_use == GetSizeRelative32BitRegion()) {
      buf_.scratch_push_small(vt_use);
    }
    // Fill the vtable offset we created above.
    // The offset points from the beginning of the object to where the vtable is
    // stored.
    // Offsets default direction is downward in memory for future format
    // flexibility (storing all vtables at the start of the file).
    WriteScalar(buf_.data_at(vtable_offset_loc + length_of_64_bit_region_),
                static_cast<soffset_t>(vt_use) -
                    static_cast<soffset_t>(vtable_offset_loc));
    nested = false;
    return vtable_offset_loc;
  }
  FLATBUFFERS_ATTRIBUTE([[deprecated("call the version above instead")]])
  uoffset_t EndTable(uoffset_t start, voffset_t /*numfields*/) {
    return EndTable(start);
  }
  // This checks a required field has been set in a given table that has
  // just been constructed.
  template<typename T> void Required(Offset<T> table, voffset_t field) {
    auto table_ptr = reinterpret_cast<const Table *>(buf_.data_at(table.o));
    // A zero vtable slot means the field was never written.
    bool ok = table_ptr->GetOptionalFieldOffset(field) != 0;
    // If this fails, the caller will show what field needs to be set.
    FLATBUFFERS_ASSERT(ok);
    (void)ok;
  }
  uoffset_t StartStruct(size_t alignment) {
    Align(alignment);
    return GetSizeRelative32BitRegion();
  }
  uoffset_t EndStruct() { return GetSizeRelative32BitRegion(); }
  // Discard the field locations accumulated in the scratch area by
  // TrackField(), resetting per-table bookkeeping.
  void ClearOffsets() {
    buf_.scratch_pop(num_field_loc * sizeof(FieldLoc));
    num_field_loc = 0;
    max_voffset_ = 0;
  }
  // Aligns such that when "len" bytes are written, an object can be written
  // after it (forward in the buffer) with "alignment" without padding.
  void PreAlign(size_t len, size_t alignment) {
    if (len == 0) return;
    TrackMinAlign(alignment);
    buf_.fill(PaddingBytes(GetSize() + len, alignment));
  }
  // Aligns such than when "len" bytes are written, an object of type `AlignT`
  // can be written after it (forward in the buffer) without padding.
  template<typename AlignT> void PreAlign(size_t len) {
    AssertScalarT<AlignT>();
    PreAlign(len, AlignOf<AlignT>());
  }
  /// @endcond
  /// @brief Store a string in the buffer, which can contain any binary data.
  /// @param[in] str A const char pointer to the data to be stored as a string.
  /// @param[in] len The number of bytes that should be stored from `str`.
  /// @return Returns the offset in the buffer where the string starts.
  template<template<typename> class OffsetT = Offset>
  OffsetT<String> CreateString(const char *str, size_t len) {
    CreateStringImpl(str, len);
    return OffsetT<String>(
        CalculateOffset<typename OffsetT<String>::offset_type>());
  }
  /// @brief Store a string in the buffer, which is null-terminated.
  /// @param[in] str A const char pointer to a C-string to add to the buffer.
  /// @return Returns the offset in the buffer where the string starts.
  template<template<typename> class OffsetT = Offset>
  OffsetT<String> CreateString(const char *str) {
    return CreateString<OffsetT>(str, strlen(str));
  }
  /// @brief Store a string in the buffer, which is null-terminated.
  /// @param[in] str A char pointer to a C-string to add to the buffer.
  /// @return Returns the offset in the buffer where the string starts.
  template<template<typename> class OffsetT = Offset>
  OffsetT<String> CreateString(char *str) {
    return CreateString<OffsetT>(str, strlen(str));
  }
  /// @brief Store a string in the buffer, which can contain any binary data.
  /// @param[in] str A const reference to a std::string to store in the buffer.
  /// @return Returns the offset in the buffer where the string starts.
  template<template<typename> class OffsetT = Offset>
  OffsetT<String> CreateString(const std::string &str) {
    return CreateString<OffsetT>(str.c_str(), str.length());
  }
// clang-format off
#ifdef FLATBUFFERS_HAS_STRING_VIEW
/// @brief Store a string in the buffer, which can contain any binary data.
/// @param[in] str A const string_view to copy in to the buffer.
/// @return Returns the offset in the buffer where the string starts.
template<template <typename> class OffsetT = Offset>
OffsetT<String>CreateString(flatbuffers::string_view str) {
return CreateString<OffsetT>(str.data(), str.size());
}
#endif // FLATBUFFERS_HAS_STRING_VIEW
// clang-format on
  /// @brief Store a string in the buffer, which can contain any binary data.
  /// @param[in] str A const pointer to a `String` struct to add to the buffer.
  /// @return Returns the offset in the buffer where the string starts
  template<template<typename> class OffsetT = Offset>
  OffsetT<String> CreateString(const String *str) {
    // A null String yields a null offset (field won't be stored).
    return str ? CreateString<OffsetT>(str->c_str(), str->size()) : 0;
  }
  /// @brief Store a string in the buffer, which can contain any binary data.
  /// @param[in] str A const reference to a std::string like type with support
  /// of T::c_str() and T::length() to store in the buffer.
  /// @return Returns the offset in the buffer where the string starts.
  template<template<typename> class OffsetT = Offset,
           // No need to explicitly declare the T type, let the compiler deduce
           // it.
           int &...ExplicitArgumentBarrier, typename T>
  OffsetT<String> CreateString(const T &str) {
    return CreateString<OffsetT>(str.c_str(), str.length());
  }
  /// @brief Store a string in the buffer, which can contain any binary data.
  /// If a string with this exact contents has already been serialized before,
  /// instead simply returns the offset of the existing string. This uses a map
  /// stored on the heap, but only stores the numerical offsets.
  /// @param[in] str A const char pointer to the data to be stored as a string.
  /// @param[in] len The number of bytes that should be stored from `str`.
  /// @return Returns the offset in the buffer where the string starts.
  Offset<String> CreateSharedString(const char *str, size_t len) {
    FLATBUFFERS_ASSERT(FLATBUFFERS_GENERAL_HEAP_ALLOC_OK);
    // The pool is created lazily so builders that never share strings pay
    // nothing for it.
    if (!string_pool) {
      string_pool = new StringOffsetMap(StringOffsetCompare(buf_));
    }
    const size_t size_before_string = buf_.size();
    // Must first serialize the string, since the set is all offsets into
    // buffer.
    const Offset<String> off = CreateString<Offset>(str, len);
    auto it = string_pool->find(off);
    // If it exists we reuse existing serialized data!
    if (it != string_pool->end()) {
      // We can remove the string we serialized.
      buf_.pop(buf_.size() - size_before_string);
      return *it;
    }
    // Record this string for future use.
    string_pool->insert(off);
    return off;
  }
#ifdef FLATBUFFERS_HAS_STRING_VIEW
  /// @brief Store a string in the buffer, which can contain any binary data.
  /// If a string with this exact contents has already been serialized before,
  /// instead simply returns the offset of the existing string. This uses a map
  /// stored on the heap, but only stores the numerical offsets.
  /// @param[in] str A const std::string_view to store in the buffer.
  /// @return Returns the offset in the buffer where the string starts
  Offset<String> CreateSharedString(const flatbuffers::string_view str) {
    return CreateSharedString(str.data(), str.size());
  }
#else
  // Without string_view support, provide const char* / std::string overloads
  // instead (the string_view one would subsume them).
  /// @brief Store a string in the buffer, which null-terminated.
  /// If a string with this exact contents has already been serialized before,
  /// instead simply returns the offset of the existing string. This uses a map
  /// stored on the heap, but only stores the numerical offsets.
  /// @param[in] str A const char pointer to a C-string to add to the buffer.
  /// @return Returns the offset in the buffer where the string starts.
  Offset<String> CreateSharedString(const char *str) {
    return CreateSharedString(str, strlen(str));
  }
  /// @brief Store a string in the buffer, which can contain any binary data.
  /// If a string with this exact contents has already been serialized before,
  /// instead simply returns the offset of the existing string. This uses a map
  /// stored on the heap, but only stores the numerical offsets.
  /// @param[in] str A const reference to a std::string to store in the buffer.
  /// @return Returns the offset in the buffer where the string starts.
  Offset<String> CreateSharedString(const std::string &str) {
    return CreateSharedString(str.c_str(), str.length());
  }
#endif
  /// @brief Store a string in the buffer, which can contain any binary data.
  /// If a string with this exact contents has already been serialized before,
  /// instead simply returns the offset of the existing string. This uses a map
  /// stored on the heap, but only stores the numerical offsets.
  /// @param[in] str A const pointer to a `String` struct to add to the buffer.
  /// @return Returns the offset in the buffer where the string starts
  Offset<String> CreateSharedString(const String *str) {
    return str ? CreateSharedString(str->c_str(), str->size()) : 0;
  }
  /// @cond FLATBUFFERS_INTERNAL
  // Finish a vector by writing its element count; returns the vector's offset.
  template<typename LenT = uoffset_t, typename ReturnT = uoffset_t>
  ReturnT EndVector(size_t len) {
    FLATBUFFERS_ASSERT(nested);  // Hit if no corresponding StartVector.
    nested = false;
    return PushElement<LenT, ReturnT>(static_cast<LenT>(len));
  }
  template<template<typename> class OffsetT = Offset, typename LenT = uint32_t>
  void StartVector(size_t len, size_t elemsize, size_t alignment) {
    NotNested();
    nested = true;
    // Align to the Length type of the vector (either 32-bit or 64-bit), so
    // that the length of the buffer can be added without padding.
    PreAlign<LenT>(len * elemsize);
    PreAlign(len * elemsize, alignment);  // Just in case elemsize > uoffset_t.
  }
  // Convenience wrapper deriving element size and alignment from T.
  template<typename T, template<typename> class OffsetT = Offset,
           typename LenT = uint32_t>
  void StartVector(size_t len) {
    return StartVector<OffsetT, LenT>(len, sizeof(T), AlignOf<T>());
  }
  // Call this right before StartVector/CreateVector if you want to force the
  // alignment to be something different than what the element size would
  // normally dictate.
  // This is useful when storing a nested_flatbuffer in a vector of bytes,
  // or when storing SIMD floats, etc.
  void ForceVectorAlignment(size_t len, size_t elemsize, size_t alignment) {
    if (len == 0) return;
    FLATBUFFERS_ASSERT(VerifyAlignmentRequirements(alignment));
    PreAlign(len * elemsize, alignment);
  }
  // Similar to ForceVectorAlignment but for String fields.
  void ForceStringAlignment(size_t len, size_t alignment) {
    if (len == 0) return;
    FLATBUFFERS_ASSERT(VerifyAlignmentRequirements(alignment));
    // +1 for the null terminator that strings carry.
    PreAlign((len + 1) * sizeof(char), alignment);
  }
  /// @endcond
  /// @brief Serialize an array into a FlatBuffer `vector`.
  /// @tparam T The data type of the array elements.
  /// @tparam OffsetT the type of offset to return
  /// @tparam VectorT the type of vector to cast to.
  /// @param[in] v A pointer to the array of type `T` to serialize into the
  /// buffer as a `vector`.
  /// @param[in] len The number of elements to serialize.
  /// @return Returns a typed `TOffset` into the serialized data indicating
  /// where the vector is stored.
  template<template<typename...> class OffsetT = Offset,
           template<typename...> class VectorT = Vector,
           int &...ExplicitArgumentBarrier, typename T>
  OffsetT<VectorT<T>> CreateVector(const T *v, size_t len) {
    // The type of the length field in the vector.
    typedef typename VectorT<T>::size_type LenT;
    typedef typename OffsetT<VectorT<T>>::offset_type offset_type;
    // If this assert hits, you're specifying a template argument that is
    // causing the wrong overload to be selected, remove it.
    AssertScalarT<T>();
    StartVector<T, OffsetT, LenT>(len);
    if (len > 0) {
      // clang-format off
      #if FLATBUFFERS_LITTLEENDIAN
        // Wire format is little-endian, so the array can be copied wholesale.
        PushBytes(reinterpret_cast<const uint8_t *>(v), len * sizeof(T));
      #else
        if (sizeof(T) == 1) {
          PushBytes(reinterpret_cast<const uint8_t *>(v), len);
        } else {
          // Big-endian host: each element must be byte-swapped; push in
          // reverse since the buffer grows downward.
          for (auto i = len; i > 0; ) {
            PushElement(v[--i]);
          }
        }
      #endif
      // clang-format on
    }
    return OffsetT<VectorT<T>>(EndVector<LenT, offset_type>(len));
  }
  /// @brief Serialize an array like object into a FlatBuffer `vector`.
  /// @tparam T The data type of the array elements.
  /// @tparam C The type of the array.
  /// @param[in] array A reference to an array like object of type `T` to
  /// serialize into the buffer as a `vector`.
  /// @return Returns a typed `Offset` into the serialized data indicating
  /// where the vector is stored.
  template<typename T, class C> Offset<Vector<T>> CreateVector(const C &array) {
    return CreateVector(array.data(), array.size());
  }
  /// @brief Serialize an initializer list into a FlatBuffer `vector`.
  /// @tparam T The data type of the initializer list elements.
  /// @param[in] v The value of the initializer list.
  /// @return Returns a typed `Offset` into the serialized data indicating
  /// where the vector is stored.
  template<typename T>
  Offset<Vector<T>> CreateVector(std::initializer_list<T> v) {
    return CreateVector(v.begin(), v.size());
  }
  // Overload for vectors of offsets: each offset must be converted from
  // buffer-end-relative to location-relative, so elements are pushed one by
  // one (in reverse, since the buffer grows downward).
  template<typename T>
  Offset<Vector<Offset<T>>> CreateVector(const Offset<T> *v, size_t len) {
    StartVector<Offset<T>>(len);
    for (auto i = len; i > 0;) { PushElement(v[--i]); }
    return Offset<Vector<Offset<T>>>(EndVector(len));
  }
  /// @brief Serialize a `std::vector` into a FlatBuffer `vector`.
  /// @tparam T The data type of the `std::vector` elements.
  /// @param v A const reference to the `std::vector` to serialize into the
  /// buffer as a `vector`.
  /// @return Returns a typed `Offset` into the serialized data indicating
  /// where the vector is stored.
  template<typename T, typename Alloc = std::allocator<T>>
  Offset<Vector<T>> CreateVector(const std::vector<T, Alloc> &v) {
    return CreateVector(data(v), v.size());
  }
  // 64-bit variant: the vector is placed in the 64-bit region of the buffer.
  template<template<typename...> class VectorT = Vector64,
           int &...ExplicitArgumentBarrier, typename T>
  Offset64<VectorT<T>> CreateVector64(const std::vector<T> &v) {
    return CreateVector<Offset64, VectorT>(data(v), v.size());
  }
  // vector<bool> may be implemented using a bit-set, so we can't access it as
  // an array. Instead, read elements manually.
  // Background: https://isocpp.org/blog/2012/11/on-vectorbool
  Offset<Vector<uint8_t>> CreateVector(const std::vector<bool> &v) {
    StartVector<uint8_t>(v.size());
    for (auto i = v.size(); i > 0;) {
      PushElement(static_cast<uint8_t>(v[--i]));
    }
    return Offset<Vector<uint8_t>>(EndVector(v.size()));
  }
  /// @brief Serialize values returned by a function into a FlatBuffer `vector`.
  /// This is a convenience function that takes care of iteration for you.
  /// @tparam T The data type of the `std::vector` elements.
  /// @param f A function that takes the current iteration 0..vector_size-1 and
  /// returns any type that you can construct a FlatBuffers vector out of.
  /// @return Returns a typed `Offset` into the serialized data indicating
  /// where the vector is stored.
  template<typename T>
  Offset<Vector<T>> CreateVector(size_t vector_size,
                                 const std::function<T(size_t i)> &f) {
    FLATBUFFERS_ASSERT(FLATBUFFERS_GENERAL_HEAP_ALLOC_OK);
    // Materialize all elements first so the contiguous overload can be used.
    std::vector<T> elems(vector_size);
    for (size_t i = 0; i < vector_size; i++) elems[i] = f(i);
    return CreateVector(elems);
  }
  /// @brief Serialize values returned by a function into a FlatBuffer `vector`.
  /// This is a convenience function that takes care of iteration for you. This
  /// uses a vector stored on the heap to store the intermediate results of the
  /// iteration.
  /// @tparam T The data type of the `std::vector` elements.
  /// @param f A function that takes the current iteration 0..vector_size-1,
  /// and the state parameter returning any type that you can construct a
  /// FlatBuffers vector out of.
  /// @param state State passed to f.
  /// @return Returns a typed `Offset` into the serialized data indicating
  /// where the vector is stored.
  template<typename T, typename F, typename S>
  Offset<Vector<T>> CreateVector(size_t vector_size, F f, S *state) {
    FLATBUFFERS_ASSERT(FLATBUFFERS_GENERAL_HEAP_ALLOC_OK);
    std::vector<T> elems(vector_size);
    for (size_t i = 0; i < vector_size; i++) elems[i] = f(i, state);
    return CreateVector(elems);
  }
  /// @brief Serialize a `std::vector<StringType>` into a FlatBuffer `vector`.
  /// whereas StringType is any type that is accepted by the CreateString()
  /// overloads.
  /// This is a convenience function for a common case.
  /// @param v A const reference to the `std::vector` to serialize into the
  /// buffer as a `vector`.
  /// @return Returns a typed `Offset` into the serialized data indicating
  /// where the vector is stored.
  template<typename StringType = std::string,
           typename Alloc = std::allocator<StringType>>
  Offset<Vector<Offset<String>>> CreateVectorOfStrings(
      const std::vector<StringType, Alloc> &v) {
    return CreateVectorOfStrings(v.cbegin(), v.cend());
  }
  /// @brief Serialize a collection of Strings into a FlatBuffer `vector`.
  /// This is a convenience function for a common case.
  /// @param begin The beginning iterator of the collection
  /// @param end The ending iterator of the collection
  /// @return Returns a typed `Offset` into the serialized data indicating
  /// where the vector is stored.
  template<class It>
  Offset<Vector<Offset<String>>> CreateVectorOfStrings(It begin, It end) {
    auto size = std::distance(begin, end);
    auto scratch_buffer_usage = size * sizeof(Offset<String>);
    // If there is not enough space to store the offsets, there definitely won't
    // be enough space to store all the strings. So ensuring space for the
    // scratch region is OK, for if it fails, it would have failed later.
    buf_.ensure_space(scratch_buffer_usage);
    // Pass 1: serialize each string, parking its offset in the scratch area
    // (strings can't be created while the vector itself is nested).
    for (auto it = begin; it != end; ++it) {
      buf_.scratch_push_small(CreateString(*it));
    }
    // Pass 2: write the offsets, in reverse, into the vector proper.
    StartVector<Offset<String>>(size);
    for (auto i = 1; i <= size; i++) {
      // Note we re-evaluate the buf location each iteration to account for any
      // underlying buffer resizing that may occur.
      PushElement(*reinterpret_cast<Offset<String> *>(
          buf_.scratch_end() - i * sizeof(Offset<String>)));
    }
    buf_.scratch_pop(scratch_buffer_usage);
    return Offset<Vector<Offset<String>>>(EndVector(size));
  }
  /// @brief Serialize an array of structs into a FlatBuffer `vector`.
  /// @tparam T The data type of the struct array elements.
  /// @param[in] v A pointer to the array of type `T` to serialize into the
  /// buffer as a `vector`.
  /// @param[in] len The number of elements to serialize.
  /// @return Returns a typed `Offset` into the serialized data indicating
  /// where the vector is stored.
  template<typename T, template<typename...> class OffsetT = Offset,
           template<typename...> class VectorT = Vector>
  OffsetT<VectorT<const T *>> CreateVectorOfStructs(const T *v, size_t len) {
    // The type of the length field in the vector.
    typedef typename VectorT<T>::size_type LenT;
    typedef typename OffsetT<VectorT<const T *>>::offset_type offset_type;
    StartVector<OffsetT, LenT>(len * sizeof(T) / AlignOf<T>(), sizeof(T),
                               AlignOf<T>());
    if (len > 0) {
      // Structs are stored in wire format in memory, so a raw copy suffices.
      PushBytes(reinterpret_cast<const uint8_t *>(v), sizeof(T) * len);
    }
    return OffsetT<VectorT<const T *>>(EndVector<LenT, offset_type>(len));
  }
  /// @brief Serialize an array of structs into a FlatBuffer `vector`.
  /// @tparam T The data type of the struct array elements.
  /// @param[in] filler A function that takes the current iteration
  /// 0..vector_size-1 and a pointer to the struct that must be filled.
  /// @return Returns a typed `Offset` into the serialized data indicating
  /// where the vector is stored.
  /// This is mostly useful when flatbuffers are generated with mutation
  /// accessors.
  template<typename T>
  Offset<Vector<const T *>> CreateVectorOfStructs(
      size_t vector_size, const std::function<void(size_t i, T *)> &filler) {
    // Reserve the structs in-place, then let `filler` write each one.
    T *structs = StartVectorOfStructs<T>(vector_size);
    for (size_t i = 0; i < vector_size; i++) {
      filler(i, structs);
      structs++;
    }
    return EndVectorOfStructs<T>(vector_size);
  }
  /// @brief Serialize an array of structs into a FlatBuffer `vector`.
  /// @tparam T The data type of the struct array elements.
  /// @param[in] f A function that takes the current iteration 0..vector_size-1,
  /// a pointer to the struct that must be filled and the state argument.
  /// @param[in] state Arbitrary state to pass to f.
  /// @return Returns a typed `Offset` into the serialized data indicating
  /// where the vector is stored.
  /// This is mostly useful when flatbuffers are generated with mutation
  /// accessors.
  template<typename T, typename F, typename S>
  Offset<Vector<const T *>> CreateVectorOfStructs(size_t vector_size, F f,
                                                  S *state) {
    T *structs = StartVectorOfStructs<T>(vector_size);
    for (size_t i = 0; i < vector_size; i++) {
      f(i, structs, state);
      structs++;
    }
    return EndVectorOfStructs<T>(vector_size);
  }
/// @brief Serialize a `std::vector` of structs into a FlatBuffer `vector`.
/// @tparam T The data type of the `std::vector` struct elements.
/// @param[in] v A const reference to the `std::vector` of structs to
/// serialize into the buffer as a `vector`.
/// @return Returns a typed `Offset` into the serialized data indicating
/// where the vector is stored.
template<typename T, template<typename...> class OffsetT = Offset,
template<typename...> class VectorT = Vector,
typename Alloc = std::allocator<T>>
OffsetT<VectorT<const T *>> CreateVectorOfStructs(
const std::vector<T, Alloc> &v) {
return CreateVectorOfStructs<T, OffsetT, VectorT>(data(v), v.size());
}
template<template<typename...> class VectorT = Vector64, int &..., typename T>
Offset64<VectorT<const T *>> CreateVectorOfStructs64(
const std::vector<T> &v) {
return CreateVectorOfStructs<T, Offset64, VectorT>(data(v), v.size());
}
/// @brief Serialize an array of native structs into a FlatBuffer `vector`.
/// @tparam T The data type of the struct array elements.
/// @tparam S The data type of the native struct array elements.
/// @param[in] v A pointer to the array of type `S` to serialize into the
/// buffer as a `vector`.
/// @param[in] len The number of elements to serialize.
/// @param[in] pack_func Pointer to a function to convert the native struct
/// to the FlatBuffer struct.
/// @return Returns a typed `Offset` into the serialized data indicating
/// where the vector is stored.
template<typename T, typename S>
Offset<Vector<const T *>> CreateVectorOfNativeStructs(
const S *v, size_t len, T (*const pack_func)(const S &)) {
FLATBUFFERS_ASSERT(pack_func);
auto structs = StartVectorOfStructs<T>(len);
for (size_t i = 0; i < len; i++) { structs[i] = pack_func(v[i]); }
return EndVectorOfStructs<T>(len);
}
  /// @brief Serialize an array of native structs into a FlatBuffer `vector`.
  /// @tparam T The data type of the struct array elements.
  /// @tparam S The data type of the native struct array elements.
  /// @param[in] v A pointer to the array of type `S` to serialize into the
  /// buffer as a `vector`.
  /// @param[in] len The number of elements to serialize.
  /// @return Returns a typed `Offset` into the serialized data indicating
  /// where the vector is stored.
  template<typename T, typename S>
  Offset<Vector<const T *>> CreateVectorOfNativeStructs(const S *v,
                                                        size_t len) {
    // Declared here, resolved at link time: the generated code is expected to
    // provide a `T Pack(const S &)` function for each native/struct pair.
    extern T Pack(const S &);
    return CreateVectorOfNativeStructs(v, len, Pack);
  }
/// @brief Serialize a `std::vector` of native structs into a FlatBuffer
/// `vector`.
/// @tparam T The data type of the `std::vector` struct elements.
/// @tparam S The data type of the `std::vector` native struct elements.
/// @param[in] v A const reference to the `std::vector` of structs to
/// serialize into the buffer as a `vector`.
/// @param[in] pack_func Pointer to a function to convert the native struct
/// to the FlatBuffer struct.
/// @return Returns a typed `Offset` into the serialized data indicating
/// where the vector is stored.
template<typename T, typename S, typename Alloc = std::allocator<T>>
Offset<Vector<const T *>> CreateVectorOfNativeStructs(
const std::vector<S, Alloc> &v, T (*const pack_func)(const S &)) {
return CreateVectorOfNativeStructs<T, S>(data(v), v.size(), pack_func);
}
/// @brief Serialize a `std::vector` of native structs into a FlatBuffer
/// `vector`.
/// @tparam T The data type of the `std::vector` struct elements.
/// @tparam S The data type of the `std::vector` native struct elements.
/// @param[in] v A const reference to the `std::vector` of structs to
/// serialize into the buffer as a `vector`.
/// @return Returns a typed `Offset` into the serialized data indicating
/// where the vector is stored.
template<typename T, typename S, typename Alloc = std::allocator<S>>
Offset<Vector<const T *>> CreateVectorOfNativeStructs(
const std::vector<S, Alloc> &v) {
return CreateVectorOfNativeStructs<T, S>(data(v), v.size());
}
/// @cond FLATBUFFERS_INTERNAL
template<typename T> struct StructKeyComparator {
bool operator()(const T &a, const T &b) const {
return a.KeyCompareLessThan(&b);
}
};
/// @endcond
/// @brief Serialize a `std::vector` of structs into a FlatBuffer `vector`
/// in sorted order.
/// @tparam T The data type of the `std::vector` struct elements.
/// @param[in] v A const reference to the `std::vector` of structs to
/// serialize into the buffer as a `vector`.
/// @return Returns a typed `Offset` into the serialized data indicating
/// where the vector is stored.
template<typename T, typename Alloc = std::allocator<T>>
Offset<Vector<const T *>> CreateVectorOfSortedStructs(
std::vector<T, Alloc> *v) {
return CreateVectorOfSortedStructs(data(*v), v->size());
}
/// @brief Serialize a `std::vector` of native structs into a FlatBuffer
/// `vector` in sorted order.
/// @tparam T The data type of the `std::vector` struct elements.
/// @tparam S The data type of the `std::vector` native struct elements.
/// @param[in] v A const reference to the `std::vector` of structs to
/// serialize into the buffer as a `vector`.
/// @return Returns a typed `Offset` into the serialized data indicating
/// where the vector is stored.
template<typename T, typename S, typename Alloc = std::allocator<T>>
Offset<Vector<const T *>> CreateVectorOfSortedNativeStructs(
std::vector<S, Alloc> *v) {
return CreateVectorOfSortedNativeStructs<T, S>(data(*v), v->size());
}
/// @brief Serialize an array of structs into a FlatBuffer `vector` in sorted
/// order.
/// @tparam T The data type of the struct array elements.
/// @param[in] v A pointer to the array of type `T` to serialize into the
/// buffer as a `vector`.
/// @param[in] len The number of elements to serialize.
/// @return Returns a typed `Offset` into the serialized data indicating
/// where the vector is stored.
template<typename T>
Offset<Vector<const T *>> CreateVectorOfSortedStructs(T *v, size_t len) {
std::stable_sort(v, v + len, StructKeyComparator<T>());
return CreateVectorOfStructs(v, len);
}
  /// @brief Serialize an array of native structs into a FlatBuffer `vector` in
  /// sorted order.
  /// @tparam T The data type of the struct array elements.
  /// @tparam S The data type of the native struct array elements.
  /// @param[in] v A pointer to the array of type `S` to serialize into the
  /// buffer as a `vector`.
  /// @param[in] len The number of elements to serialize.
  /// @return Returns a typed `Offset` into the serialized data indicating
  /// where the vector is stored.
  template<typename T, typename S>
  Offset<Vector<const T *>> CreateVectorOfSortedNativeStructs(S *v,
                                                              size_t len) {
    // Resolved at link time: generated code is expected to provide a
    // `T Pack(const S &)` function for each native/struct pair.
    extern T Pack(const S &);
    // Pack into the reserved vector region first, then sort the packed
    // structs in place (the native input array is left untouched).
    auto structs = StartVectorOfStructs<T>(len);
    for (size_t i = 0; i < len; i++) { structs[i] = Pack(v[i]); }
    std::stable_sort(structs, structs + len, StructKeyComparator<T>());
    return EndVectorOfStructs<T>(len);
  }
  /// @cond FLATBUFFERS_INTERNAL
  // Orders table offsets by resolving each offset into the builder's buffer
  // and comparing the pointed-to tables via their generated
  // KeyCompareLessThan member. Holds a reference to the buffer, so
  // copy-assignment is deleted (copy-construction shares the same buffer).
  template<typename T> struct TableKeyComparator {
    explicit TableKeyComparator(vector_downward<SizeT> &buf) : buf_(buf) {}
    TableKeyComparator(const TableKeyComparator &other) : buf_(other.buf_) {}
    bool operator()(const Offset<T> &a, const Offset<T> &b) const {
      auto table_a = reinterpret_cast<T *>(buf_.data_at(a.o));
      auto table_b = reinterpret_cast<T *>(buf_.data_at(b.o));
      return table_a->KeyCompareLessThan(table_b);
    }
    vector_downward<SizeT> &buf_;
   private:
    FLATBUFFERS_DELETE_FUNC(
        TableKeyComparator &operator=(const TableKeyComparator &other));
  };
  /// @endcond
/// @brief Serialize an array of `table` offsets as a `vector` in the buffer
/// in sorted order.
/// @tparam T The data type that the offset refers to.
/// @param[in] v An array of type `Offset<T>` that contains the `table`
/// offsets to store in the buffer in sorted order.
/// @param[in] len The number of elements to store in the `vector`.
/// @return Returns a typed `Offset` into the serialized data indicating
/// where the vector is stored.
template<typename T>
Offset<Vector<Offset<T>>> CreateVectorOfSortedTables(Offset<T> *v,
size_t len) {
std::stable_sort(v, v + len, TableKeyComparator<T>(buf_));
return CreateVector(v, len);
}
/// @brief Serialize an array of `table` offsets as a `vector` in the buffer
/// in sorted order.
/// @tparam T The data type that the offset refers to.
/// @param[in] v An array of type `Offset<T>` that contains the `table`
/// offsets to store in the buffer in sorted order.
/// @return Returns a typed `Offset` into the serialized data indicating
/// where the vector is stored.
template<typename T, typename Alloc = std::allocator<T>>
Offset<Vector<Offset<T>>> CreateVectorOfSortedTables(
std::vector<Offset<T>, Alloc> *v) {
return CreateVectorOfSortedTables(data(*v), v->size());
}
  /// @brief Specialized version of `CreateVector` for non-copying use cases.
  /// Write the data any time later to the returned buffer pointer `buf`.
  /// @param[in] len The number of elements to store in the `vector`.
  /// @param[in] elemsize The size of each element in the `vector`.
  /// @param[in] alignment The alignment of each element in the `vector`.
  /// @param[out] buf A pointer to a `uint8_t` pointer that can be
  /// written to at a later time to serialize the data into a `vector`
  /// in the buffer.
  /// @return Returns the offset in the buffer where the vector starts.
  uoffset_t CreateUninitializedVector(size_t len, size_t elemsize,
                                      size_t alignment, uint8_t **buf) {
    NotNested();
    StartVector(len, elemsize, alignment);
    // Reserve the payload bytes without writing them; the caller fills them
    // in later through *buf.
    buf_.make_space(len * elemsize);
    const uoffset_t vec_start = GetSizeRelative32BitRegion();
    auto vec_end = EndVector(len);
    // Resolve the output pointer only after EndVector, so it reflects any
    // buffer reallocation caused by writing the length field.
    *buf = buf_.data_at(vec_start);
    return vec_end;
  }
  // Deprecated: this overload used the element size as the alignment; prefer
  // the overload above that takes an explicit alignment.
  FLATBUFFERS_ATTRIBUTE([[deprecated("call the version above instead")]])
  uoffset_t CreateUninitializedVector(size_t len, size_t elemsize,
                                      uint8_t **buf) {
    return CreateUninitializedVector(len, elemsize, elemsize, buf);
  }
/// @brief Specialized version of `CreateVector` for non-copying use cases.
/// Write the data any time later to the returned buffer pointer `buf`.
/// @tparam T The data type of the data that will be stored in the buffer
/// as a `vector`.
/// @param[in] len The number of elements to store in the `vector`.
/// @param[out] buf A pointer to a pointer of type `T` that can be
/// written to at a later time to serialize the data into a `vector`
/// in the buffer.
template<typename T>
Offset<Vector<T>> CreateUninitializedVector(size_t len, T **buf) {
AssertScalarT<T>();
return CreateUninitializedVector(len, sizeof(T), AlignOf<T>(),
reinterpret_cast<uint8_t **>(buf));
}
template<typename T>
Offset<Vector<const T *>> CreateUninitializedVectorOfStructs(size_t len,
T **buf) {
return CreateUninitializedVector(len, sizeof(T), AlignOf<T>(),
reinterpret_cast<uint8_t **>(buf));
}
// @brief Create a vector of scalar type T given as input a vector of scalar
// type U, useful with e.g. pre "enum class" enums, or any existing scalar
// data of the wrong type.
template<typename T, typename U>
Offset<Vector<T>> CreateVectorScalarCast(const U *v, size_t len) {
AssertScalarT<T>();
AssertScalarT<U>();
StartVector<T>(len);
for (auto i = len; i > 0;) { PushElement(static_cast<T>(v[--i])); }
return Offset<Vector<T>>(EndVector(len));
}
  /// @brief Write a struct by itself, typically to be part of a union.
  /// @param[in] structobj The struct value to copy into the buffer.
  /// @return Returns the offset of the serialized struct.
  template<typename T> Offset<const T *> CreateStruct(const T &structobj) {
    NotNested();
    Align(AlignOf<T>());
    // Structs are fixed-size, so a direct bitwise copy suffices.
    buf_.push_small(structobj);
    return Offset<const T *>(
        CalculateOffset<typename Offset<const T *>::offset_type>());
  }
  /// @brief Finish serializing a buffer by writing the root offset.
  /// @param[in] root The offset of the root table of the buffer.
  /// @param[in] file_identifier If a `file_identifier` is given, the buffer
  /// will be prefixed with a standard FlatBuffers file header.
  template<typename T>
  void Finish(Offset<T> root, const char *file_identifier = nullptr) {
    Finish(root.o, file_identifier, false);
  }
  /// @brief Finish a buffer with a 32 bit size field pre-fixed (size of the
  /// buffer following the size field). These buffers are NOT compatible
  /// with standard buffers created by Finish, i.e. you can't call GetRoot
  /// on them, you have to use GetSizePrefixedRoot instead.
  /// All >32 bit quantities in this buffer will be aligned when the whole
  /// size pre-fixed buffer is aligned.
  /// These kinds of buffers are useful for creating a stream of FlatBuffers.
  /// @param[in] root The offset of the root table of the buffer.
  /// @param[in] file_identifier Optional FlatBuffers file header identifier.
  template<typename T>
  void FinishSizePrefixed(Offset<T> root,
                          const char *file_identifier = nullptr) {
    Finish(root.o, file_identifier, true);
  }
  /// @brief Swap the allocator of the underlying buffer with that of another
  /// builder (see `vector_downward::swap_allocator`).
  void SwapBufAllocator(FlatBufferBuilderImpl &other) {
    buf_.swap_allocator(other.buf_);
  }
  /// @brief The length of a FlatBuffer file header.
  static const size_t kFileIdentifierLength =
      ::flatbuffers::kFileIdentifierLength;
 protected:
  // You shouldn't really be copying instances of this class.
  FlatBufferBuilderImpl(const FlatBufferBuilderImpl &);
  FlatBufferBuilderImpl &operator=(const FlatBufferBuilderImpl &);
  // Shared implementation behind Finish() and FinishSizePrefixed(): writes
  // the optional file identifier, the root offset and the optional size
  // prefix, then marks the buffer as finished.
  void Finish(uoffset_t root, const char *file_identifier, bool size_prefix) {
    NotNested();
    buf_.clear_scratch();
    const size_t prefix_size = size_prefix ? sizeof(SizeT) : 0;
    // Make sure we track the alignment of the size prefix.
    TrackMinAlign(prefix_size);
    const size_t root_offset_size = sizeof(uoffset_t);
    const size_t file_id_size = file_identifier ? kFileIdentifierLength : 0;
    // This will cause the whole buffer to be aligned.
    PreAlign(prefix_size + root_offset_size + file_id_size, minalign_);
    if (file_identifier) {
      FLATBUFFERS_ASSERT(strlen(file_identifier) == kFileIdentifierLength);
      PushBytes(reinterpret_cast<const uint8_t *>(file_identifier),
                kFileIdentifierLength);
    }
    PushElement(ReferTo(root)); // Location of root.
    // The size prefix is written last, i.e. it ends up at the very front of
    // the downward-growing buffer.
    if (size_prefix) { PushElement(GetSize()); }
    finished = true;
  }
  // The location of a table field while it is being built: the offset of the
  // field's value paired with its vtable slot id.
  struct FieldLoc {
    uoffset_t off;
    voffset_t id;
  };
  // The buffer that all serialized data is written into (grows downward).
  vector_downward<SizeT> buf_;
  // Accumulating offsets of table members while it is being built.
  // We store these in the scratch pad of buf_, after the vtable offsets.
  uoffset_t num_field_loc;
  // Track how much of the vtable is in use, so we can output the most compact
  // possible vtable.
  voffset_t max_voffset_;
  // This is the length of the 64-bit region of the buffer. The buffer supports
  // 64-bit offsets by forcing serialization of those elements in the "tail"
  // region of the buffer (i.e. "64-bit region"). To properly keep track of
  // offsets that are referenced from the tail of the buffer to not overflow
  // their size (e.g. Offset is a uint32_t type), the boundary of the 32-/64-bit
  // regions must be tracked.
  //
  // [ Complete FlatBuffer ]
  // [32-bit region][64-bit region]
  // ^ ^
  // | Tail of the buffer.
  // |
  // Tail of the 32-bit region of the buffer.
  //
  // This keeps track of the size of the 64-bit region so that the tail of the
  // 32-bit region can be calculated as `GetSize() - length_of_64_bit_region_`.
  //
  // This will remain 0 if no 64-bit offset types are added to the buffer.
  size_t length_of_64_bit_region_;
  // When true, 64-bit offsets can still be added to the builder. When false,
  // only 32-bit offsets can be added, and attempts to add a 64-bit offset will
  // raise an assertion. This is typically a compile-time error in ordering the
  // serialization of 64-bit offset fields not at the tail of the buffer.
  // NOTE(review): the paragraph above appears to describe a member that is
  // not present in this chunk — confirm against upstream flatbuffers.
  // Ensure objects are not nested.
  bool nested;
  // Ensure the buffer is finished before it is being accessed.
  bool finished;
  // Minimum alignment seen so far; the whole buffer is aligned to this.
  size_t minalign_;
  bool force_defaults_; // Serialize values equal to their defaults anyway.
  bool dedup_vtables_;
  // Orders string offsets by the bytes of the strings they refer to inside
  // the builder's buffer; used to deduplicate shared strings.
  struct StringOffsetCompare {
    explicit StringOffsetCompare(const vector_downward<SizeT> &buf)
        : buf_(&buf) {}
    bool operator()(const Offset<String> &a, const Offset<String> &b) const {
      auto stra = reinterpret_cast<const String *>(buf_->data_at(a.o));
      auto strb = reinterpret_cast<const String *>(buf_->data_at(b.o));
      return StringLessThan(stra->data(), stra->size(), strb->data(),
                            strb->size());
    }
    const vector_downward<SizeT> *buf_;
  };
  // For use with CreateSharedString. Instantiated on first use only.
  typedef std::set<Offset<String>, StringOffsetCompare> StringOffsetMap;
  StringOffsetMap *string_pool;
 private:
  // Guard (at compile time and at run time) that it is still legal to add a
  // 64-bit offset to this builder.
  void CanAddOffset64() {
    // If you hit this assertion, you are attempting to add a 64-bit offset to
    // a 32-bit only builder. This is because the builder has overloads that
    // differ only on the offset size returned: e.g.:
    //
    //   FlatBufferBuilder builder;
    //   Offset64<String> string_offset = builder.CreateString<Offset64>();
    //
    // Either use a 64-bit aware builder, or don't try to create an Offset64
    // return type.
    //
    // TODO(derekbailey): we can probably do more enable_if to avoid this
    // looking like its possible to the user.
    static_assert(Is64Aware, "cannot add 64-bit offset to a 32-bit builder");
    // If you hit this assertion, you are attempting to add an 64-bit offset
    // item after already serializing a 32-bit item. All 64-bit offsets have to
    // added to the tail of the buffer before any 32-bit items can be added.
    // Otherwise some items might not be addressable due to the maximum range of
    // the 32-bit offset.
    FLATBUFFERS_ASSERT(GetSize() == length_of_64_bit_region_);
  }
  /// @brief Store a string in the buffer, which can contain any binary data.
  /// Writes the bytes only; the caller computes the resulting offset
  /// afterwards via `CalculateOffset` (which is why this returns `void`).
  /// @param[in] str A const char pointer to the data to be stored as a string.
  /// @param[in] len The number of bytes that should be stored from `str`.
  void CreateStringImpl(const char *str, size_t len) {
    NotNested();
    PreAlign<uoffset_t>(len + 1); // Always 0-terminated.
    buf_.fill(1); // The single fill byte is the null terminator.
    PushBytes(reinterpret_cast<const uint8_t *>(str), len);
    PushElement(static_cast<uoffset_t>(len));
  }
  // Allocates space for a vector of structures.
  // Must be completed with EndVectorOfStructs().
  // Returns a pointer to the uninitialized element storage, which stays valid
  // only until the next buffer-growing operation.
  template<typename T, template<typename> class OffsetT = Offset>
  T *StartVectorOfStructs(size_t vector_size) {
    // StartVector's element count is in units of AlignOf<T>() bytes.
    StartVector<OffsetT>(vector_size * sizeof(T) / AlignOf<T>(), sizeof(T),
                         AlignOf<T>());
    return reinterpret_cast<T *>(buf_.make_space(vector_size * sizeof(T)));
  }
  // End the vector of structures in the flatbuffers.
  // Vector should have previously be started with StartVectorOfStructs().
  // Writes the length field and returns the typed offset of the vector.
  template<typename T, template<typename> class OffsetT = Offset>
  OffsetT<Vector<const T *>> EndVectorOfStructs(size_t vector_size) {
    return OffsetT<Vector<const T *>>(
        EndVector<typename Vector<const T *>::size_type,
                  typename OffsetT<Vector<const T *>>::offset_type>(
            vector_size));
  }
  // 32-bit flavor: offsets are measured relative to the tail of the 32-bit
  // region (enabled only when T is uoffset_t).
  template<typename T>
  typename std::enable_if<std::is_same<T, uoffset_t>::value, T>::type
  CalculateOffset() {
    // Default to the end of the 32-bit region. This may or may not be the end
    // of the buffer, depending on if any 64-bit offsets have been added.
    return GetSizeRelative32BitRegion();
  }
  // Specializations to handle the 64-bit CalculateOffset, which is relative to
  // end of the buffer.
  template<typename T>
  typename std::enable_if<std::is_same<T, uoffset64_t>::value, T>::type
  CalculateOffset() {
    // This should never be compiled in when not using a 64-bit builder.
    static_assert(Is64Aware, "invalid 64-bit offset in 32-bit builder");
    // Store how big the 64-bit region of the buffer is, so we can determine
    // where the 32/64 bit boundary is.
    length_of_64_bit_region_ = GetSize();
    return length_of_64_bit_region_;
  }
};
/// @}
// Hack to make `FlatBufferBuilder` mean `FlatBufferBuilder<false>` or
// `FlatBufferBuilder<>`, where the template < > syntax is required.
typedef FlatBufferBuilderImpl<false> FlatBufferBuilder;
typedef FlatBufferBuilderImpl<true> FlatBufferBuilder64;
// These are external due to GCC not allowing them in the class.
// See: https://stackoverflow.com/q/8061456/868247
// 64-bit string creation: must happen while the buffer is still in its
// 64-bit (tail) phase, hence the CanAddOffset64() guard.
template<>
template<>
inline Offset64<String> FlatBufferBuilder64::CreateString(const char *str,
                                                          size_t len) {
  CanAddOffset64();
  CreateStringImpl(str, len);
  return Offset64<String>(
      CalculateOffset<typename Offset64<String>::offset_type>());
}
// Used to distinguish from real Offsets. Stand-in offset-template argument
// for StartVector when no offset type should be implied.
template<typename T = void> struct EmptyOffset {};
// TODO(derekbailey): it would be nice to combine these two methods.
// 32-bit length variant of starting a 64-bit-addressed vector: guard that
// the builder is still in its 64-bit phase, then delegate.
template<>
template<>
inline void FlatBufferBuilder64::StartVector<Offset64, uint32_t>(
    size_t len, size_t elemsize, size_t alignment) {
  CanAddOffset64();
  StartVector<EmptyOffset, uint32_t>(len, elemsize, alignment);
}
// 64-bit length variant: same guard and delegation as above, with a
// uint64_t length field.
template<>
template<>
inline void FlatBufferBuilder64::StartVector<Offset64, uint64_t>(
    size_t len, size_t elemsize, size_t alignment) {
  CanAddOffset64();
  StartVector<EmptyOffset, uint64_t>(len, elemsize, alignment);
}
/// Helpers to get a typed pointer to objects that are currently being built.
/// @warning Creating new objects will lead to reallocations and invalidates
/// the pointer!
template<typename T>
T *GetMutableTemporaryPointer(FlatBufferBuilder &fbb, Offset<T> offset) {
  // Offsets are relative to the end of the downward-growing buffer, so
  // translate the offset into a distance from the current buffer start.
  uint8_t *const start = fbb.GetCurrentBufferPointer();
  const auto distance_from_start = fbb.GetSize() - offset.o;
  return reinterpret_cast<T *>(start + distance_from_start);
}
/// Const view of an object currently being built; see
/// GetMutableTemporaryPointer for the invalidation caveat.
template<typename T>
const T *GetTemporaryPointer(FlatBufferBuilder &fbb, Offset<T> offset) {
  T *const mutable_ptr = GetMutableTemporaryPointer<T>(fbb, offset);
  return mutable_ptr;
}
} // namespace flatbuffers
#endif // FLATBUFFERS_FLATBUFFER_BUILDER_H_
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment