/*
 * Code generator for C, building FlatBuffers.
*
* There are several approaches, some light, some requiring a library,
* some with vectored I/O etc.
*
* Here we focus on a reasonable balance of light code and efficiency.
*
* Builder code is generated to a separate file that includes the
* generated read-only code.
*
* Mutable buffers are not supported in this version.
*
*/
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "flatcc/flatcc_builder.h"
#include "flatcc/flatcc_emitter.h"
/*
 * `check` is designed to handle incorrect use errors that can be
 * ignored in production of a tested product.
 *
 * `check_error` fails if condition is false and is designed to return an
 * error code in production.
 */
#if FLATCC_BUILDER_ASSERT_ON_ERROR
#define check(cond, reason) FLATCC_BUILDER_ASSERT(cond, reason)
#else
#define check(cond, reason) ((void)0)
#endif
#if FLATCC_BUILDER_SKIP_CHECKS
#define check_error(cond, err, reason) ((void)0)
#else
/* Wrapped in do/while (0) so the macro behaves as a single statement
 * and is safe in unbraced if/else contexts - the previous bare `if`
 * form suffered from the dangling-else hazard. */
#define check_error(cond, err, reason) do { if (!(cond)) { check(cond, reason); return err; } } while (0)
#endif
/* Portable replacement: `strnlen` is not universally available. Returns
 * the length of `s` up to `max_len` without reading past `max_len`. */
static inline size_t pstrnlen(const char *s, size_t max_len)
{
    const char *nul = memchr(s, 0, max_len);

    if (nul == NULL) {
        return max_len;
    }
    return (size_t)(nul - s);
}
#undef strnlen
#define strnlen pstrnlen
/* Padding can be up to 255 zeroes, and 1 zero string termination byte.
 * When two paddings are combined at nested buffers, we need twice that.
 * Visible to emitter so it can test for zero padding in iov. */
const uint8_t flatcc_builder_padding_base[512] = { 0 };
#define _pad flatcc_builder_padding_base
/* Short local aliases for the flatbuffers primitive types. */
#define uoffset_t flatbuffers_uoffset_t
#define soffset_t flatbuffers_soffset_t
#define voffset_t flatbuffers_voffset_t
#define utype_t flatbuffers_utype_t
/* Write helpers; `_to_pe` suggests conversion to protocol endianness -
 * see the flatbuffers accessor headers for the exact semantics. */
#define write_uoffset __flatbuffers_uoffset_write_to_pe
#define write_voffset __flatbuffers_voffset_write_to_pe
#define write_identifier __flatbuffers_uoffset_write_to_pe
#define write_utype __flatbuffers_utype_write_to_pe
/* Element sizes and the maximum element counts they permit. */
#define field_size sizeof(uoffset_t)
#define max_offset_count FLATBUFFERS_COUNT_MAX(field_size)
#define union_size sizeof(flatcc_builder_union_ref_t)
#define max_union_count FLATBUFFERS_COUNT_MAX(union_size)
#define utype_size sizeof(utype_t)
#define max_utype_count FLATBUFFERS_COUNT_MAX(utype_size)
#define max_string_len FLATBUFFERS_COUNT_MAX(1)
#define identifier_size FLATBUFFERS_IDENTIFIER_SIZE
#define iovec_t flatcc_iovec_t
#define frame_size sizeof(__flatcc_builder_frame_t)
/* Accesses a member of the current (innermost) build frame; requires a
 * builder `B` in scope. */
#define frame(x) (B->frame[0].x)
/* Round `x` up to the next multiple of `align`; `align` must be a
 * power of 2. */
static inline uoffset_t alignup_uoffset(uoffset_t x, size_t align)
{
    uoffset_t mask = (uoffset_t)align - 1u;

    return (x + mask) & ~mask;
}
/* Round `x` up to the next multiple of `align`; `align` must be a
 * power of 2. */
static inline size_t alignup_size(size_t x, size_t align)
{
    size_t mask = align - 1u;

    return (x + mask) & ~mask;
}
/* Describes one emitted vtable; descriptors are chained through `next`
 * into a hash table so previously emitted vtables can be found again. */
typedef struct vtable_descriptor vtable_descriptor_t;
struct vtable_descriptor {
    /* Where the vtable is emitted. */
    flatcc_builder_ref_t vt_ref;
    /* Which buffer it was emitted to. */
    uoffset_t nest_id;
    /* Where the vtable is cached. */
    uoffset_t vb_start;
    /* Hash table collision chain. */
    uoffset_t next;
};
typedef struct flatcc_iov_state flatcc_iov_state_t;
/* Accumulates scatter/gather segments before they are handed off. */
struct flatcc_iov_state {
    /* Total byte length across all entries currently in `iov`. */
    size_t len;
    /* Number of `iov` entries in use. */
    int count;
    flatcc_iovec_t iov[FLATCC_IOV_COUNT_MAX];
};
#define iov_state_t flatcc_iov_state_t
/* These macros assume an `iov_state_t iov;` has been declared in scope.
 * Each is wrapped in do/while (0) so it acts as a single statement and
 * is safe in unbraced if/else contexts - the previous bare-`if` and
 * bare-brace forms had dangling-else / empty-statement hazards. The
 * `size` argument is now consistently parenthesized as well. */
#define push_iov_cond(base, size, cond) do { if ((size) > 0 && (cond)) { iov.len += (size);\
    iov.iov[iov.count].iov_base = (void *)(base); iov.iov[iov.count].iov_len = (size); ++iov.count; } } while (0)
#define push_iov(base, size) push_iov_cond(base, size, 1)
#define init_iov() do { iov.len = 0; iov.count = 0; } while (0)
/*
 * Default allocator for the builder's internal buffers.
 *
 * `request == 0` frees the buffer. Otherwise the buffer is resized to a
 * power-of-2 multiple of a per-buffer base size chosen from `hint`,
 * with hysteresis so a modest drop in demand does not shrink it.
 * When `zero_fill` is set, any newly added region is zeroed.
 * Returns 0 on success, -1 on allocation failure.
 */
int flatcc_builder_default_alloc(void *alloc_context, iovec_t *b, size_t request, int zero_fill, int hint)
{
    void *p;
    size_t n;
    (void)alloc_context;
    if (request == 0) {
        if (b->iov_base) {
            FLATCC_BUILDER_FREE(b->iov_base);
            b->iov_base = 0;
            b->iov_len = 0;
        }
        return 0;
    }
    /* Pick a starting size suited to each buffer's typical usage. */
    switch (hint) {
    case flatcc_builder_alloc_ds:
        n = 256;
        break;
    case flatcc_builder_alloc_ht:
        /* Should be exact size, or space size is just wasted. */
        n = request;
        break;
    case flatcc_builder_alloc_fs:
        n = sizeof(__flatcc_builder_frame_t) * 8;
        break;
    case flatcc_builder_alloc_us:
        n = 64;
        break;
    default:
        /*
         * We have many small structures - vs stack for tables with few
         * elements, and few offset fields in patch log. No need to
         * overallocate in case of busy small messages.
         */
        n = 32;
        break;
    }
    /* Grow by doubling. Guard against size_t overflow: without the
     * check a huge `request` could wrap `n` to 0 and loop forever. */
    while (n < request) {
        if (n > SIZE_MAX / 2) {
            n = request;
            break;
        }
        n *= 2;
    }
    if (request <= b->iov_len && b->iov_len / 2 >= n) {
        /* Add hysteresis to shrink. */
        return 0;
    }
    if (!(p = FLATCC_BUILDER_REALLOC(b->iov_base, n))) {
        return -1;
    }
    /* Realloc might also shrink. */
    if (zero_fill && b->iov_len < n) {
        memset((uint8_t *)p + b->iov_len, 0, n - b->iov_len);
    }
    b->iov_base = p;
    b->iov_len = n;
    return 0;
}
/* Byte-offset pointer arithmetic via integer casts.
 * NOTE(review): this uses size_t rather than uintptr_t for the
 * pointer round-trip; it matches the rest of the file but assumes
 * size_t can hold a pointer value on all supported targets. */
#define T_ptr(base, pos) ((void *)((size_t)(base) + (size_t)(pos)))
/* Pointers into the builder's internal buffers at byte position `pos`:
 * ds = data stack, vs = vtable stack, pl = patch log, us = user frame
 * stack, vd = vtable descriptors, vb = vtable cache buffer. */
#define ds_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_ds].iov_base, (pos)))
#define vs_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vs].iov_base, (pos)))
#define pl_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_pl].iov_base, (pos)))
#define us_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_us].iov_base, (pos)))
#define vd_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vd].iov_base, (pos)))
#define vb_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vb].iov_base, (pos)))
/* Inverse of the above: byte offset of `ptr` within a given buffer. */
#define vs_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_vs].iov_base))
#define pl_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_pl].iov_base))
#define us_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_us].iov_base))
/* Largest representable table / data sizes given the offset types. */
#define table_limit (FLATBUFFERS_VOFFSET_MAX - field_size + 1)
#define data_limit (FLATBUFFERS_UOFFSET_MAX - field_size + 1)
/* Copies `id` into the builder's identifier field; a null `id` yields
 * an all-zero identifier (borrowing the zero padding block). */
#define set_identifier(id) memcpy(&B->identifier, (id) ? (void *)(id) : (void *)_pad, identifier_size)
/* Must also return true when no buffer has been started. */
#define is_top_buffer(B) (B->nest_id == 0)
/*
 * Tables use a stack representation better suited for quickly adding
 * fields to tables, but it must occasionally be refreshed following
 * reallocation or reentry from child frame.
 */
/* Recompute the cached data-stack pointer and usable limit after the
 * ds buffer may have moved (reallocation or frame reentry). */
static inline void refresh_ds(flatcc_builder_t *B, uoffset_t type_limit)
{
    iovec_t *ds_buf = B->buffers + flatcc_builder_alloc_ds;
    uoffset_t limit = (uoffset_t)ds_buf->iov_len - B->ds_first;

    B->ds = ds_ptr(B->ds_first);
    /* Never hand out more than the container type can represent. */
    if (limit > type_limit) {
        limit = type_limit;
    }
    B->ds_limit = limit;
    /* So exit frame can refresh fast. */
    frame(type_limit) = type_limit;
}
/* Grow the data stack so at least `need` bytes are usable past the
 * frame start, then refresh the cached stack pointers.
 * Returns 0 on success, -1 on allocation failure. */
static int reserve_ds(flatcc_builder_t *B, size_t need, uoffset_t limit)
{
    iovec_t *ds_buf = &B->buffers[flatcc_builder_alloc_ds];

    if (B->alloc(B->alloc_context, ds_buf, B->ds_first + need, 1, flatcc_builder_alloc_ds) != 0) {
        return -1;
    }
    refresh_ds(B, limit);
    return 0;
}
/*
 * Make sure there is always an extra zero termination on stack
 * even if it isn't emitted such that string updates may count
 * on zero termination being present always.
 */
/*
 * Reserve `size` bytes at the top of the data stack and return a
 * pointer to them, or 0 on allocation failure.
 *
 * The `>=` test (rather than `>`) combined with reserving
 * `B->ds_offset + 1` keeps at least one zero byte beyond the requested
 * region - the extra zero termination described above.
 */
static inline void *push_ds(flatcc_builder_t *B, uoffset_t size)
{
    size_t offset;
    offset = B->ds_offset;
    if ((B->ds_offset += size) >= B->ds_limit) {
        if (reserve_ds(B, B->ds_offset + 1, data_limit)) {
            return 0;
        }
    }
    return B->ds + offset;
}
/* Pop `size` bytes off the data stack, re-zeroing the vacated region
 * so subsequent pushes see cleared memory. */
static inline void unpush_ds(flatcc_builder_t *B, uoffset_t size)
{
    B->ds_offset -= size;
    memset(&B->ds[B->ds_offset], 0, size);
}
/* Push `size` bytes on the data stack and copy `data` into them.
 * Returns the destination, or 0 on allocation failure. */
static inline void *push_ds_copy(flatcc_builder_t *B, const void *data, uoffset_t size)
{
    void *dst = push_ds(B, size);

    if (dst == 0) {
        return 0;
    }
    return memcpy(dst, data, size);
}
/*
 * Reserve an aligned `size`-byte slot for table field `id` on the data
 * stack and record its position in the vtable scratch (`B->vs`).
 * Returns a pointer to the slot, or 0 on allocation failure.
 */
static inline void *push_ds_field(flatcc_builder_t *B, uoffset_t size, uint16_t align, voffset_t id)
{
    uoffset_t offset;
    /*
     * We calculate table field alignment relative to first entry, not
     * header field with vtable offset.
     *
     * Note: >= comparison handles special case where B->ds is not
     * allocated yet and size is 0 so the return value would be mistaken
     * for an error.
     */
    offset = alignup_uoffset(B->ds_offset, align);
    if ((B->ds_offset = offset + size) >= B->ds_limit) {
        if (reserve_ds(B, B->ds_offset + 1, table_limit)) {
            return 0;
        }
    }
    /* `field_size` compensates for the table header field that precedes
     * the first stack entry (see alignment comment above). */
    B->vs[id] = (voffset_t)(offset + field_size);
    /* Track one past the highest field id seen so far. */
    if (id >= B->id_end) {
        B->id_end = id + 1u;
    }
    return B->ds + offset;
}
/*
 * Reserve a `field_size` slot for offset field `id` on the data stack,
 * record its vtable entry, and append its position to the patch log
 * (`B->pl`) - presumably so the stored reference can be fixed up when
 * the table is emitted; confirm against the emit logic (not shown).
 * Returns a pointer to the slot, or 0 on allocation failure.
 */
static inline void *push_ds_offset_field(flatcc_builder_t *B, voffset_t id)
{
    uoffset_t offset;
    offset = alignup_uoffset(B->ds_offset, field_size);
    if ((B->ds_offset = offset + field_size) > B->ds_limit) {
        if (reserve_ds(B, B->ds_offset, table_limit)) {
            return 0;
        }
    }
    /* `field_size` compensates for the table header field that precedes
     * the first stack entry. */
    B->vs[id] = (voffset_t)(offset + field_size);
    /* Track one past the highest field id seen so far. */
    if (id >= B->id_end) {
        B->id_end = id + 1u;
    }
    /* Log this offset field's stack position for later patching. */
    *B->pl++ = (flatbuffers_voffset_t)offset;
    return B->ds + offset;
}
/* Ensure the buffer of `alloc_type` can hold `used + need` bytes and
 * return a pointer just past the `used` region, or 0 on failure. */
static inline void *reserve_buffer(flatcc_builder_t *B, int alloc_type, size_t used, size_t need, int zero_init)
{
    iovec_t *buf = &B->buffers[alloc_type];
    size_t total = used + need;

    if (total > buf->iov_len) {
        if (B->alloc(B->alloc_context, buf, total, zero_init, alloc_type) != 0) {
            check(0, "memory allocation failed");
            return 0;
        }
    }
    return (void *)((size_t)buf->iov_base + used);
}
/*
 * Ensure the vtable scratch (`B->vs`) and patch log (`B->pl`) have room
 * for `count` additional fields of the current table, updating both
 * cached pointers. Returns 0 on success, -1 on allocation failure.
 */
static inline int reserve_fields(flatcc_builder_t *B, int count)
{
    size_t used, need;
    /* Provide faster stack operations for common table operations. */
    used = frame(container.table.vs_end) + frame(container.table.id_end) * sizeof(voffset_t);
    /* The 2 extra entries are the header skipped below - presumably
     * the vtable's size fields; confirm against the emit code. */
    need = (size_t)(count + 2) * sizeof(voffset_t);
    if (!(B->vs = reserve_buffer(B, flatcc_builder_alloc_vs, used, need, 1))) {
        return -1;
    }
    /* Move past header for convenience. */
    B->vs += 2;
    used = frame(container.table.pl_end);
    /* Add one to handle special case of first table being empty. */
    need = (size_t)count * sizeof(*(B->pl)) + 1;
    if (!(B->pl = reserve_buffer(B, flatcc_builder_alloc_pl, used, need, 0))) {
        return -1;
    }
    return 0;
}
static int alloc_ht(flatcc_builder_t *B)
{
Loading ...