12 #include "upb/port_def.inc"
14 /* Guarantee null-termination and provide ellipsis truncation.
15 * It may be tempting to "optimize" this by initializing these final
16 * four bytes up-front and then being careful never to overwrite them,
17 * this is safer and simpler. */
18 static void nullz(upb_status *status) {
19 const char *ellipsis = "...";
20 size_t len = strlen(ellipsis);
21 UPB_ASSERT(sizeof(status->msg) > len);
22 memcpy(status->msg + sizeof(status->msg) - len, ellipsis, len);
25 /* upb_status *****************************************************************/
27 void upb_status_clear(upb_status *status) {
30 status->msg[0] = '\0';
33 bool upb_ok(const upb_status *status) { return status->ok; }
35 const char *upb_status_errmsg(const upb_status *status) { return status->msg; }
37 void upb_status_seterrmsg(upb_status *status, const char *msg) {
40 strncpy(status->msg, msg, sizeof(status->msg));
44 void upb_status_seterrf(upb_status *status, const char *fmt, ...) {
47 upb_status_vseterrf(status, fmt, args);
51 void upb_status_vseterrf(upb_status *status, const char *fmt, va_list args) {
54 _upb_vsnprintf(status->msg, sizeof(status->msg), fmt, args);
58 /* upb_alloc ******************************************************************/
60 static void *upb_global_allocfunc(upb_alloc *alloc, void *ptr, size_t oldsize,
68 return realloc(ptr, size);
/* The default allocator: a stateless upb_alloc that forwards to the C heap
 * via upb_global_allocfunc. */
upb_alloc upb_alloc_global = {&upb_global_allocfunc};
74 /* upb_arena ******************************************************************/
/* Be conservative and choose 16 in case anyone is using SSE. */
static const size_t maxalign = 16;

/* Returns |size| rounded up to the next multiple of maxalign. */
static size_t align_up_max(size_t size) {
  size_t rem = size % maxalign;
  return rem == 0 ? size : size + (maxalign - rem);
}
  /* We implement the allocator interface.
   * This must be the first member of upb_arena! */

  /* Allocator to allocate arena blocks. We are responsible for freeing these
   * when we are destroyed. */
  upb_alloc *block_alloc;

  /* Total bytes handed out to callers (see upb_arena_bytesallocated()). */
  size_t bytes_allocated;
  /* Size target for the next malloc'd block; grows geometrically in
   * upb_arena_allocblock(), capped at max_block_size. */
  size_t next_block_size;
  /* Upper bound for next_block_size growth. */
  size_t max_block_size;

  /* Linked list of blocks. Points to an arena_block, defined in env.c */

  /* Cleanup entries. Pointer to a cleanup_ent, defined in env.c */
/* One contiguous region of arena memory; blocks form a singly-linked list
 * headed at the arena's block_head, newest block first. */
typedef struct mem_block {
  struct mem_block *next;
/* A registered cleanup callback; entries form a singly-linked list headed
 * at the arena's cleanup_head and are invoked by upb_arena_free(). */
typedef struct cleanup_ent {
  struct cleanup_ent *next;
  upb_cleanup_func *cleanup;
117 static void upb_arena_addblock(upb_arena *a, void *ptr, size_t size,
119 mem_block *block = ptr;
121 block->next = a->block_head;
123 block->used = align_up_max(sizeof(mem_block));
124 block->owned = owned;
126 a->block_head = block;
128 /* TODO(haberman): ASAN poison. */
131 static mem_block *upb_arena_allocblock(upb_arena *a, size_t size) {
132 size_t block_size = UPB_MAX(size, a->next_block_size) + sizeof(mem_block);
133 mem_block *block = upb_malloc(a->block_alloc, block_size);
139 upb_arena_addblock(a, block, block_size, true);
140 a->next_block_size = UPB_MIN(block_size * 2, a->max_block_size);
/* upb_alloc callback for the arena (installed as a->alloc.func).
 * Allocation bumps block->used within the head block; a request of
 * size == 0 is a free, which an arena deliberately ignores. Falls back to
 * upb_arena_allocblock() when the head block lacks room. */
static void *upb_arena_doalloc(upb_alloc *alloc, void *ptr, size_t oldsize,
  upb_arena *a = (upb_arena*)alloc;  /* Safe: upb_alloc is the first member. */
  mem_block *block = a->block_head;

    return NULL;  /* We are an arena, don't need individual frees. */

  /* Pad the request so every returned pointer is maximally aligned. */
  size = align_up_max(size);

  /* TODO(haberman): special-case if this is a realloc of the last alloc? */

  if (!block || block->size - block->used < size) {
    /* Slow path: have to allocate a new block. */
    block = upb_arena_allocblock(a, size);

      return NULL;  /* Out of memory. */

  /* Fast path: carve the result off the end of the current block. */
  ret = (char*)block + block->used;

    memcpy(ret, ptr, oldsize);  /* Preserve existing data (realloc case). */

  /* TODO(haberman): ASAN unpoison. */

  a->bytes_allocated += size;
/* Public Arena API ***********************************************************/

/* Portable (pre-C11) alignment-of: the offset of a member placed right
 * after a single char in a struct equals the member type's alignment. */
#define upb_alignof(type) offsetof (struct { char c; type member; }, member)
/* Creates an arena inside the caller-provided region |mem| of |n| bytes
 * when it is large enough; otherwise mallocs an initial block from |alloc|.
 * The upb_arena header itself is placed at the end of the initial block. */
upb_arena *upb_arena_init(void *mem, size_t n, upb_alloc *alloc) {
  const size_t first_block_overhead = sizeof(upb_arena) + sizeof(mem_block);

  /* Round block size down to alignof(*a) since we will allocate the arena
   * itself at the end. */
  n &= ~(upb_alignof(upb_arena) - 1);

  if (n < first_block_overhead) {
    /* We need to malloc the initial block. */
    n = first_block_overhead + 256;
    if (!alloc || !(mem = upb_malloc(alloc, n))) {

  a = (void*)((char*)mem + n - sizeof(*a));

  a->alloc.func = &upb_arena_doalloc;
  /* NOTE(review): this store is dead -- it is unconditionally overwritten
   * by `a->block_alloc = alloc;` below. Confirm which value is intended. */
  a->block_alloc = &upb_alloc_global;
  a->bytes_allocated = 0;
  a->next_block_size = 256;   /* Initial target for the first malloc'd block. */
  a->max_block_size = 16384;  /* Cap for geometric block growth. */
  a->cleanup_head = NULL;
  a->block_head = NULL;
  a->block_alloc = alloc;

  upb_arena_addblock(a, mem, n, owned);
/* Destroys |a|: first runs every registered cleanup callback, then releases
 * all owned memory blocks (one of which contains the arena header itself). */
void upb_arena_free(upb_arena *a) {
  cleanup_ent *ent = a->cleanup_head;
  mem_block *block = a->block_head;

    ent->cleanup(ent->ud);  /* Callbacks run in LIFO registration order. */

  /* Must do this after running cleanup functions, because this will delete
   * the memory we store our cleanup entries in! */
    /* Load first since we are deleting block. */
    mem_block *next = block->next;

    upb_free(a->block_alloc, block);
245 bool upb_arena_addcleanup(upb_arena *a, void *ud, upb_cleanup_func *func) {
246 cleanup_ent *ent = upb_malloc(&a->alloc, sizeof(cleanup_ent));
248 return false; /* Out of memory. */
253 ent->next = a->cleanup_head;
254 a->cleanup_head = ent;
/* Returns the total number of bytes this arena has handed out to callers
 * (the sum of aligned allocation sizes; see upb_arena_doalloc). */
size_t upb_arena_bytesallocated(const upb_arena *a) {
  return a->bytes_allocated;