LCOV - code coverage report
Current view: top level - lib - lwan-coro.c (source / functions) Hit Total Coverage
Test: coverage.info.cleaned Lines: 140 164 85.4 %
Date: 2023-04-18 16:19:03 Functions: 28 31 90.3 %

          Line data    Source code
       1             : /*
       2             :  * lwan - web server
       3             :  * Copyright (c) 2012 L. A. F. Pereira <l@tia.mat.br>
       4             :  *
       5             :  * This program is free software; you can redistribute it and/or
       6             :  * modify it under the terms of the GNU General Public License
       7             :  * as published by the Free Software Foundation; either version 2
       8             :  * of the License, or any later version.
       9             :  *
      10             :  * This program is distributed in the hope that it will be useful,
      11             :  * but WITHOUT ANY WARRANTY; without even the implied warranty of
      12             :  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
      13             :  * GNU General Public License for more details.
      14             :  *
      15             :  * You should have received a copy of the GNU General Public License
      16             :  * along with this program; if not, write to the Free Software
      17             :  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
      18             :  * USA.
      19             :  */
      20             : 
      21             : #define _GNU_SOURCE
      22             : #include <assert.h>
      23             : #include <limits.h>
      24             : #include <signal.h>
      25             : #include <stdarg.h>
      26             : #include <stdio.h>
      27             : #include <stdlib.h>
      28             : #include <string.h>
      29             : #include <sys/mman.h>
      30             : 
      31             : #include "lwan-private.h"
      32             : 
      33             : #include "lwan-array.h"
      34             : #include "lwan-coro.h"
      35             : 
      36             : #if !defined(NDEBUG) && defined(LWAN_HAVE_VALGRIND)
      37             : #define INSTRUMENT_FOR_VALGRIND
      38             : #include <valgrind.h>
      39             : #include <memcheck.h>
      40             : #endif
      41             : 
      42             : #if defined(__clang__)
      43             : # if defined(__has_feature) && __has_feature(address_sanitizer)
      44             : #  define __SANITIZE_ADDRESS__
      45             : # endif
      46             : #endif
      47             : #if defined(__SANITIZE_ADDRESS__)
      48             : #define INSTRUMENT_FOR_ASAN
      49             : void __asan_poison_memory_region(void const volatile *addr, size_t size);
      50             : void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
      51             : #endif
      52             : 
      53             : #if !defined(SIGSTKSZ)
      54             : #define SIGSTKSZ 16384
      55             : #endif
      56             : 
      57             : #ifdef LWAN_HAVE_BROTLI
      58             : #define CORO_STACK_SIZE ((size_t)(8 * SIGSTKSZ))
      59             : #else
      60             : #define CORO_STACK_SIZE ((size_t)(4 * SIGSTKSZ))
      61             : #endif
      62             : 
      63             : #define CORO_BUMP_PTR_ALLOC_SIZE 1024
      64             : 
      65             : #if (!defined(NDEBUG) && defined(MAP_STACK)) || defined(__OpenBSD__)
      66             : /* As an exploit mitigation, OpenBSD requires any stacks to be allocated via
      67             :  * mmap(...  MAP_STACK ...).
      68             :  *
      69             :  * Also enable this on debug builds to catch stack overflows while testing
      70             :  * (MAP_STACK exists in Linux, but it's a no-op).  */
      71             : #define ALLOCATE_STACK_WITH_MMAP
      72             : #endif
      73             : 
#ifndef NDEBUG
/* Startup sanity checks (debug builds only) for the coroutine stack size
 * constants; runs as a constructor before main(). */
__attribute__((constructor)) static void assert_sizes_are_sane(void)
{
    /* This is done in runtime rather than during compilation time because
     * in Glibc >= 2.34, SIGSTKSZ is defined as sysconf(_SC_MINSIGSTKSZ). */

    /* Request buffer fits inside coroutine stack */
    assert(DEFAULT_BUFFER_SIZE < CORO_STACK_SIZE);
#ifdef ALLOCATE_STACK_WITH_MMAP
    /* Coroutine stack size is a multiple of page size */
    assert((CORO_STACK_SIZE % PAGE_SIZE) == 0);
    /* Coroutine stack size is at least a page long */
    assert((CORO_STACK_SIZE >= PAGE_SIZE));
#endif
}
#endif
      90             : 
/* Signatures for deferred callbacks taking one or two pointer arguments. */
typedef void (*defer1_func)(void *data);
typedef void (*defer2_func)(void *data1, void *data2);

/* One armed deferred callback.  The union holds either the one-argument or
 * the two-argument form; has_two_args selects which member is valid. */
struct coro_defer {
    union {
        struct {
            defer1_func func;
            void *data;
        } one;
        struct {
            defer2_func func;
            void *data1;
            void *data2;
        } two;
    };
    bool has_two_args;
};

DEFINE_ARRAY_TYPE_INLINEFIRST(coro_defer_array, struct coro_defer)
     110             : 
struct coro {
    struct coro_switcher *switcher; /* Holds the caller's context for swaps */
    coro_context context;           /* Saved register/stack state */
    struct coro_defer_array defer;  /* Callbacks run in reverse order on teardown */

    int64_t yield_value; /* Value exchanged by coro_yield()/coro_resume() */

    struct {
        /* This allocator is instrumented on debug builds using asan and/or valgrind, if
         * enabled during configuration time.  See coro_malloc_bump_ptr() for details. */
        void *ptr;
        size_t remaining;
    } bump_ptr_alloc;

#if defined(INSTRUMENT_FOR_VALGRIND)
    unsigned int vg_stack_id; /* Token returned by VALGRIND_STACK_REGISTER() */
#endif

#if defined(ALLOCATE_STACK_WITH_MMAP)
    unsigned char *stack; /* mmap()ed separately; see coro_new() */
#else
    unsigned char stack[]; /* Flexible array member: allocated with the struct */
#endif
};
     135             : 
/* Mach-O (macOS) prefixes C symbol names with an underscore; ELF does not. */
#if defined(__APPLE__)
#define ASM_SYMBOL(name_) "_" #name_
#else
#define ASM_SYMBOL(name_) #name_
#endif

/* Emit the .globl directive and label that open an assembly routine. */
#define ASM_ROUTINE(name_)                                                     \
    ".globl " ASM_SYMBOL(name_) "\n\t" ASM_SYMBOL(name_) ":\n\t"
     144             : 
/*
 * This swapcontext() implementation was obtained from glibc and modified
 * slightly to not save/restore the floating point registers, unneeded
 * registers, and signal mask.  It is Copyright (C) 2001, 2002, 2003 Free
 * Software Foundation, Inc and is distributed under GNU LGPL version 2.1
 * (or later).  I'm not sure if I can distribute them inside a GPL program;
 * they're straightforward so I'm assuming there won't be any problem; if
 * there is, I'll just roll my own.
 *     -- L.
 */
#if defined(__x86_64__)
void __attribute__((noinline, visibility("internal")))
coro_swapcontext(coro_context *current, coro_context *other);
/* coro_context layout used below, 8-byte slots (matches the indices used in
 * coro_reset() and coro_resume()):
 *   0:RBX  8:RBP  16:R12  24:R13  32:R14  40:R15  48:RDI  56:RSI  64:RIP  72:RSP
 * The current state is stored into *current (%rdi); *other (%rsi) is then
 * loaded and jumped into via the saved RIP. */
asm(".text\n\t"
    ".p2align 5\n\t"
    ASM_ROUTINE(coro_swapcontext)
    "movq   %rbx,0(%rdi)\n\t"
    "movq   %rbp,8(%rdi)\n\t"
    "movq   %r12,16(%rdi)\n\t"
    "movq   %r13,24(%rdi)\n\t"
    "movq   %r14,32(%rdi)\n\t"
    "movq   %r15,40(%rdi)\n\t"
    "movq   %rdi,48(%rdi)\n\t"
    "movq   %rsi,56(%rdi)\n\t"
    "movq   (%rsp),%rcx\n\t"
    "movq   %rcx,64(%rdi)\n\t"
    "leaq   0x8(%rsp),%rcx\n\t"
    "movq   %rcx,72(%rdi)\n\t"
    "movq   72(%rsi),%rsp\n\t"
    "movq   0(%rsi),%rbx\n\t"
    "movq   8(%rsi),%rbp\n\t"
    "movq   16(%rsi),%r12\n\t"
    "movq   24(%rsi),%r13\n\t"
    "movq   32(%rsi),%r14\n\t"
    "movq   40(%rsi),%r15\n\t"
    "movq   48(%rsi),%rdi\n\t"
    "movq   64(%rsi),%rcx\n\t"
    "movq   56(%rsi),%rsi\n\t"
    "jmpq   *%rcx\n\t");
#elif defined(LWAN_HAVE_LIBUCONTEXT)
#define coro_swapcontext(cur, oth) libucontext_swapcontext(cur, oth)
#else
#error Unsupported platform.
#endif
     189             : 
     190             : __attribute__((used, visibility("internal")))
     191             : void
     192         337 : coro_entry_point(struct coro *coro, coro_function_t func, void *data)
     193             : {
     194         337 :     return (void)coro_yield(coro, func(coro, data));
     195             : }
     196             : 
#ifdef __x86_64__
/* See comment in coro_reset() for an explanation of why this routine is
 * necessary. */
void __attribute__((visibility("internal"))) coro_entry_point_x86_64();

/* Trampoline: coro_swapcontext() doesn't preserve RDX, so coro_reset()
 * stashes the third argument in R15; move it into RDX and tail-jump into
 * coro_entry_point(). */
asm(".text\n\t"
    ".p2align 5\n\t"
    ASM_ROUTINE(coro_entry_point_x86_64)
    "mov %r15, %rdx\n\t"
    "jmp " ASM_SYMBOL(coro_entry_point) "\n\t"
);
#endif
     209             : 
     210        1083 : void coro_deferred_run(struct coro *coro, size_t generation)
     211             : {
     212        1083 :     struct lwan_array *array = (struct lwan_array *)&coro->defer;
     213        1083 :     struct coro_defer *defers = array->base;
     214             : 
     215        1828 :     for (size_t i = array->elements; i != generation; i--) {
     216         745 :         struct coro_defer *defer = &defers[i - 1];
     217             : 
     218         745 :         if (defer->has_two_args)
     219         176 :             defer->two.func(defer->two.data1, defer->two.data2);
     220             :         else
     221         569 :             defer->one.func(defer->one.data);
     222             :     }
     223             : 
     224        1083 :     array->elements = generation;
     225        1083 : }
     226             : 
     227         332 : ALWAYS_INLINE size_t coro_deferred_get_generation(const struct coro *coro)
     228             : {
     229         332 :     const struct lwan_array *array = (struct lwan_array *)&coro->defer;
     230             : 
     231         332 :     return array->elements;
     232             : }
     233             : 
/* Rewind a coroutine so that its next resume starts executing
 * func(coro, data) from the top of a fresh stack.  Any pending defers are
 * run and the bump pointer arena is discarded. */
void coro_reset(struct coro *coro, coro_function_t func, void *data)
{
    unsigned char *stack = coro->stack;

    /* Unwind everything deferred by the coroutine's previous use. */
    coro_deferred_run(coro, 0);
    coro_defer_array_reset(&coro->defer);
    coro->bump_ptr_alloc.remaining = 0;

#if defined(__x86_64__)
    /* coro_entry_point() for x86-64 has 3 arguments, but RDX isn't
     * stored.  Use R15 instead, and implement the trampoline
     * function in assembly in order to use this register when
     * calling the user function. */
    coro->context[5 /* R15 */] = (uintptr_t)data;
    coro->context[6 /* RDI */] = (uintptr_t)coro;
    coro->context[7 /* RSI */] = (uintptr_t)func;
    coro->context[8 /* RIP */] = (uintptr_t)coro_entry_point_x86_64;

    /* Ensure stack is properly aligned: it should be aligned to a
     * 16-bytes boundary so SSE will work properly, but should be
     * aligned on an 8-byte boundary right after calling a function. */
    uintptr_t rsp = (uintptr_t)stack + CORO_STACK_SIZE;

#define STACK_PTR 9
    coro->context[STACK_PTR] = (rsp & ~0xful) - 0x8ul;
#elif defined(LWAN_HAVE_LIBUCONTEXT)
    libucontext_getcontext(&coro->context);

    coro->context.uc_stack.ss_sp = stack;
    coro->context.uc_stack.ss_size = CORO_STACK_SIZE;
    coro->context.uc_stack.ss_flags = 0;
    coro->context.uc_link = NULL;

    libucontext_makecontext(&coro->context, (void (*)())coro_entry_point, 3,
                            coro, func, data);

#endif
}
     272             : 
/* Allocate and initialize a coroutine that will run function(coro, data)
 * when first resumed.  Returns NULL on allocation failure.  The stack is
 * either mmap()ed separately (OpenBSD / debug builds) or carved out of the
 * same allocation as the struct via the flexible array member. */
ALWAYS_INLINE struct coro *
coro_new(struct coro_switcher *switcher, coro_function_t function, void *data)
{
    struct coro *coro;

#if defined(ALLOCATE_STACK_WITH_MMAP)
    void *stack = mmap(NULL, CORO_STACK_SIZE, PROT_READ | PROT_WRITE,
                       MAP_STACK | MAP_ANON | MAP_PRIVATE, -1, 0);
    if (UNLIKELY(stack == MAP_FAILED))
        return NULL;

    coro = lwan_aligned_alloc(sizeof(*coro), 64);
    if (UNLIKELY(!coro)) {
        munmap(stack, CORO_STACK_SIZE);
        return NULL;
    }

    coro->stack = stack;
#else
    coro = lwan_aligned_alloc(sizeof(struct coro) + CORO_STACK_SIZE, 64);

    if (UNLIKELY(!coro))
        return NULL;
#endif

    coro_defer_array_init(&coro->defer);

    coro->switcher = switcher;
    coro_reset(coro, function, data);

#if defined(INSTRUMENT_FOR_VALGRIND)
    /* Tell Valgrind this range is a stack so accesses aren't misreported. */
    coro->vg_stack_id = VALGRIND_STACK_REGISTER(
        coro->stack, (char *)coro->stack + CORO_STACK_SIZE);
#endif

    return coro;
}
     310             : 
/* Switch from the caller into the coroutine; returns the value the coroutine
 * hands back via coro_yield() (stored in coro->yield_value). */
ALWAYS_INLINE int64_t coro_resume(struct coro *coro)
{
    assert(coro);

#if defined(STACK_PTR)
    /* x86-64 only: the saved stack pointer must lie within this
     * coroutine's stack, otherwise the context is corrupted. */
    assert(coro->context[STACK_PTR] >= (uintptr_t)coro->stack &&
           coro->context[STACK_PTR] <=
               (uintptr_t)(coro->stack + CORO_STACK_SIZE));
#endif

    coro_swapcontext(&coro->switcher->caller, &coro->context);

    return coro->yield_value;
}
     325             : 
     326          28 : ALWAYS_INLINE int64_t coro_resume_value(struct coro *coro, int64_t value)
     327             : {
     328          28 :     assert(coro);
     329             : 
     330          28 :     coro->yield_value = value;
     331          28 :     return coro_resume(coro);
     332             : }
     333             : 
/* Suspend the coroutine, handing `value` back to the resumer.  Returns
 * whatever yield_value holds when the coroutine is resumed again (set by
 * coro_resume_value(), or unchanged for a plain coro_resume()). */
inline int64_t coro_yield(struct coro *coro, int64_t value)
{
    assert(coro);

    coro->yield_value = value;
    coro_swapcontext(&coro->context, &coro->switcher->caller);

    return coro->yield_value;
}
     343             : 
     344         242 : void coro_free(struct coro *coro)
     345             : {
     346         242 :     assert(coro);
     347             : 
     348         242 :     coro_deferred_run(coro, 0);
     349         242 :     coro_defer_array_reset(&coro->defer);
     350             : 
     351             : #if defined(INSTRUMENT_FOR_VALGRIND)
     352         242 :     VALGRIND_STACK_DEREGISTER(coro->vg_stack_id);
     353             : #endif
     354             : 
     355             : #if defined(ALLOCATE_STACK_WITH_MMAP)
     356         242 :     int result = munmap(coro->stack, CORO_STACK_SIZE);
     357         242 :     assert(result == 0);  /* only fails if addr, len are invalid */
     358             : #endif
     359             : 
     360         242 :     free(coro);
     361         242 : }
     362             : 
/* No-op callback used to neutralize a disarmed defer slot when it is not the
 * last element of the defer array (see coro_defer_disarm_internal()). */
static void disarmed_defer(void *data __attribute__((unused)))
{
}
     366             : 
/* Disarm one deferred callback.  If it is the most recently armed one, pop it
 * off the array; otherwise overwrite it in place with a no-op so the
 * positions (and thus indices) of the other defers are preserved. */
static void coro_defer_disarm_internal(struct coro *coro,
                                       struct coro_defer *defer)
{
    const size_t num_defers = coro_defer_array_len(&coro->defer);

    assert(num_defers != 0 && defer != NULL);

    if (defer == coro_defer_array_get_elem(&coro->defer, num_defers - 1)) {
        /* If we're disarming the last defer we armed, there's no need to waste
         * space of a deferred callback to an empty function like
         * disarmed_defer(). */
        struct lwan_array *defer_base = (struct lwan_array *)&coro->defer;
        defer_base->elements--;
    } else {
        defer->one.func = disarmed_defer;
        defer->has_two_args = false;
    }
}
     385             : 
     386           0 : void coro_defer_disarm(struct coro *coro, coro_deferred d)
     387             : {
     388           0 :     assert(d >= 0);
     389             : 
     390           0 :     return coro_defer_disarm_internal(
     391           0 :         coro, coro_defer_array_get_elem(&coro->defer, (size_t)d));
     392             : }
     393             : 
     394           1 : void coro_defer_fire_and_disarm(struct coro *coro, coro_deferred d)
     395             : {
     396           1 :     assert(d >= 0);
     397             : 
     398           1 :     struct coro_defer *defer = coro_defer_array_get_elem(&coro->defer, (size_t)d);
     399           1 :     assert(coro);
     400             : 
     401           1 :     if (defer->has_two_args)
     402           1 :         defer->two.func(defer->two.data1, defer->two.data2);
     403             :     else
     404           0 :         defer->one.func(defer->one.data);
     405             : 
     406           1 :     return coro_defer_disarm_internal(coro, defer);
     407             : }
     408             : 
     409             : ALWAYS_INLINE coro_deferred
     410         657 : coro_defer(struct coro *coro, defer1_func func, void *data)
     411             : {
     412         661 :     struct coro_defer *defer = coro_defer_array_append(&coro->defer);
     413             : 
     414         664 :     if (UNLIKELY(!defer)) {
     415           0 :         lwan_status_error("Could not add new deferred function for coro %p",
     416             :                           coro);
     417           0 :         return -1;
     418             :     }
     419             : 
     420         664 :     defer->one.func = func;
     421         664 :     defer->one.data = data;
     422         664 :     defer->has_two_args = false;
     423             : 
     424         664 :     return (coro_deferred)coro_defer_array_get_elem_index(&coro->defer, defer);
     425             : }
     426             : 
     427             : ALWAYS_INLINE coro_deferred
     428          66 : coro_defer2(struct coro *coro, defer2_func func, void *data1, void *data2)
     429             : {
     430          66 :     struct coro_defer *defer = coro_defer_array_append(&coro->defer);
     431             : 
     432         177 :     if (UNLIKELY(!defer)) {
     433           0 :         lwan_status_error("Could not add new deferred function for coro %p",
     434             :                           coro);
     435           0 :         return -1;
     436             :     }
     437             : 
     438         177 :     defer->two.func = func;
     439         177 :     defer->two.data1 = data1;
     440         177 :     defer->two.data2 = data2;
     441         177 :     defer->has_two_args = true;
     442             : 
     443         177 :     return (coro_deferred)coro_defer_array_get_elem_index(&coro->defer, defer);
     444             : }
     445             : 
     446           4 : void *coro_malloc_full(struct coro *coro,
     447             :                        size_t size,
     448             :                        void (*destroy_func)(void *data))
     449             : {
     450           4 :     void *ptr = malloc(size);
     451           4 :     if (LIKELY(ptr))
     452             :         coro_defer(coro, destroy_func, ptr);
     453             : 
     454           4 :     return ptr;
     455             : }
     456             : 
#if defined(INSTRUMENT_FOR_VALGRIND) || defined(INSTRUMENT_FOR_ASAN)
/* Deferred callback that re-poisons a bump-pointer allocation once it is
 * logically freed, so stale accesses get reported by Valgrind/ASan.  The
 * byte count is smuggled through the second pointer as an integer. */
static void instrument_bpa_free(void *ptr, void *size)
{
#if defined(INSTRUMENT_FOR_VALGRIND)
    VALGRIND_MAKE_MEM_NOACCESS(ptr, (size_t)(uintptr_t)size);
#endif

#if defined(INSTRUMENT_FOR_ASAN)
    __asan_poison_memory_region(ptr, (size_t)(uintptr_t)size);
#endif
}
#endif
     469             : 
/* Carve aligned_size bytes out of the coroutine's current bump pointer
 * arena.  Callers must have already verified that enough space remains.
 * When instrumentation is compiled in, the extra requested_size parameter
 * lets Valgrind/ASan unpoison only the bytes actually asked for. */
#if defined(INSTRUMENT_FOR_ASAN) || defined(INSTRUMENT_FOR_VALGRIND)
static inline void *coro_malloc_bump_ptr(struct coro *coro,
                                         size_t aligned_size,
                                         size_t requested_size)
#else
static inline void *coro_malloc_bump_ptr(struct coro *coro, size_t aligned_size)
#endif
{
    void *ptr = coro->bump_ptr_alloc.ptr;

    coro->bump_ptr_alloc.remaining -= aligned_size;
    coro->bump_ptr_alloc.ptr = (char *)ptr + aligned_size;

    /* This instrumentation is desirable to find buffer overflows, but it's not
     * cheap. Enable it only in debug builds (for Valgrind) or when using
     * address sanitizer (always the case when fuzz-testing on OSS-Fuzz). See:
     * https://blog.fuzzing-project.org/65-When-your-Memory-Allocator-hides-Security-Bugs.html
     */

#if defined(INSTRUMENT_FOR_VALGRIND)
    VALGRIND_MAKE_MEM_UNDEFINED(ptr, requested_size);
#endif
#if defined(INSTRUMENT_FOR_ASAN)
    __asan_unpoison_memory_region(ptr, requested_size);
#endif
#if defined(INSTRUMENT_FOR_VALGRIND) || defined(INSTRUMENT_FOR_ASAN)
    /* Re-poison this chunk when the coroutine winds down. */
    coro_defer2(coro, instrument_bpa_free, ptr,
                (void *)(uintptr_t)requested_size);
#endif

    return ptr;
}

/* Dispatch to whichever coro_malloc_bump_ptr() signature is compiled in,
 * dropping the requested size when there's no instrumentation to use it. */
#if defined(INSTRUMENT_FOR_ASAN) || defined(INSTRUMENT_FOR_VALGRIND)
#define CORO_MALLOC_BUMP_PTR(coro_, aligned_size_, requested_size_)            \
    coro_malloc_bump_ptr(coro_, aligned_size_, requested_size_)
#else
#define CORO_MALLOC_BUMP_PTR(coro_, aligned_size_, requested_size_)            \
    coro_malloc_bump_ptr(coro_, aligned_size_)
#endif
     510             : 
/* Deferred callback (armed by coro_malloc()) that frees a whole bump pointer
 * arena.  arg1 is the owning coroutine, arg2 the arena base pointer. */
static void free_bump_ptr(void *arg1, void *arg2)
{
    struct coro *coro = arg1;

#if defined(INSTRUMENT_FOR_VALGRIND)
    VALGRIND_MAKE_MEM_UNDEFINED(arg2, CORO_BUMP_PTR_ALLOC_SIZE);
#endif
#if defined(INSTRUMENT_FOR_ASAN)
    __asan_unpoison_memory_region(arg2, CORO_BUMP_PTR_ALLOC_SIZE);
#endif

    /* Instead of checking if bump_ptr_alloc.ptr is part of the allocation
     * with base in arg2, just zero out the arena for this coroutine to
     * prevent coro_malloc() from carving up this and any other
     * (potentially) freed arenas.  */
    coro->bump_ptr_alloc.remaining = 0;

    return free(arg2);
}
     530             : 
/* Allocate `size` bytes tied to the coroutine's lifetime.  Small requests
 * are served from a bump pointer arena (a new arena is malloc()ed when the
 * current one runs out); larger requests fall back to coro_malloc_full()
 * with free() as the destructor.  Returns NULL on allocation failure. */
void *coro_malloc(struct coro *coro, size_t size)
{
    /* The bump pointer allocator can't be in the generic coro_malloc_full()
     * since destroy_funcs are supposed to free the memory. In this function, we
     * guarantee that the destroy_func is free(), so that if an allocation goes
     * through the bump pointer allocator, there's nothing that needs to be done
     * to free the memory (other than freeing the whole bump pointer arena with
     * the defer call below).  */

    /* Round up to the platform's pointer size so every returned chunk stays
     * suitably aligned. */
    const size_t aligned_size =
        (size + sizeof(void *) - 1ul) & ~(sizeof(void *) - 1ul);

    if (LIKELY(coro->bump_ptr_alloc.remaining >= aligned_size))
        return CORO_MALLOC_BUMP_PTR(coro, aligned_size, size);

    /* This will allocate as many "bump pointer arenas" as necessary; the
     * old ones will be freed automatically as each allocations coro_defers
     * the free() call.   Just don't bother allocating an arena larger than
     * CORO_BUMP_PTR_ALLOC.  */
    if (LIKELY(aligned_size <= CORO_BUMP_PTR_ALLOC_SIZE)) {
        coro->bump_ptr_alloc.ptr = malloc(CORO_BUMP_PTR_ALLOC_SIZE);
        if (UNLIKELY(!coro->bump_ptr_alloc.ptr))
            return NULL;

        coro->bump_ptr_alloc.remaining = CORO_BUMP_PTR_ALLOC_SIZE;

#if defined(INSTRUMENT_FOR_ASAN)
        __asan_poison_memory_region(coro->bump_ptr_alloc.ptr,
                                    CORO_BUMP_PTR_ALLOC_SIZE);
#endif
#if defined(INSTRUMENT_FOR_VALGRIND)
        VALGRIND_MAKE_MEM_NOACCESS(coro->bump_ptr_alloc.ptr,
                                   CORO_BUMP_PTR_ALLOC_SIZE);
#endif

        /* Free the whole arena when the coroutine winds down. */
        coro_defer2(coro, free_bump_ptr, coro, coro->bump_ptr_alloc.ptr);

        return CORO_MALLOC_BUMP_PTR(coro, aligned_size, size);
    }

    return coro_malloc_full(coro, size, free);
}
     573             : 
     574           0 : char *coro_strndup(struct coro *coro, const char *str, size_t max_len)
     575             : {
     576           0 :     const size_t len = strnlen(str, max_len) + 1;
     577           0 :     char *dup = coro_memdup(coro, str, len);
     578             : 
     579           0 :     if (LIKELY(dup))
     580           0 :         dup[len - 1] = '\0';
     581             : 
     582           0 :     return dup;
     583             : }
     584             : 
     585          36 : char *coro_strdup(struct coro *coro, const char *str)
     586             : {
     587          36 :     return coro_memdup(coro, str, strlen(str) + 1);
     588             : }
     589             : 
     590           3 : char *coro_printf(struct coro *coro, const char *fmt, ...)
     591             : {
     592             :     va_list values;
     593             :     int len;
     594             :     char *tmp_str;
     595             : 
     596           3 :     va_start(values, fmt);
     597           3 :     len = vasprintf(&tmp_str, fmt, values);
     598           3 :     va_end(values);
     599             : 
     600           3 :     if (UNLIKELY(len < 0))
     601           0 :         return NULL;
     602             : 
     603           3 :     coro_defer(coro, free, tmp_str);
     604           3 :     return tmp_str;
     605             : }
     606             : 
     607          51 : void *coro_memdup(struct coro *coro, const void *src, size_t len)
     608             : {
     609          51 :     void *ptr = coro_malloc(coro, len);
     610             : 
     611          51 :     return LIKELY(ptr) ? memcpy(ptr, src, len) : NULL;
     612             : }

Generated by: LCOV version 1.15-2-gb9d6727