/*
 * lwan - web server
 * Copyright (c) 2013 L. A. F. Pereira <l@tia.mat.br>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include "lwan-private.h"

#include "lwan-cache.h"
#include "hash.h"

#define GET_AND_REF_TRIES 5

enum {
    /* Entry flags */
    FLOATING = 1 << 0,
    TEMPORARY = 1 << 1,
    FREE_KEY_ON_DESTROY = 1 << 2,

    /* Cache flags */
    SHUTTING_DOWN = 1 << 0
};

struct cache {
    struct {
        struct hash *table;
        pthread_rwlock_t lock;
    } hash;

    struct {
        struct list_head list;
        pthread_rwlock_t lock;
    } queue;

    struct {
        cache_create_entry_cb create_entry;
        cache_destroy_entry_cb destroy_entry;
        void *context;
    } cb;

    /* FIXME: I'm not really a fan of how these are set in cache_create(),
     *        but this has to do for now. */
    struct {
        void *(*copy)(const void *);
        void (*free)(void *);
    } key;

    struct {
        time_t time_to_live;
    } settings;

    unsigned flags;

#ifndef NDEBUG
    struct {
        unsigned hits;
        unsigned misses;
        unsigned evicted;
    } stats;
#endif
};

static bool cache_pruner_job(void *data);

static ALWAYS_INLINE void *identity_key_copy(const void *key)
{
    return (void *)key;
}

static void identity_key_free(void *key)
{
}

static ALWAYS_INLINE void *str_key_copy(const void *key)
{
    return strdup(key);
}

static ALWAYS_INLINE void str_key_free(void *key)
{
    free(key);
}

struct cache *cache_create_full(cache_create_entry_cb create_entry_cb,
                                cache_destroy_entry_cb destroy_entry_cb,
                                hash_create_func_cb hash_create_func,
                                void *cb_context,
                                time_t time_to_live)
{
    struct cache *cache;

    assert(create_entry_cb);
    assert(destroy_entry_cb);
    assert(time_to_live > 0);

    cache = calloc(1, sizeof(*cache));
    if (!cache)
        return NULL;

    if (hash_create_func == hash_str_new) {
        cache->hash.table = hash_create_func(free, NULL);
    } else {
        cache->hash.table = hash_create_func(NULL, NULL);
    }
    if (!cache->hash.table)
        goto error_no_hash;

    if (pthread_rwlock_init(&cache->hash.lock, NULL))
        goto error_no_hash_lock;
    if (pthread_rwlock_init(&cache->queue.lock, NULL))
        goto error_no_queue_lock;

    cache->cb.create_entry = create_entry_cb;
    cache->cb.destroy_entry = destroy_entry_cb;
    cache->cb.context = cb_context;

    if (hash_create_func == hash_str_new) {
        cache->key.copy = str_key_copy;
        cache->key.free = str_key_free;
    } else {
        cache->key.copy = identity_key_copy;
        cache->key.free = identity_key_free;
    }

    cache->settings.time_to_live = time_to_live;

    list_head_init(&cache->queue.list);

    lwan_job_add(cache_pruner_job, cache);

    return cache;

error_no_queue_lock:
    pthread_rwlock_destroy(&cache->hash.lock);
error_no_hash_lock:
    hash_free(cache->hash.table);
error_no_hash:
    free(cache);

    return NULL;
}
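
/* Illustrative sketch, not part of lwan-cache.c: passing any hash constructor
 * other than hash_str_new to cache_create_full() selects the identity key
 * helpers above, so keys are stored verbatim instead of being strdup()'d.
 * This assumes hash.h provides hash_int_new(), a constructor for keys that
 * are plain integers carried in the key pointer; the entry type and the
 * callbacks below are hypothetical, with signatures matching how this file
 * invokes them. */
struct fd_entry {
    struct cache_entry base; /* must be the first member */
    int fd;
};

static struct cache_entry *fd_entry_create(const void *key, void *context)
{
    struct fd_entry *fde = calloc(1, sizeof(*fde));

    if (!fde)
        return NULL;
    fde->fd = (int)(size_t)key; /* the key itself is the integer */

    return (struct cache_entry *)fde;
}

static void fd_entry_destroy(struct cache_entry *entry, void *context)
{
    free(entry);
}

static struct cache *fd_cache_new(void)
{
    /* Integer keys are copied by identity and freed by a no-op, so there is
     * no per-key allocation; entries expire 60 seconds after creation. */
    return cache_create_full(fd_entry_create, fd_entry_destroy, hash_int_new,
                             NULL, 60);
}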

struct cache *cache_create(cache_create_entry_cb create_entry_cb,
                           cache_destroy_entry_cb destroy_entry_cb,
                           void *cb_context,
                           time_t time_to_live)
{
    return cache_create_full(create_entry_cb, destroy_entry_cb, hash_str_new,
                             cb_context, time_to_live);
}
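
/* Illustrative sketch, not part of lwan-cache.c: typical use of cache_create()
 * with string keys.  "struct greeting_entry" and the callbacks are
 * hypothetical; as with the real users of this cache in lwan, the
 * struct cache_entry member must come first so the cache can treat the same
 * pointer as either type. */
struct greeting_entry {
    struct cache_entry base;
    char *text;
};

static struct cache_entry *greeting_create(const void *key, void *context)
{
    struct greeting_entry *ge = malloc(sizeof(*ge));

    if (!ge)
        return NULL;

    /* Pretend this is expensive to compute and therefore worth caching. */
    ge->text = strdup((const char *)key);
    if (!ge->text) {
        free(ge);
        return NULL; /* reported to the caller as ECANCELED */
    }

    return (struct cache_entry *)ge;
}

static void greeting_destroy(struct cache_entry *entry, void *context)
{
    struct greeting_entry *ge = (struct greeting_entry *)entry;

    free(ge->text);
    free(ge);
}

static struct cache *greeting_cache_new(void)
{
    /* Keys are strdup()'d by str_key_copy(); entries become eligible for
     * eviction by the pruner job 10 seconds after creation. */
    return cache_create(greeting_create, greeting_destroy, NULL, 10);
}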

void cache_destroy(struct cache *cache)
{
    assert(cache);

#ifndef NDEBUG
    lwan_status_debug("Cache stats: %d hits, %d misses, %d evictions",
                      cache->stats.hits, cache->stats.misses,
                      cache->stats.evicted);
#endif

    lwan_job_del(cache_pruner_job, cache);
    cache->flags |= SHUTTING_DOWN;
    cache_pruner_job(cache);
    pthread_rwlock_destroy(&cache->hash.lock);
    pthread_rwlock_destroy(&cache->queue.lock);
    hash_free(cache->hash.table);
    free(cache);
}

struct cache_entry *cache_get_and_ref_entry(struct cache *cache,
                                            const void *key, int *error)
{
    struct cache_entry *entry;
    char *key_copy;

    assert(cache);
    assert(error);
    assert(key);

    *error = 0;

    /* If the lock can't be obtained, return an error to allow, for instance,
     * yielding from the coroutine and trying to obtain the lock at a later
     * time. */
    if (UNLIKELY(pthread_rwlock_tryrdlock(&cache->hash.lock) == EBUSY)) {
        *error = EWOULDBLOCK;
        return NULL;
    }
    entry = hash_find(cache->hash.table, key);
    if (LIKELY(entry)) {
        ATOMIC_INC(entry->refs);
        pthread_rwlock_unlock(&cache->hash.lock);
#ifndef NDEBUG
        ATOMIC_INC(cache->stats.hits);
#endif
        return entry;
    }

    /* No need to keep the hash table lock held while the item is being created. */
    pthread_rwlock_unlock(&cache->hash.lock);

#ifndef NDEBUG
    ATOMIC_INC(cache->stats.misses);
#endif

    key_copy = cache->key.copy(key);
    if (UNLIKELY(!key_copy)) {
        if (cache->key.copy != identity_key_copy) {
            *error = ENOMEM;
            return NULL;
        }
    }

    entry = cache->cb.create_entry(key, cache->cb.context);
    if (UNLIKELY(!entry)) {
        *error = ECANCELED;
        cache->key.free(key_copy);
        return NULL;
    }

    *entry = (struct cache_entry) { .key = key_copy, .refs = 1 };

    if (pthread_rwlock_trywrlock(&cache->hash.lock) == EBUSY) {
        /* Couldn't obtain hash write lock: instead of waiting, just return
         * the recently-created item as a temporary item.  Might result in
         * items not being added to the cache, though, so this might be
         * changed back to pthread_rwlock_wrlock() again someday if this
         * proves to be a problem. */
        entry->flags = TEMPORARY | FREE_KEY_ON_DESTROY;
        return entry;
    }

    if (!hash_add_unique(cache->hash.table, entry->key, entry)) {
        struct timespec now;

        if (UNLIKELY(clock_gettime(monotonic_clock_id, &now) < 0))
            lwan_status_critical("clock_gettime");

        entry->time_to_expire = now.tv_sec + cache->settings.time_to_live;

        if (LIKELY(!pthread_rwlock_wrlock(&cache->queue.lock))) {
            list_add_tail(&cache->queue.list, &entry->entries);
            pthread_rwlock_unlock(&cache->queue.lock);
        } else {
            /* Key is freed when this entry is removed from the hash
             * table below. */
            entry->flags = TEMPORARY;

            /* Ensure item is removed from the hash table; otherwise,
             * another thread could potentially get another reference
             * to this entry and cause an invalid memory access. */
            hash_del(cache->hash.table, entry->key);
        }
    } else {
        /* Either there's another item with the same key (-EEXIST), or
         * there was an error inside the hash table.  In either case,
         * just return a TEMPORARY entry so that it is destroyed the first
         * time someone unrefs it.  TEMPORARY entries are pretty much like
         * FLOATING entries, except that unreffing them does not use atomic
         * operations. */
        entry->flags = TEMPORARY | FREE_KEY_ON_DESTROY;
    }

    pthread_rwlock_unlock(&cache->hash.lock);
    return entry;
}
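
/* Illustrative sketch, not part of lwan-cache.c: a lookup from plain
 * (non-coroutine) code using the hypothetical greeting cache above.  Every
 * entry obtained from cache_get_and_ref_entry() must eventually be handed
 * back with cache_entry_unref(); EWOULDBLOCK only means a lock was busy,
 * so the call can simply be retried. */
static char *greeting_lookup(struct cache *cache, const char *name)
{
    struct greeting_entry *ge;
    char *copy;
    int error;

    do {
        ge = (struct greeting_entry *)
            cache_get_and_ref_entry(cache, name, &error);
    } while (!ge && error == EWOULDBLOCK);

    if (!ge)
        return NULL; /* ENOMEM or ECANCELED: entry couldn't be created */

    /* Copy while the reference is held; the entry may be evicted and
     * destroyed at any point after the unref below. */
    copy = strdup(ge->text);

    cache_entry_unref(cache, (struct cache_entry *)ge);

    return copy;
}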

void cache_entry_unref(struct cache *cache, struct cache_entry *entry)
{
    assert(entry);

    /* FIXME: There's a race condition in this function: if the cache is
     * destroyed while there are either temporary or floating entries,
     * calling the destroy_entry callback function will dereference
     * deallocated memory. */

    if (entry->flags & TEMPORARY) {
        /* FREE_KEY_ON_DESTROY is set on elements that never got into the
         * hash table, so their keys are never destroyed automatically. */
        if (entry->flags & FREE_KEY_ON_DESTROY)
            cache->key.free(entry->key);

        return cache->cb.destroy_entry(entry, cache->cb.context);
    }

    if (ATOMIC_DEC(entry->refs))
        return;

    /* FLOATING entries without references won't be picked up by the pruner
     * job, so destroy them right here. */
    if (entry->flags & FLOATING) {
        assert(!(entry->flags & FREE_KEY_ON_DESTROY));
        return cache->cb.destroy_entry(entry, cache->cb.context);
    }
}

static bool cache_pruner_job(void *data)
{
    struct cache *cache = data;
    struct cache_entry *node, *next;
    struct timespec now;
    bool shutting_down = cache->flags & SHUTTING_DOWN;
    struct list_head queue;
    unsigned int evicted = 0;

    if (UNLIKELY(pthread_rwlock_trywrlock(&cache->queue.lock) == EBUSY))
        return false;

    /* If the queue is empty, there's nothing to do; unlock and return. */
    if (list_empty(&cache->queue.list)) {
        if (UNLIKELY(pthread_rwlock_unlock(&cache->queue.lock)))
            lwan_status_perror("pthread_rwlock_unlock");
        return false;
    }

    /* There are things to do; work on a local queue so the lock doesn't
     * need to be held while items are being pruned. */
    list_head_init(&queue);
    list_append_list(&queue, &cache->queue.list);
    list_head_init(&cache->queue.list);

    if (UNLIKELY(pthread_rwlock_unlock(&cache->queue.lock))) {
        lwan_status_perror("pthread_rwlock_unlock");
        goto end;
    }

    if (UNLIKELY(clock_gettime(monotonic_clock_id, &now) < 0)) {
        lwan_status_perror("clock_gettime");
        goto end;
    }

    list_for_each_safe(&queue, node, next, entries) {
        char *key = node->key;

        if (now.tv_sec < node->time_to_expire && LIKELY(!shutting_down))
            break;

        list_del(&node->entries);

        if (UNLIKELY(pthread_rwlock_wrlock(&cache->hash.lock))) {
            lwan_status_perror("pthread_rwlock_wrlock");
            continue;
        }

        hash_del(cache->hash.table, key);

        if (UNLIKELY(pthread_rwlock_unlock(&cache->hash.lock)))
            lwan_status_perror("pthread_rwlock_unlock");

        if (ATOMIC_INC(node->refs) == 1) {
            /* If the refcount was 0, and turned 1 after the increment, it means the item can
             * be destroyed here. */
            cache->cb.destroy_entry(node, cache->cb.context);
        } else {
            /* If not, some other thread had references to this object. */
            ATOMIC_OP(&node->flags, or, FLOATING);
            /* If, between the refcount check above and setting the FLOATING
             * flag, the thread holding the reference drops it and our own
             * decrement below brings the refcount to 0, the pruner was the
             * last holder of a reference to this entry, so it's safe to
             * destroy it at this point. */
            if (!ATOMIC_DEC(node->refs))
                cache->cb.destroy_entry(node, cache->cb.context);
        }

        evicted++;
    }

    /* If the local queue has been entirely processed, there's no need to
     * prepend it back onto the cache queue; just update the statistics and
     * return. */
    if (list_empty(&queue))
        goto end;

    /* Prepend the local, unprocessed queue to the cache queue. Since the
     * cache item TTL is constant, items created later will be destroyed
     * later. */
    if (LIKELY(!pthread_rwlock_wrlock(&cache->queue.lock))) {
        list_prepend_list(&cache->queue.list, &queue);
        pthread_rwlock_unlock(&cache->queue.lock);
    } else {
        lwan_status_perror("pthread_rwlock_wrlock");
    }

end:
#ifndef NDEBUG
    ATOMIC_AAF(&cache->stats.evicted, evicted);
#endif
    return evicted;
}

static void cache_entry_unref_defer(void *data1, void *data2)
{
    cache_entry_unref((struct cache *)data1, (struct cache_entry *)data2);
}

struct cache_entry *cache_coro_get_and_ref_entry(struct cache *cache,
                                                 struct coro *coro,
                                                 const void *key)
{
    for (int tries = GET_AND_REF_TRIES; tries; tries--) {
        int error;
        struct cache_entry *ce = cache_get_and_ref_entry(cache, key, &error);

        if (LIKELY(ce)) {
            /*
             * This is deferred here so that, if the coroutine is killed
             * after it has yielded, this cache entry is properly
             * unreferenced and freed.
             */
            coro_defer2(coro, cache_entry_unref_defer, cache, ce);
            return ce;
        }

        if (error != EWOULDBLOCK)
            break;

        /* If the cache would block while reading its hash table, yield and
         * try again.  (This yields "want-write" because otherwise this
         * worker thread might never be resumed again: it's not always the
         * case that a socket can be read from, but you can always write to
         * it.) */
        coro_yield(coro, CONN_CORO_WANT_WRITE);
    }

    return NULL;
}
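
/* Illustrative sketch, not part of lwan-cache.c: the same lookup from code
 * running on a coroutine, e.g. an lwan request handler (which would usually
 * reach its coroutine through the request's connection; that plumbing is
 * omitted here).  No explicit unref is needed: the deferred call registered
 * above drops the reference when the coroutine is collected, so the returned
 * pointer stays valid for the rest of the request. */
static const char *greeting_for_coro(struct cache *cache, struct coro *coro,
                                     const char *name)
{
    struct greeting_entry *ge = (struct greeting_entry *)
        cache_coro_get_and_ref_entry(cache, coro, name);

    return ge ? ge->text : NULL;
}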