Bug Summary

File: lwan-cache.c
Warning: line 258, column 5
    This lock has already been unlocked
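The report follows the path through cache_get_and_ref_entry() annotated below (steps 1-12): the read lock on cache->hash.lock is taken at line 183, released at line 200 after a cache miss, and the checker then flags the unlock at line 258 as releasing a lock it no longer considers held. Whether that final unlock is balanced depends on the pthread_rwlock_trywrlock() call at line 222 re-acquiring the lock when it does not return EBUSY; the path notes only record "Assuming the condition is false" for that call. The sketch below is not lwan code: it condenses the locking sequence the analyzer traverses, using illustrative names (table_lock, lookup_or_create) that do not appear in the project.

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Condensed shape of the flagged function; every identifier is illustrative. */
static void *lookup_or_create(void)
{
    void *entry = NULL;

    if (pthread_rwlock_tryrdlock(&table_lock) == EBUSY)  /* line 183: step 3 takes the false branch */
        return NULL;

    /* ...lookup misses... */
    pthread_rwlock_unlock(&table_lock);                  /* line 200: the read lock is released */

    /* ...the entry is created while no lock is held... */

    if (pthread_rwlock_trywrlock(&table_lock) == EBUSY)  /* line 222: step 8 only assumes "not EBUSY" */
        return entry;

    /* ...the entry is inserted into the table... */

    pthread_rwlock_unlock(&table_lock);                  /* line 258: the unlock being reported */
    return entry;
}

On the reported path the only acquisition shown being released is the one at line 200, so the warning hinges on whether the non-EBUSY result of trywrlock() at line 222 is modeled as re-acquiring hash.lock. Note that if that call could fail with anything other than EBUSY, the code would reach line 258 without holding the lock.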

Annotated Source Code

1 /*
2 * lwan - simple web server
3 * Copyright (c) 2013 Leandro A. F. Pereira <leandro@hardinfo.org>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 */
19
20 #include <assert.h>
21 #include <errno.h>
22 #include <pthread.h>
23 #include <stdbool.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <time.h>
27
28 #include "lwan-private.h"
29
30 #include "lwan-cache.h"
31 #include "hash.h"
32
33 #define GET_AND_REF_TRIES 5
34
35 enum {
36     /* Entry flags */
37     FLOATING = 1 << 0,
38     TEMPORARY = 1 << 1,
39
40     /* Cache flags */
41     SHUTTING_DOWN = 1 << 0
42 };
43
44 struct cache {
45     struct {
46         struct hash *table;
47         pthread_rwlock_t lock;
48     } hash;
49
50     struct {
51         struct list_head list;
52         pthread_rwlock_t lock;
53     } queue;
54
55     struct {
56         cache_create_entry_cb create_entry;
57         cache_destroy_entry_cb destroy_entry;
58         void *context;
59     } cb;
60
61     struct {
62         time_t time_to_live;
63         clockid_t clock_id;
64     } settings;
65
66     unsigned flags;
67
68 #ifndef NDEBUG
69     struct {
70         unsigned hits;
71         unsigned misses;
72         unsigned evicted;
73     } stats;
74 #endif
75 };
76
77 static bool cache_pruner_job(void *data);
78
79 static clockid_t detect_fastest_monotonic_clock(void)
80 {
81 #ifdef CLOCK_MONOTONIC_COARSE
82     struct timespec ts;
83
84     if (!clock_gettime(CLOCK_MONOTONIC_COARSE, &ts))
85         return CLOCK_MONOTONIC_COARSE;
86 #endif
87     return CLOCK_MONOTONIC;
88 }
89
90 static ALWAYS_INLINE void clock_monotonic_gettime(struct cache *cache,
91                                                   struct timespec *ts)
92 {
93     if (UNLIKELY(clock_gettime(cache->settings.clock_id, ts) < 0))
94         lwan_status_perror("clock_gettime");
95 }
96
97 struct cache *cache_create(cache_create_entry_cb create_entry_cb,
98                            cache_destroy_entry_cb destroy_entry_cb,
99                            void *cb_context,
100                            time_t time_to_live)
101 {
102     struct cache *cache;
103
104     assert(create_entry_cb);
105     assert(destroy_entry_cb);
106     assert(time_to_live > 0);
107
108     cache = calloc(1, sizeof(*cache));
109     if (!cache)
110         return NULL;
111
112     cache->hash.table = hash_str_new(free, NULL);
113     if (!cache->hash.table)
114         goto error_no_hash;
115
116     if (pthread_rwlock_init(&cache->hash.lock, NULL))
117         goto error_no_hash_lock;
118     if (pthread_rwlock_init(&cache->queue.lock, NULL))
119         goto error_no_queue_lock;
120
121     cache->cb.create_entry = create_entry_cb;
122     cache->cb.destroy_entry = destroy_entry_cb;
123     cache->cb.context = cb_context;
124
125     cache->settings.clock_id = detect_fastest_monotonic_clock();
126     cache->settings.time_to_live = time_to_live;
127
128     list_head_init(&cache->queue.list);
129
130     lwan_job_add(cache_pruner_job, cache);
131
132     return cache;
133
134 error_no_queue_lock:
135     pthread_rwlock_destroy(&cache->hash.lock);
136 error_no_hash_lock:
137     hash_free(cache->hash.table);
138 error_no_hash:
139     free(cache);
140
141     return NULL;
142 }
143
144 void cache_destroy(struct cache *cache)
145 {
146     assert(cache);
147
148 #ifndef NDEBUG
149     lwan_status_debug("Cache stats: %d hits, %d misses, %d evictions",
150                       cache->stats.hits, cache->stats.misses,
151                       cache->stats.evicted);
152 #endif
153
154     lwan_job_del(cache_pruner_job, cache);
155     cache->flags |= SHUTTING_DOWN;
156     cache_pruner_job(cache);
157     pthread_rwlock_destroy(&cache->hash.lock);
158     pthread_rwlock_destroy(&cache->queue.lock);
159     hash_free(cache->hash.table);
160     free(cache);
161 }
162
163 static ALWAYS_INLINE void convert_to_temporary(struct cache_entry *entry)
164 {
165     entry->flags = TEMPORARY;
166 }
167
168 struct cache_entry *cache_get_and_ref_entry(struct cache *cache,
169                                             const char *key, int *error)
170 {
171     struct cache_entry *entry;
172     char *key_copy;
173
174     assert(cache);
175     assert(error);
176     assert(key);
177
178     *error = 0;
179
180     /* If the lock can't be obtained, return an error to allow, for instance,
181      * yielding from the coroutine and trying to obtain the lock at a later
182      * time. */
183     if (UNLIKELY(pthread_rwlock_tryrdlock(&cache->hash.lock) == EBUSY)) {
        [3] Taking false branch
184         *error = EWOULDBLOCK;
185         return NULL;
186     }
187     /* Find the item in the hash table. If it's there, increment the reference
188      * and return it. */
189     entry = hash_find(cache->hash.table, key);
190     if (LIKELY(entry)) {
        [4] Taking false branch
191         ATOMIC_INC(entry->refs);
192         pthread_rwlock_unlock(&cache->hash.lock);
193 #ifndef NDEBUG
194         ATOMIC_INC(cache->stats.hits);
195 #endif
196         return entry;
197     }
198
199     /* Unlock the cache so the item can be created. */
200     pthread_rwlock_unlock(&cache->hash.lock);
201
202 #ifndef NDEBUG
203     ATOMIC_INC(cache->stats.misses);
204 #endif
205
206     key_copy = strdup(key);
207     if (UNLIKELY(!key_copy)) {
        [5] Taking false branch
208         *error = ENOMEM;
209         return NULL;
210     }
211
212     entry = cache->cb.create_entry(key, cache->cb.context);
213     if (!entry) {
        [6] Assuming 'entry' is non-null
        [7] Taking false branch
214         free(key_copy);
215         return NULL;
216     }
217
218     memset(entry, 0, sizeof(*entry));
219     entry->key = key_copy;
220     entry->refs = 1;
221
222     if (pthread_rwlock_trywrlock(&cache->hash.lock) == EBUSY) {
        [8] Assuming the condition is false
        [9] Taking false branch
223         /* Couldn't obtain hash lock: instead of waiting, just return
224          * the recently-created item as a temporary item. Might result
225          * in starvation, though, so this might be changed back to
226          * pthread_rwlock_wrlock() again someday if this proves to be
227          * a problem. */
228         convert_to_temporary(entry);
229         return entry;
230     }
231
232     if (!hash_add_unique(cache->hash.table, entry->key, entry)) {
        [10] Assuming the condition is false
        [11] Taking false branch
233         struct timespec time_to_die;
234         clock_monotonic_gettime(cache, &time_to_die);
235         entry->time_to_die = time_to_die.tv_sec + cache->settings.time_to_live;
236
237         if (LIKELY(!pthread_rwlock_wrlock(&cache->queue.lock))) {
238             list_add_tail(&cache->queue.list, &entry->entries);
239             pthread_rwlock_unlock(&cache->queue.lock);
240         } else {
241             convert_to_temporary(entry);
242
243             /* Ensure item is removed from the hash table; otherwise,
244              * another thread could potentially get another reference
245              * to this entry and cause an invalid memory access. */
246             hash_del(cache->hash.table, entry->key);
247         }
248     } else {
249         /* Either there's another item with the same key (-EEXIST), or
250          * there was an error inside the hash table. In either case,
251          * just return a TEMPORARY entry so that it is destroyed the first
252          * time someone unrefs this entry. TEMPORARY entries are pretty much
253          * like FLOATING entries, but unreffing them do not use atomic
254          * operations. */
255         convert_to_temporary(entry);
256     }
257
258     pthread_rwlock_unlock(&cache->hash.lock);
        [12] This lock has already been unlocked
259     return entry;
260 }
261
262 void cache_entry_unref(struct cache *cache, struct cache_entry *entry)
263 {
264     assert(entry);
265
266     if (entry->flags & TEMPORARY) {
267         free(entry->key);
268         goto destroy_entry;
269     }
270
271     if (ATOMIC_DEC(entry->refs))
272         return;
273
274     /* FLOATING entries without references won't be picked up by the pruner
275      * job, so destroy them right here. */
276     if (entry->flags & FLOATING) {
277 destroy_entry:
278         /* FIXME: There's a race condition here: if the cache is destroyed
279          * while there are cache items floating around, this will dereference
280          * deallocated memory. */
281         cache->cb.destroy_entry(entry, cache->cb.context);
282     }
283 }
284
285 static bool cache_pruner_job(void *data)
286 {
287     struct cache *cache = data;
288     struct cache_entry *node, *next;
289     struct timespec now;
290     bool shutting_down = cache->flags & SHUTTING_DOWN;
291     unsigned evicted = 0;
292     struct list_head queue;
293
294     if (UNLIKELY(pthread_rwlock_trywrlock(&cache->queue.lock) == EBUSY))
295         return false;
296
297     /* If the queue is empty, there's nothing to do; unlock/return */
298     if (list_empty(&cache->queue.list)) {
299         if (UNLIKELY(pthread_rwlock_unlock(&cache->queue.lock)))
300             lwan_status_perror("pthread_rwlock_unlock");
301         return false;
302     }
303
304     /* There are things to do; assign cache queue to a local queue,
305      * initialize cache queue to an empty queue. Then unlock */
306     list_head_init(&queue);
307     list_append_list(&queue, &cache->queue.list);
308     list_head_init(&cache->queue.list);
309
310     if (UNLIKELY(pthread_rwlock_unlock(&cache->queue.lock))) {
311         lwan_status_perror("pthread_rwlock_unlock");
312         goto end;
313     }
314
315     clock_monotonic_gettime(cache, &now);
316     list_for_each_safe(&queue, node, next, entries) {
317         char *key = node->key;
318
319         if (now.tv_sec < node->time_to_die && LIKELY(!shutting_down))
320             break;
321
322         list_del(&node->entries);
323
324         if (UNLIKELY(pthread_rwlock_wrlock(&cache->hash.lock))) {
325             lwan_status_perror("pthread_rwlock_wrlock");
326             continue;
327         }
328
329         hash_del(cache->hash.table, key);
330
331         if (UNLIKELY(pthread_rwlock_unlock(&cache->hash.lock)))
332             lwan_status_perror("pthread_rwlock_unlock");
333
334         if (ATOMIC_INC(node->refs) == 1) {
335             cache->cb.destroy_entry(node, cache->cb.context);
336         } else {
337             ATOMIC_BITWISE(&node->flags, or, FLOATING);
338             /* Decrement the reference and see if we were genuinely the last one
339              * holding it. If so, destroy the entry. */
340             if (!ATOMIC_DEC(node->refs))
341                 cache->cb.destroy_entry(node, cache->cb.context);
342         }
343
344         evicted++;
345     }
346
347     /* If local queue has been entirely processed, there's no need to
348      * append items in the cache queue to it; just update statistics and
349      * return */
350     if (list_empty(&queue))
351         goto end;
352
353     /* Prepend local, unprocessed queue, to the cache queue. Since the cache
354      * item TTL is constant, items created later will be destroyed later. */
355     if (LIKELY(!pthread_rwlock_wrlock(&cache->queue.lock))) {
356         list_prepend_list(&cache->queue.list, &queue);
357         pthread_rwlock_unlock(&cache->queue.lock);
358     } else {
359         lwan_status_perror("pthread_rwlock_wrlock");
360     }
361
362 end:
363 #ifndef NDEBUG
364     ATOMIC_AAF(&cache->stats.evicted, evicted);
365 #endif
366     return evicted;
367 }
368
369 struct cache_entry*
370 cache_coro_get_and_ref_entry(struct cache *cache, struct coro *coro,
371                              const char *key)
372 {
373     for (int tries = GET_AND_REF_TRIES; tries; tries--) {
        [1] Loop condition is true. Entering loop body
374         int error;
375         struct cache_entry *ce = cache_get_and_ref_entry(cache, key, &error);
        [2] Calling 'cache_get_and_ref_entry'
376
377         if (LIKELY(ce)) {
378             /*
379              * This is deferred here so that, if the coroutine is killed
380              * after it has been yielded, this cache entry is properly
381              * freed.
382              */
383             coro_defer2(coro, CORO_DEFER2(cache_entry_unref), cache, ce);
384             return ce;
385         }
386
387         /*
388          * If the cache would block while reading its hash table, yield and
389          * try again. On any other error, just return NULL.
390          */
391         if (error == EWOULDBLOCK) {
392             coro_yield(coro, CONN_CORO_MAY_RESUME);
393         } else {
394             break;
395         }
396     }
397
398     return NULL;
399 }
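The warning ultimately turns on how the result of pthread_rwlock_trywrlock() at line 222 is interpreted. The fragment below is a purely hypothetical illustration, not the project's code (try_publish, insert_locked and table_lock are stand-in names): comparing the try-lock result against 0 rather than against EBUSY makes "lock held" explicit on every path, so the final unlock is only reachable after a successful acquisition.

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

static bool insert_locked(void)
{
    /* Stand-in for the insertion and queueing work done under the lock. */
    return true;
}

static bool try_publish(void)
{
    /* Any non-zero result (EBUSY or otherwise) means the lock is not held. */
    if (pthread_rwlock_trywrlock(&table_lock) != 0)
        return false;

    bool ok = insert_locked();          /* the write lock is held only on this path */

    pthread_rwlock_unlock(&table_lock); /* exactly one unlock per successful try-lock */
    return ok;
}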