File: | lwan-thread.c |
Warning: | line 668, column 9 Assigned value is garbage or undefined |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | /* | |||
2 | * lwan - simple web server | |||
3 | * Copyright (c) 2012, 2013 Leandro A. F. Pereira <leandro@hardinfo.org> | |||
4 | * | |||
5 | * This program is free software; you can redistribute it and/or | |||
6 | * modify it under the terms of the GNU General Public License | |||
7 | * as published by the Free Software Foundation; either version 2 | |||
8 | * of the License, or any later version. | |||
9 | * | |||
10 | * This program is distributed in the hope that it will be useful, | |||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |||
13 | * GNU General Public License for more details. | |||
14 | * | |||
15 | * You should have received a copy of the GNU General Public License | |||
16 | * along with this program; if not, write to the Free Software | |||
17 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, | |||
18 | * USA. | |||
19 | */ | |||
20 | ||||
21 | #define _GNU_SOURCE | |||
22 | #include <assert.h> | |||
23 | #include <errno(*__errno_location ()).h> | |||
24 | #include <fcntl.h> | |||
25 | #include <pthread.h> | |||
26 | #include <sched.h> | |||
27 | #include <stdlib.h> | |||
28 | #include <string.h> | |||
29 | #include <sys/epoll.h> | |||
30 | #include <sys/ioctl.h> | |||
31 | #include <sys/socket.h> | |||
32 | #include <unistd.h> | |||
33 | ||||
34 | #if defined(HAVE_EVENTFD) | |||
35 | #include <sys/eventfd.h> | |||
36 | #endif | |||
37 | ||||
38 | #include "lwan-private.h" | |||
39 | #include "lwan-tq.h" | |||
40 | #include "list.h" | |||
41 | ||||
/* Adapter with the void-pointer signature required by coro_defer(),
 * forwarding to lwan_strbuf_free(). */
static void lwan_strbuf_free_defer(void *data)
{
    struct lwan_strbuf *strbuf = data;

    lwan_strbuf_free(strbuf);
}
46 | ||||
47 | static void graceful_close(struct lwan *l, | |||
48 | struct lwan_connection *conn, | |||
49 | char buffer[static DEFAULT_BUFFER_SIZE4096]) | |||
50 | { | |||
51 | int fd = lwan_connection_get_fd(l, conn); | |||
52 | ||||
53 | while (TIOCOUTQ0x5411) { | |||
54 | /* This ioctl isn't probably doing what it says on the tin; the details | |||
55 | * are subtle, but it seems to do the trick to allow gracefully closing | |||
56 | * the connection in some cases with minimal system calls. */ | |||
57 | int bytes_waiting; | |||
58 | int r = ioctl(fd, TIOCOUTQ0x5411, &bytes_waiting); | |||
59 | ||||
60 | if (!r && !bytes_waiting) /* See note about close(2) below. */ | |||
61 | return; | |||
62 | if (r < 0 && errno(*__errno_location ()) == EINTR4) | |||
63 | continue; | |||
64 | ||||
65 | break; | |||
66 | } | |||
67 | ||||
68 | if (UNLIKELY(shutdown(fd, SHUT_WR) < 0)__builtin_expect(((shutdown(fd, SHUT_WR) < 0)), (0))) { | |||
69 | if (UNLIKELY(errno == ENOTCONN)__builtin_expect((((*__errno_location ()) == 107)), (0))) | |||
70 | return; | |||
71 | } | |||
72 | ||||
73 | for (int tries = 0; tries < 20; tries++) { | |||
74 | ssize_t r = read(fd, buffer, DEFAULT_BUFFER_SIZE4096); | |||
75 | ||||
76 | if (!r) | |||
77 | break; | |||
78 | ||||
79 | if (r < 0) { | |||
80 | switch (errno(*__errno_location ())) { | |||
81 | case EINTR4: | |||
82 | continue; | |||
83 | case EAGAIN11: | |||
84 | coro_yield(conn->coro, CONN_CORO_WANT_READ); | |||
85 | continue; | |||
86 | default: | |||
87 | return; | |||
88 | } | |||
89 | } | |||
90 | ||||
91 | coro_yield(conn->coro, CONN_CORO_YIELD); | |||
92 | } | |||
93 | ||||
94 | /* close(2) will be called when the coroutine yields with CONN_CORO_ABORT */ | |||
95 | } | |||
96 | ||||
97 | __attribute__((noreturn)) static int process_request_coro(struct coro *coro, | |||
98 | void *data) | |||
99 | { | |||
100 | /* NOTE: This function should not return; coro_yield should be used | |||
101 | * instead. This ensures the storage for `strbuf` is alive when the | |||
102 | * coroutine ends and lwan_strbuf_free() is called. */ | |||
103 | struct lwan_connection *conn = data; | |||
104 | struct lwan *lwan = conn->thread->lwan; | |||
105 | int fd = lwan_connection_get_fd(lwan, conn); | |||
106 | enum lwan_request_flags flags = lwan->config.request_flags; | |||
107 | struct lwan_strbuf strbuf = LWAN_STRBUF_STATIC_INIT(struct lwan_strbuf) { .buffer = "" }; | |||
108 | char request_buffer[DEFAULT_BUFFER_SIZE4096]; | |||
109 | struct lwan_value buffer = {.value = request_buffer, .len = 0}; | |||
110 | char *next_request = NULL((void*)0); | |||
111 | struct lwan_proxy proxy; | |||
112 | ||||
113 | coro_defer(coro, lwan_strbuf_free_defer, &strbuf); | |||
114 | ||||
115 | const size_t init_gen = 1; /* 1 call to coro_defer() */ | |||
116 | assert(init_gen == coro_deferred_get_generation(coro))((void) sizeof ((init_gen == coro_deferred_get_generation(coro )) ? 1 : 0), __extension__ ({ if (init_gen == coro_deferred_get_generation (coro)) ; else __assert_fail ("init_gen == coro_deferred_get_generation(coro)" , "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 116, __extension__ __PRETTY_FUNCTION__); })); | |||
117 | ||||
118 | while (true1) { | |||
119 | struct lwan_request request = {.conn = conn, | |||
120 | .fd = fd, | |||
121 | .response = {.buffer = &strbuf}, | |||
122 | .flags = flags, | |||
123 | .proxy = &proxy}; | |||
124 | ||||
125 | next_request = | |||
126 | lwan_process_request(lwan, &request, &buffer, next_request); | |||
127 | ||||
128 | if (coro_deferred_get_generation(coro) > ((2 * LWAN_ARRAY_INCREMENT16) / 3)) { | |||
129 | /* Batch execution of coro_defers() up to 2/3 LWAN_ARRAY_INCREMENT times, | |||
130 | * to avoid moving deferred array to heap in most cases. (This is to give | |||
131 | * some slack to the next request being processed by this coro.) */ | |||
132 | coro_deferred_run(coro, init_gen); | |||
133 | } | |||
134 | ||||
135 | if (LIKELY(conn->flags & CONN_IS_KEEP_ALIVE)__builtin_expect((!!(conn->flags & CONN_IS_KEEP_ALIVE) ), (1))) { | |||
136 | if (next_request && *next_request) { | |||
137 | conn->flags |= CONN_CORK; | |||
138 | coro_yield(coro, CONN_CORO_WANT_WRITE); | |||
139 | } else { | |||
140 | conn->flags &= ~CONN_CORK; | |||
141 | coro_yield(coro, CONN_CORO_WANT_READ); | |||
142 | } | |||
143 | } else { | |||
144 | graceful_close(lwan, conn, request_buffer); | |||
145 | break; | |||
146 | } | |||
147 | ||||
148 | lwan_strbuf_reset(&strbuf); | |||
149 | ||||
150 | /* Only allow flags from config. */ | |||
151 | flags = request.flags & (REQUEST_PROXIED | REQUEST_ALLOW_CORS); | |||
152 | } | |||
153 | ||||
154 | coro_yield(coro, CONN_CORO_ABORT); | |||
155 | __builtin_unreachable(); | |||
156 | } | |||
157 | ||||
158 | #undef REQUEST_FLAG | |||
159 | ||||
160 | static ALWAYS_INLINEinline __attribute__((always_inline)) uint32_t | |||
161 | conn_flags_to_epoll_events(enum lwan_connection_flags flags) | |||
162 | { | |||
163 | static const uint32_t map[CONN_EVENTS_MASK + 1] = { | |||
164 | [0 /* Suspended (timer or await) */] = EPOLLRDHUPEPOLLRDHUP, | |||
165 | [CONN_EVENTS_WRITE] = EPOLLOUTEPOLLOUT | EPOLLRDHUPEPOLLRDHUP, | |||
166 | [CONN_EVENTS_READ] = EPOLLINEPOLLIN | EPOLLRDHUPEPOLLRDHUP, | |||
167 | [CONN_EVENTS_READ_WRITE] = EPOLLINEPOLLIN | EPOLLOUTEPOLLOUT | EPOLLRDHUPEPOLLRDHUP, | |||
168 | }; | |||
169 | ||||
170 | return map[flags & CONN_EVENTS_MASK]; | |||
171 | } | |||
172 | ||||
173 | static void update_epoll_flags(int fd, | |||
174 | struct lwan_connection *conn, | |||
175 | int epoll_fd, | |||
176 | enum lwan_connection_coro_yield yield_result) | |||
177 | { | |||
178 | static const enum lwan_connection_flags or_mask[CONN_CORO_MAX] = { | |||
179 | [CONN_CORO_YIELD] = 0, | |||
180 | ||||
181 | [CONN_CORO_WANT_READ_WRITE] = CONN_EVENTS_READ_WRITE, | |||
182 | [CONN_CORO_WANT_READ] = CONN_EVENTS_READ, | |||
183 | [CONN_CORO_WANT_WRITE] = CONN_EVENTS_WRITE, | |||
184 | ||||
185 | /* While the coro is suspended, we're not interested in either EPOLLIN | |||
186 | * or EPOLLOUT events. We still want to track this fd in epoll, though, | |||
187 | * so unset both so that only EPOLLRDHUP (plus the implicitly-set ones) | |||
188 | * are set. */ | |||
189 | [CONN_CORO_SUSPEND_TIMER] = CONN_SUSPENDED_TIMER, | |||
190 | [CONN_CORO_SUSPEND_ASYNC_AWAIT] = CONN_SUSPENDED_ASYNC_AWAIT, | |||
191 | ||||
192 | /* Ideally, when suspending a coroutine, the current flags&CONN_EVENTS_MASK | |||
193 | * would have to be stored and restored -- however, resuming as if the | |||
194 | * client coroutine is interested in a write event always guarantees that | |||
195 | * they'll be resumed as they're TCP sockets. There's a good chance that | |||
196 | * trying to read from a socket after resuming a coroutine will succeed, | |||
197 | * but if it doesn't because read() returns -EAGAIN, the I/O wrappers will | |||
198 | * yield with CONN_CORO_WANT_READ anyway. */ | |||
199 | [CONN_CORO_RESUME] = CONN_EVENTS_WRITE, | |||
200 | }; | |||
201 | static const enum lwan_connection_flags and_mask[CONN_CORO_MAX] = { | |||
202 | [CONN_CORO_YIELD] = ~0, | |||
203 | ||||
204 | [CONN_CORO_WANT_READ_WRITE] = ~0, | |||
205 | [CONN_CORO_WANT_READ] = ~CONN_EVENTS_WRITE, | |||
206 | [CONN_CORO_WANT_WRITE] = ~CONN_EVENTS_READ, | |||
207 | ||||
208 | [CONN_CORO_SUSPEND_TIMER] = ~(CONN_EVENTS_READ_WRITE | CONN_SUSPENDED_ASYNC_AWAIT), | |||
209 | [CONN_CORO_SUSPEND_ASYNC_AWAIT] = ~(CONN_EVENTS_READ_WRITE | CONN_SUSPENDED_TIMER), | |||
210 | [CONN_CORO_RESUME] = ~CONN_SUSPENDED, | |||
211 | }; | |||
212 | enum lwan_connection_flags prev_flags = conn->flags; | |||
213 | ||||
214 | conn->flags |= or_mask[yield_result]; | |||
215 | conn->flags &= and_mask[yield_result]; | |||
216 | ||||
217 | if (conn->flags == prev_flags) | |||
218 | return; | |||
219 | ||||
220 | struct epoll_event event = { | |||
221 | .events = conn_flags_to_epoll_events(conn->flags), | |||
222 | .data.ptr = conn, | |||
223 | }; | |||
224 | ||||
225 | if (UNLIKELY(epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0)__builtin_expect(((epoll_ctl(epoll_fd, 3, fd, &event) < 0)), (0))) | |||
226 | lwan_status_perror("epoll_ctl")lwan_status_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 226, __FUNCTION__, "epoll_ctl"); | |||
227 | } | |||
228 | ||||
229 | static void clear_async_await_flag(void *data) | |||
230 | { | |||
231 | struct lwan_connection *async_fd_conn = data; | |||
232 | ||||
233 | async_fd_conn->flags &= ~CONN_ASYNC_AWAIT; | |||
234 | } | |||
235 | ||||
236 | static enum lwan_connection_coro_yield | |||
237 | resume_async(struct timeout_queue *tq, | |||
238 | enum lwan_connection_coro_yield yield_result, | |||
239 | int64_t from_coro, | |||
240 | struct lwan_connection *conn, | |||
241 | int epoll_fd) | |||
242 | { | |||
243 | static const enum lwan_connection_flags to_connection_flags[] = { | |||
244 | [CONN_CORO_ASYNC_AWAIT_READ] = CONN_EVENTS_READ, | |||
245 | [CONN_CORO_ASYNC_AWAIT_WRITE] = CONN_EVENTS_WRITE, | |||
246 | [CONN_CORO_ASYNC_AWAIT_READ_WRITE] = CONN_EVENTS_READ_WRITE, | |||
247 | }; | |||
248 | int await_fd = (int)((uint64_t)from_coro >> 32); | |||
249 | enum lwan_connection_flags flags; | |||
250 | int op; | |||
251 | ||||
252 | assert(await_fd >= 0)((void) sizeof ((await_fd >= 0) ? 1 : 0), __extension__ ({ if (await_fd >= 0) ; else __assert_fail ("await_fd >= 0" , "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 252, __extension__ __PRETTY_FUNCTION__); })); | |||
253 | assert(yield_result >= CONN_CORO_ASYNC_AWAIT_READ &&((void) sizeof ((yield_result >= CONN_CORO_ASYNC_AWAIT_READ && yield_result <= CONN_CORO_ASYNC_AWAIT_READ_WRITE ) ? 1 : 0), __extension__ ({ if (yield_result >= CONN_CORO_ASYNC_AWAIT_READ && yield_result <= CONN_CORO_ASYNC_AWAIT_READ_WRITE ) ; else __assert_fail ("yield_result >= CONN_CORO_ASYNC_AWAIT_READ && yield_result <= CONN_CORO_ASYNC_AWAIT_READ_WRITE" , "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 254, __extension__ __PRETTY_FUNCTION__); })) | |||
254 | yield_result <= CONN_CORO_ASYNC_AWAIT_READ_WRITE)((void) sizeof ((yield_result >= CONN_CORO_ASYNC_AWAIT_READ && yield_result <= CONN_CORO_ASYNC_AWAIT_READ_WRITE ) ? 1 : 0), __extension__ ({ if (yield_result >= CONN_CORO_ASYNC_AWAIT_READ && yield_result <= CONN_CORO_ASYNC_AWAIT_READ_WRITE ) ; else __assert_fail ("yield_result >= CONN_CORO_ASYNC_AWAIT_READ && yield_result <= CONN_CORO_ASYNC_AWAIT_READ_WRITE" , "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 254, __extension__ __PRETTY_FUNCTION__); })); | |||
255 | ||||
256 | flags = to_connection_flags[yield_result]; | |||
257 | ||||
258 | struct lwan_connection *await_fd_conn = &tq->lwan->conns[await_fd]; | |||
259 | if (LIKELY(await_fd_conn->flags & CONN_ASYNC_AWAIT)__builtin_expect((!!(await_fd_conn->flags & CONN_ASYNC_AWAIT )), (1))) { | |||
260 | if (LIKELY((await_fd_conn->flags & CONN_EVENTS_MASK) == flags)__builtin_expect((!!((await_fd_conn->flags & CONN_EVENTS_MASK ) == flags)), (1))) | |||
261 | return CONN_CORO_SUSPEND_ASYNC_AWAIT; | |||
262 | ||||
263 | op = EPOLL_CTL_MOD3; | |||
264 | } else { | |||
265 | op = EPOLL_CTL_ADD1; | |||
266 | flags |= CONN_ASYNC_AWAIT; | |||
267 | coro_defer(conn->coro, clear_async_await_flag, await_fd_conn); | |||
268 | } | |||
269 | ||||
270 | struct epoll_event event = {.events = conn_flags_to_epoll_events(flags), | |||
271 | .data.ptr = conn}; | |||
272 | if (LIKELY(!epoll_ctl(epoll_fd, op, await_fd, &event))__builtin_expect((!!(!epoll_ctl(epoll_fd, op, await_fd, & event))), (1))) { | |||
273 | await_fd_conn->flags &= ~CONN_EVENTS_MASK; | |||
274 | await_fd_conn->flags |= flags; | |||
275 | return CONN_CORO_SUSPEND_ASYNC_AWAIT; | |||
276 | } | |||
277 | ||||
278 | return CONN_CORO_ABORT; | |||
279 | } | |||
280 | ||||
281 | static ALWAYS_INLINEinline __attribute__((always_inline)) void resume_coro(struct timeout_queue *tq, | |||
282 | struct lwan_connection *conn, | |||
283 | int epoll_fd) | |||
284 | { | |||
285 | assert(conn->coro)((void) sizeof ((conn->coro) ? 1 : 0), __extension__ ({ if (conn->coro) ; else __assert_fail ("conn->coro", "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 285, __extension__ __PRETTY_FUNCTION__); })); | |||
286 | ||||
287 | int64_t from_coro = coro_resume(conn->coro); | |||
288 | enum lwan_connection_coro_yield yield_result = from_coro & 0xffffffff; | |||
289 | ||||
290 | if (UNLIKELY(yield_result >= CONN_CORO_ASYNC)__builtin_expect(((yield_result >= CONN_CORO_ASYNC)), (0))) | |||
291 | yield_result = resume_async(tq, yield_result, from_coro, conn, epoll_fd); | |||
292 | ||||
293 | if (UNLIKELY(yield_result == CONN_CORO_ABORT)__builtin_expect(((yield_result == CONN_CORO_ABORT)), (0))) | |||
294 | return timeout_queue_expire(tq, conn); | |||
295 | ||||
296 | return update_epoll_flags(lwan_connection_get_fd(tq->lwan, conn), conn, | |||
297 | epoll_fd, yield_result); | |||
298 | } | |||
299 | ||||
300 | static void update_date_cache(struct lwan_thread *thread) | |||
301 | { | |||
302 | time_t now = time(NULL((void*)0)); | |||
303 | ||||
304 | lwan_format_rfc_time(now, thread->date.date); | |||
305 | lwan_format_rfc_time(now + (time_t)thread->lwan->config.expires, | |||
306 | thread->date.expires); | |||
307 | } | |||
308 | ||||
309 | static ALWAYS_INLINEinline __attribute__((always_inline)) void spawn_coro(struct lwan_connection *conn, | |||
310 | struct coro_switcher *switcher, | |||
311 | struct timeout_queue *tq) | |||
312 | { | |||
313 | struct lwan_thread *t = conn->thread; | |||
314 | ||||
315 | assert(!conn->coro)((void) sizeof ((!conn->coro) ? 1 : 0), __extension__ ({ if (!conn->coro) ; else __assert_fail ("!conn->coro", "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 315, __extension__ __PRETTY_FUNCTION__); })); | |||
316 | assert(t)((void) sizeof ((t) ? 1 : 0), __extension__ ({ if (t) ; else __assert_fail ("t", "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 316, __extension__ __PRETTY_FUNCTION__); })); | |||
317 | assert((uintptr_t)t >= (uintptr_t)tq->lwan->thread.threads)((void) sizeof (((uintptr_t)t >= (uintptr_t)tq->lwan-> thread.threads) ? 1 : 0), __extension__ ({ if ((uintptr_t)t >= (uintptr_t)tq->lwan->thread.threads) ; else __assert_fail ("(uintptr_t)t >= (uintptr_t)tq->lwan->thread.threads" , "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 317, __extension__ __PRETTY_FUNCTION__); })); | |||
318 | assert((uintptr_t)t <((void) sizeof (((uintptr_t)t < (uintptr_t)(tq->lwan-> thread.threads + tq->lwan->thread.count)) ? 1 : 0), __extension__ ({ if ((uintptr_t)t < (uintptr_t)(tq->lwan->thread. threads + tq->lwan->thread.count)) ; else __assert_fail ("(uintptr_t)t < (uintptr_t)(tq->lwan->thread.threads + tq->lwan->thread.count)" , "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 319, __extension__ __PRETTY_FUNCTION__); })) | |||
319 | (uintptr_t)(tq->lwan->thread.threads + tq->lwan->thread.count))((void) sizeof (((uintptr_t)t < (uintptr_t)(tq->lwan-> thread.threads + tq->lwan->thread.count)) ? 1 : 0), __extension__ ({ if ((uintptr_t)t < (uintptr_t)(tq->lwan->thread. threads + tq->lwan->thread.count)) ; else __assert_fail ("(uintptr_t)t < (uintptr_t)(tq->lwan->thread.threads + tq->lwan->thread.count)" , "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 319, __extension__ __PRETTY_FUNCTION__); })); | |||
320 | ||||
321 | *conn = (struct lwan_connection) { | |||
322 | .coro = coro_new(switcher, process_request_coro, conn), | |||
323 | .flags = CONN_EVENTS_READ, | |||
324 | .time_to_expire = tq->current_time + tq->move_to_last_bump, | |||
325 | .thread = t, | |||
326 | }; | |||
327 | if (UNLIKELY(!conn->coro)__builtin_expect(((!conn->coro)), (0))) { | |||
328 | conn->flags = 0; | |||
329 | lwan_status_error("Could not create coroutine")lwan_status_error_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 329, __FUNCTION__, "Could not create coroutine"); | |||
330 | return; | |||
331 | } | |||
332 | ||||
333 | timeout_queue_insert(tq, conn); | |||
334 | } | |||
335 | ||||
336 | static void accept_nudge(int pipe_fd, | |||
337 | struct lwan_thread *t, | |||
338 | struct lwan_connection *conns, | |||
339 | struct timeout_queue *tq, | |||
340 | struct coro_switcher *switcher, | |||
341 | int epoll_fd) | |||
342 | { | |||
343 | uint64_t event; | |||
344 | int new_fd; | |||
345 | ||||
346 | /* Errors are ignored here as pipe_fd serves just as a way to wake the | |||
347 | * thread from epoll_wait(). It's fine to consume the queue at this | |||
348 | * point, regardless of the error type. */ | |||
349 | (void)read(pipe_fd, &event, sizeof(event)); | |||
350 | ||||
351 | while (spsc_queue_pop(&t->pending_fds, &new_fd)) { | |||
352 | struct lwan_connection *conn = &conns[new_fd]; | |||
353 | struct epoll_event ev = { | |||
354 | .data.ptr = conn, | |||
355 | .events = conn_flags_to_epoll_events(CONN_EVENTS_READ), | |||
356 | }; | |||
357 | ||||
358 | if (LIKELY(!epoll_ctl(epoll_fd, EPOLL_CTL_ADD, new_fd, &ev))__builtin_expect((!!(!epoll_ctl(epoll_fd, 1, new_fd, &ev) )), (1))) | |||
359 | spawn_coro(conn, switcher, tq); | |||
360 | } | |||
361 | ||||
362 | timeouts_add(t->wheel, &tq->timeout, 1000); | |||
363 | } | |||
364 | ||||
365 | static bool_Bool process_pending_timers(struct timeout_queue *tq, | |||
366 | struct lwan_thread *t, | |||
367 | int epoll_fd) | |||
368 | { | |||
369 | struct timeout *timeout; | |||
370 | bool_Bool should_expire_timers = false0; | |||
371 | ||||
372 | while ((timeout = timeouts_get(t->wheel))) { | |||
373 | struct lwan_request *request; | |||
374 | ||||
375 | if (timeout == &tq->timeout) { | |||
376 | should_expire_timers = true1; | |||
377 | continue; | |||
378 | } | |||
379 | ||||
380 | request = container_of(timeout, struct lwan_request, timeout)((struct lwan_request *) ((char *)(timeout) - __builtin_offsetof (struct lwan_request, timeout)) + ((typeof(*(timeout)) *)0 != (typeof(((struct lwan_request *)0)->timeout) *)0)); | |||
381 | ||||
382 | update_epoll_flags(request->fd, request->conn, epoll_fd, | |||
383 | CONN_CORO_RESUME); | |||
384 | } | |||
385 | ||||
386 | if (should_expire_timers) { | |||
387 | timeout_queue_expire_waiting(tq); | |||
388 | ||||
389 | /* tq timeout expires every 1000ms if there are connections, so | |||
390 | * update the date cache at this point as well. */ | |||
391 | update_date_cache(t); | |||
392 | ||||
393 | if (!timeout_queue_empty(tq)) { | |||
394 | timeouts_add(t->wheel, &tq->timeout, 1000); | |||
395 | return true1; | |||
396 | } | |||
397 | ||||
398 | timeouts_del(t->wheel, &tq->timeout); | |||
399 | } | |||
400 | ||||
401 | return false0; | |||
402 | } | |||
403 | ||||
404 | static int | |||
405 | turn_timer_wheel(struct timeout_queue *tq, struct lwan_thread *t, int epoll_fd) | |||
406 | { | |||
407 | timeout_t wheel_timeout; | |||
408 | struct timespec now; | |||
409 | ||||
410 | if (UNLIKELY(clock_gettime(monotonic_clock_id, &now) < 0)__builtin_expect(((clock_gettime(monotonic_clock_id, &now ) < 0)), (0))) | |||
411 | lwan_status_critical("Could not get monotonic time")lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 411, __FUNCTION__, "Could not get monotonic time"); | |||
412 | ||||
413 | timeouts_update(t->wheel, | |||
414 | (timeout_t)(now.tv_sec * 1000 + now.tv_nsec / 1000000)); | |||
415 | ||||
416 | wheel_timeout = timeouts_timeout(t->wheel); | |||
417 | if (UNLIKELY((int64_t)wheel_timeout < 0)__builtin_expect((((int64_t)wheel_timeout < 0)), (0))) | |||
418 | goto infinite_timeout; | |||
419 | ||||
420 | if (wheel_timeout == 0) { | |||
421 | if (!process_pending_timers(tq, t, epoll_fd)) | |||
422 | goto infinite_timeout; | |||
423 | ||||
424 | wheel_timeout = timeouts_timeout(t->wheel); | |||
425 | if (wheel_timeout == 0) | |||
426 | goto infinite_timeout; | |||
427 | } | |||
428 | ||||
429 | return (int)wheel_timeout; | |||
430 | ||||
431 | infinite_timeout: | |||
432 | return -1; | |||
433 | } | |||
434 | ||||
435 | static void *thread_io_loop(void *data) | |||
436 | { | |||
437 | struct lwan_thread *t = data; | |||
438 | int epoll_fd = t->epoll_fd; | |||
439 | const int read_pipe_fd = t->pipe_fd[0]; | |||
440 | const int max_events = LWAN_MIN((int)t->lwan->thread.max_fd, 1024)({ const __typeof__(((int)t->lwan->thread.max_fd) + 0) lwan_tmp_id8 = ((int)t->lwan->thread.max_fd); const __typeof__((1024 ) + 0) lwan_tmp_id9 = (1024); lwan_tmp_id8 > lwan_tmp_id9 ? lwan_tmp_id9 : lwan_tmp_id8; }); | |||
441 | struct lwan *lwan = t->lwan; | |||
442 | struct epoll_event *events; | |||
443 | struct coro_switcher switcher; | |||
444 | struct timeout_queue tq; | |||
445 | ||||
446 | lwan_status_debug("Worker thread #%zd starting",lwan_status_debug_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 447, __FUNCTION__, "Worker thread #%zd starting", t - t-> lwan->thread.threads + 1) | |||
447 | t - t->lwan->thread.threads + 1)lwan_status_debug_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 447, __FUNCTION__, "Worker thread #%zd starting", t - t-> lwan->thread.threads + 1); | |||
448 | lwan_set_thread_name("worker"); | |||
449 | ||||
450 | events = calloc((size_t)max_events, sizeof(*events)); | |||
451 | if (UNLIKELY(!events)__builtin_expect(((!events)), (0))) | |||
452 | lwan_status_critical("Could not allocate memory for events")lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 452, __FUNCTION__, "Could not allocate memory for events"); | |||
453 | ||||
454 | update_date_cache(t); | |||
455 | ||||
456 | timeout_queue_init(&tq, lwan); | |||
457 | ||||
458 | pthread_barrier_wait(&lwan->thread.barrier); | |||
459 | ||||
460 | for (;;) { | |||
461 | int timeout = turn_timer_wheel(&tq, t, epoll_fd); | |||
462 | int n_fds = epoll_wait(epoll_fd, events, max_events, timeout); | |||
463 | ||||
464 | if (UNLIKELY(n_fds < 0)__builtin_expect(((n_fds < 0)), (0))) { | |||
465 | if (errno(*__errno_location ()) == EBADF9 || errno(*__errno_location ()) == EINVAL22) | |||
466 | break; | |||
467 | continue; | |||
468 | } | |||
469 | ||||
470 | for (struct epoll_event *event = events; n_fds--; event++) { | |||
471 | struct lwan_connection *conn; | |||
472 | ||||
473 | if (UNLIKELY(!event->data.ptr)__builtin_expect(((!event->data.ptr)), (0))) { | |||
474 | accept_nudge(read_pipe_fd, t, lwan->conns, &tq, &switcher, | |||
475 | epoll_fd); | |||
476 | continue; | |||
477 | } | |||
478 | ||||
479 | conn = event->data.ptr; | |||
480 | ||||
481 | if (UNLIKELY(event->events & (EPOLLRDHUP | EPOLLHUP))__builtin_expect(((event->events & (EPOLLRDHUP | EPOLLHUP ))), (0))) { | |||
482 | timeout_queue_expire(&tq, conn); | |||
483 | continue; | |||
484 | } | |||
485 | ||||
486 | resume_coro(&tq, conn, epoll_fd); | |||
487 | timeout_queue_move_to_last(&tq, conn); | |||
488 | } | |||
489 | } | |||
490 | ||||
491 | pthread_barrier_wait(&lwan->thread.barrier); | |||
492 | ||||
493 | timeout_queue_expire_all(&tq); | |||
494 | free(events); | |||
495 | ||||
496 | return NULL((void*)0); | |||
497 | } | |||
498 | ||||
499 | static void create_thread(struct lwan *l, struct lwan_thread *thread, | |||
500 | const size_t n_queue_fds) | |||
501 | { | |||
502 | int ignore; | |||
503 | pthread_attr_t attr; | |||
504 | ||||
505 | memset(thread, 0, sizeof(*thread)); | |||
506 | thread->lwan = l; | |||
507 | ||||
508 | thread->wheel = timeouts_open(&ignore); | |||
509 | if (!thread->wheel) | |||
510 | lwan_status_critical("Could not create timer wheel")lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 510, __FUNCTION__, "Could not create timer wheel"); | |||
511 | ||||
512 | if ((thread->epoll_fd = epoll_create1(EPOLL_CLOEXECEPOLL_CLOEXEC)) < 0) | |||
513 | lwan_status_critical_perror("epoll_create")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 513, __FUNCTION__, "epoll_create"); | |||
514 | ||||
515 | if (pthread_attr_init(&attr)) | |||
516 | lwan_status_critical_perror("pthread_attr_init")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 516, __FUNCTION__, "pthread_attr_init"); | |||
517 | ||||
518 | if (pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEMPTHREAD_SCOPE_SYSTEM)) | |||
519 | lwan_status_critical_perror("pthread_attr_setscope")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 519, __FUNCTION__, "pthread_attr_setscope"); | |||
520 | ||||
521 | if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLEPTHREAD_CREATE_JOINABLE)) | |||
522 | lwan_status_critical_perror("pthread_attr_setdetachstate")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 522, __FUNCTION__, "pthread_attr_setdetachstate"); | |||
523 | ||||
524 | #if defined(HAVE_EVENTFD) | |||
525 | int efd = eventfd(0, EFD_NONBLOCKEFD_NONBLOCK | EFD_SEMAPHOREEFD_SEMAPHORE | EFD_CLOEXECEFD_CLOEXEC); | |||
526 | if (efd < 0) | |||
527 | lwan_status_critical_perror("eventfd")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 527, __FUNCTION__, "eventfd"); | |||
528 | ||||
529 | thread->pipe_fd[0] = thread->pipe_fd[1] = efd; | |||
530 | #else | |||
531 | if (pipe2(thread->pipe_fd, O_NONBLOCK04000 | O_CLOEXEC02000000) < 0) | |||
532 | lwan_status_critical_perror("pipe")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 532, __FUNCTION__, "pipe"); | |||
533 | #endif | |||
534 | ||||
535 | struct epoll_event event = { .events = EPOLLINEPOLLIN, .data.ptr = NULL((void*)0) }; | |||
536 | if (epoll_ctl(thread->epoll_fd, EPOLL_CTL_ADD1, thread->pipe_fd[0], &event) < 0) | |||
537 | lwan_status_critical_perror("epoll_ctl")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 537, __FUNCTION__, "epoll_ctl"); | |||
538 | ||||
539 | if (pthread_create(&thread->self, &attr, thread_io_loop, thread)) | |||
540 | lwan_status_critical_perror("pthread_create")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 540, __FUNCTION__, "pthread_create"); | |||
541 | ||||
542 | if (pthread_attr_destroy(&attr)) | |||
543 | lwan_status_critical_perror("pthread_attr_destroy")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 543, __FUNCTION__, "pthread_attr_destroy"); | |||
544 | ||||
545 | if (spsc_queue_init(&thread->pending_fds, n_queue_fds) < 0) { | |||
546 | lwan_status_critical("Could not initialize pending fd "lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 547, __FUNCTION__, "Could not initialize pending fd " "queue width %zu elements" , n_queue_fds) | |||
547 | "queue width %zu elements", n_queue_fds)lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 547, __FUNCTION__, "Could not initialize pending fd " "queue width %zu elements" , n_queue_fds); | |||
548 | } | |||
549 | } | |||
550 | ||||
551 | void lwan_thread_nudge(struct lwan_thread *t) | |||
552 | { | |||
553 | uint64_t event = 1; | |||
554 | ||||
555 | if (UNLIKELY(write(t->pipe_fd[1], &event, sizeof(event)) < 0)__builtin_expect(((write(t->pipe_fd[1], &event, sizeof (event)) < 0)), (0))) | |||
556 | lwan_status_perror("write")lwan_status_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 556, __FUNCTION__, "write"); | |||
557 | } | |||
558 | ||||
559 | void lwan_thread_add_client(struct lwan_thread *t, int fd) | |||
560 | { | |||
561 | for (int i = 0; i < 10; i++) { | |||
562 | bool_Bool pushed = spsc_queue_push(&t->pending_fds, fd); | |||
563 | ||||
564 | if (LIKELY(pushed)__builtin_expect((!!(pushed)), (1))) | |||
565 | return; | |||
566 | ||||
567 | /* Queue is full; nudge the thread to consume it. */ | |||
568 | lwan_thread_nudge(t); | |||
569 | } | |||
570 | ||||
571 | lwan_status_error("Dropping connection %d", fd)lwan_status_error_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 571, __FUNCTION__, "Dropping connection %d", fd); | |||
572 | /* FIXME: send "busy" response now, even without receiving request? */ | |||
573 | close(fd); | |||
574 | } | |||
575 | ||||
576 | #if defined(__linux__1) && defined(__x86_64__1) | |||
577 | static bool_Bool read_cpu_topology(struct lwan *l, uint32_t siblings[]) | |||
578 | { | |||
579 | char path[PATH_MAX4096]; | |||
580 | ||||
581 | for (unsigned int i = 0; i < l->n_cpus; i++) { | |||
582 | FILE *sib; | |||
583 | uint32_t id, sibling; | |||
584 | char separator; | |||
585 | ||||
586 | snprintf(path, sizeof(path), | |||
587 | "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", | |||
588 | i); | |||
589 | ||||
590 | sib = fopen(path, "re"); | |||
591 | if (!sib) { | |||
592 | lwan_status_warning("Could not open `%s` to determine CPU topology",lwan_status_warning_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 593, __FUNCTION__, "Could not open `%s` to determine CPU topology" , path) | |||
593 | path)lwan_status_warning_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 593, __FUNCTION__, "Could not open `%s` to determine CPU topology" , path); | |||
594 | return false0; | |||
595 | } | |||
596 | ||||
597 | switch (fscanf(sib, "%u%c%u", &id, &separator, &sibling)) { | |||
598 | case 2: /* No SMT */ | |||
599 | siblings[i] = id; | |||
600 | break; | |||
601 | case 3: /* SMT */ | |||
602 | if (!(separator == ',' || separator == '-')) { | |||
603 | lwan_status_critical("Expecting either ',' or '-' for sibling separator")lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 603, __FUNCTION__, "Expecting either ',' or '-' for sibling separator" ); | |||
604 | __builtin_unreachable(); | |||
605 | } | |||
606 | ||||
607 | siblings[i] = sibling; | |||
608 | break; | |||
609 | default: | |||
610 | lwan_status_critical("%s has invalid format", path)lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 610, __FUNCTION__, "%s has invalid format", path); | |||
611 | __builtin_unreachable(); | |||
612 | } | |||
613 | ||||
614 | ||||
615 | fclose(sib); | |||
616 | } | |||
617 | ||||
618 | return true1; | |||
619 | } | |||
620 | ||||
621 | static void | |||
622 | siblings_to_schedtbl(struct lwan *l, uint32_t siblings[], uint32_t schedtbl[]) | |||
623 | { | |||
624 | int *seen = alloca(l->n_cpus * sizeof(int))__builtin_alloca (l->n_cpus * sizeof(int)); | |||
625 | int n_schedtbl = 0; | |||
626 | ||||
627 | for (uint32_t i = 0; i < l->n_cpus; i++) | |||
628 | seen[i] = -1; | |||
629 | ||||
630 | for (uint32_t i = 0; i < l->n_cpus; i++) { | |||
631 | if (seen[siblings[i]] < 0) { | |||
632 | seen[siblings[i]] = (int)i; | |||
633 | } else { | |||
634 | schedtbl[n_schedtbl++] = (uint32_t)seen[siblings[i]]; | |||
635 | schedtbl[n_schedtbl++] = i; | |||
636 | } | |||
637 | } | |||
638 | ||||
639 | if (!n_schedtbl) | |||
640 | memcpy(schedtbl, seen, l->n_cpus * sizeof(int)); | |||
641 | } | |||
642 | ||||
643 | static void | |||
644 | topology_to_schedtbl(struct lwan *l, uint32_t schedtbl[], uint32_t n_threads) | |||
645 | { | |||
646 | uint32_t *siblings = alloca(l->n_cpus * sizeof(uint32_t))__builtin_alloca (l->n_cpus * sizeof(uint32_t)); | |||
647 | ||||
648 | if (!read_cpu_topology(l, siblings)) { | |||
649 | for (uint32_t i = 0; i < n_threads; i++) | |||
650 | schedtbl[i] = (i / 2) % l->thread.count; | |||
651 | } else { | |||
652 | uint32_t *affinity = alloca(l->n_cpus * sizeof(uint32_t))__builtin_alloca (l->n_cpus * sizeof(uint32_t)); | |||
653 | ||||
654 | siblings_to_schedtbl(l, siblings, affinity); | |||
655 | ||||
656 | for (uint32_t i = 0; i < n_threads; i++) | |||
657 | schedtbl[i] = affinity[i % l->n_cpus]; | |||
658 | } | |||
659 | } | |||
660 | ||||
661 | static void | |||
662 | adjust_threads_affinity(struct lwan *l, uint32_t *schedtbl, uint32_t mask) | |||
663 | { | |||
664 | for (uint32_t i = 0; i < l->thread.count; i++) { | |||
665 | cpu_set_t set; | |||
666 | ||||
667 | CPU_ZERO(&set)do __builtin_memset (&set, '\0', sizeof (cpu_set_t)); while (0); | |||
668 | CPU_SET(schedtbl[i & mask], &set)(__extension__ ({ size_t __cpu = (schedtbl[i & mask]); __cpu / 8 < (sizeof (cpu_set_t)) ? (((__cpu_mask *) ((&set) ->__bits))[((__cpu) / (8 * sizeof (__cpu_mask)))] |= ((__cpu_mask ) 1 << ((__cpu) % (8 * sizeof (__cpu_mask))))) : 0; })); | |||
| ||||
669 | ||||
670 | if (pthread_setaffinity_np(l->thread.threads[i].self, sizeof(set), | |||
671 | &set)) | |||
672 | lwan_status_warning("Could not set affinity for thread %d", i)lwan_status_warning_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 672, __FUNCTION__, "Could not set affinity for thread %d", i ); | |||
673 | } | |||
674 | } | |||
675 | #elif defined(__x86_64__1) | |||
676 | static void | |||
677 | topology_to_schedtbl(struct lwan *l, uint32_t schedtbl[], uint32_t n_threads) | |||
678 | { | |||
679 | for (uint32_t i = 0; i < n_threads; i++) | |||
680 | schedtbl[i] = (i / 2) % l->thread.count; | |||
681 | } | |||
682 | ||||
/* No-op on non-Linux platforms: CPU affinity is only adjusted where the
 * sysfs CPU topology information (and pthread_setaffinity_np) is
 * available. */
static void
adjust_threads_affinity(struct lwan *l, uint32_t *schedtbl, uint32_t n)
{
}
687 | #endif | |||
688 | ||||
689 | void lwan_thread_init(struct lwan *l) | |||
690 | { | |||
691 | if (pthread_barrier_init(&l->thread.barrier, NULL((void*)0), | |||
| ||||
692 | (unsigned)l->thread.count + 1)) | |||
693 | lwan_status_critical("Could not create barrier")lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 693, __FUNCTION__, "Could not create barrier"); | |||
694 | ||||
695 | lwan_status_debug("Initializing threads")lwan_status_debug_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 695, __FUNCTION__, "Initializing threads"); | |||
696 | ||||
697 | l->thread.threads = | |||
698 | calloc((size_t)l->thread.count, sizeof(struct lwan_thread)); | |||
699 | if (!l->thread.threads) | |||
700 | lwan_status_critical("Could not allocate memory for threads")lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 700, __FUNCTION__, "Could not allocate memory for threads"); | |||
701 | ||||
702 | const size_t n_queue_fds = LWAN_MIN(l->thread.max_fd / l->thread.count,({ const __typeof__((l->thread.max_fd / l->thread.count ) + 0) lwan_tmp_id10 = (l->thread.max_fd / l->thread.count ); const __typeof__(((size_t)(2 * lwan_socket_get_backlog_size ())) + 0) lwan_tmp_id11 = ((size_t)(2 * lwan_socket_get_backlog_size ())); lwan_tmp_id10 > lwan_tmp_id11 ? lwan_tmp_id11 : lwan_tmp_id10 ; }) | |||
703 | (size_t)(2 * lwan_socket_get_backlog_size()))({ const __typeof__((l->thread.max_fd / l->thread.count ) + 0) lwan_tmp_id10 = (l->thread.max_fd / l->thread.count ); const __typeof__(((size_t)(2 * lwan_socket_get_backlog_size ())) + 0) lwan_tmp_id11 = ((size_t)(2 * lwan_socket_get_backlog_size ())); lwan_tmp_id10 > lwan_tmp_id11 ? lwan_tmp_id11 : lwan_tmp_id10 ; }); | |||
704 | lwan_status_debug("Pending client file descriptor queue has %zu items", n_queue_fds)lwan_status_debug_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 704, __FUNCTION__, "Pending client file descriptor queue has %zu items" , n_queue_fds); | |||
705 | for (unsigned int i = 0; i
| |||
706 | create_thread(l, &l->thread.threads[i], n_queue_fds); | |||
707 | ||||
708 | const unsigned int total_conns = l->thread.max_fd * l->thread.count; | |||
709 | #ifdef __x86_64__1 | |||
710 | static_assert(sizeof(struct lwan_connection) == 32,extern int (*__Static_assert_function (void)) [!!sizeof (struct { int __error_if_negative: (sizeof(struct lwan_connection) == 32) ? 2 : -1; })] | |||
711 | "Two connections per cache line")extern int (*__Static_assert_function (void)) [!!sizeof (struct { int __error_if_negative: (sizeof(struct lwan_connection) == 32) ? 2 : -1; })]; | |||
712 | /* | |||
713 | * Pre-schedule each file descriptor, to reduce some operations in the | |||
714 | * fast path. | |||
715 | * | |||
716 | * Since struct lwan_connection is guaranteed to be 32-byte long, two of | |||
717 | * them can fill up a cache line. Assume siblings share cache lines and | |||
718 | * use the CPU topology to group two connections per cache line in such | |||
719 | * a way that false sharing is avoided. | |||
720 | */ | |||
721 | uint32_t n_threads = (uint32_t)lwan_nextpow2((size_t)((l->thread.count - 1) * 2)); | |||
722 | uint32_t *schedtbl = alloca(n_threads * sizeof(uint32_t))__builtin_alloca (n_threads * sizeof(uint32_t)); | |||
723 | ||||
724 | topology_to_schedtbl(l, schedtbl, n_threads); | |||
725 | ||||
726 | n_threads--; /* Transform count into mask for AND below */ | |||
727 | adjust_threads_affinity(l, schedtbl, n_threads); | |||
728 | for (unsigned int i = 0; i < total_conns; i++) | |||
729 | l->conns[i].thread = &l->thread.threads[schedtbl[i & n_threads]]; | |||
730 | #else | |||
731 | for (unsigned int i = 0; i < total_conns; i++) | |||
732 | l->conns[i].thread = &l->thread.threads[i % l->thread.count]; | |||
733 | #endif | |||
734 | ||||
735 | pthread_barrier_wait(&l->thread.barrier); | |||
736 | ||||
737 | lwan_status_debug("Worker threads created and ready to serve")lwan_status_debug_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 737, __FUNCTION__, "Worker threads created and ready to serve" ); | |||
738 | } | |||
739 | ||||
740 | void lwan_thread_shutdown(struct lwan *l) | |||
741 | { | |||
742 | lwan_status_debug("Shutting down threads")lwan_status_debug_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 742, __FUNCTION__, "Shutting down threads"); | |||
743 | ||||
744 | for (unsigned int i = 0; i < l->thread.count; i++) { | |||
745 | struct lwan_thread *t = &l->thread.threads[i]; | |||
746 | ||||
747 | close(t->epoll_fd); | |||
748 | lwan_thread_nudge(t); | |||
749 | } | |||
750 | ||||
751 | pthread_barrier_wait(&l->thread.barrier); | |||
752 | pthread_barrier_destroy(&l->thread.barrier); | |||
753 | ||||
754 | for (unsigned int i = 0; i < l->thread.count; i++) { | |||
755 | struct lwan_thread *t = &l->thread.threads[i]; | |||
756 | ||||
757 | close(t->pipe_fd[0]); | |||
758 | #if !defined(HAVE_EVENTFD) | |||
759 | close(t->pipe_fd[1]); | |||
760 | #endif | |||
761 | ||||
762 | pthread_join(l->thread.threads[i].self, NULL((void*)0)); | |||
763 | spsc_queue_free(&t->pending_fds); | |||
764 | timeouts_close(t->wheel); | |||
765 | } | |||
766 | ||||
767 | free(l->thread.threads); | |||
768 | } |