File: | lwan-thread.c |
Warning: | line 558, column 13 Value stored to 'accepted_connections' is never read |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | /* |
2 | * lwan - simple web server |
3 | * Copyright (c) 2012, 2013 Leandro A. F. Pereira <leandro@hardinfo.org> |
4 | * |
5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License |
7 | * as published by the Free Software Foundation; either version 2 |
8 | * of the License, or any later version. |
9 | * |
10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. |
14 | * |
15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, |
18 | * USA. |
19 | */ |
20 | |
#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sched.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
33 | |
34 | #include "lwan-private.h" |
35 | #include "lwan-tq.h" |
36 | #include "list.h" |
37 | |
/* coro_defer() callback: releases the strbuf that backs a response buffer. */
static void lwan_strbuf_free_defer(void *data)
{
    struct lwan_strbuf *strbuf = data;

    lwan_strbuf_free(strbuf);
}
42 | |
43 | static void graceful_close(struct lwan *l, |
44 | struct lwan_connection *conn, |
45 | char buffer[static DEFAULT_BUFFER_SIZE4096]) |
46 | { |
47 | int fd = lwan_connection_get_fd(l, conn); |
48 | |
49 | while (TIOCOUTQ0x5411) { |
50 | /* This ioctl isn't probably doing what it says on the tin; the details |
51 | * are subtle, but it seems to do the trick to allow gracefully closing |
52 | * the connection in some cases with minimal system calls. */ |
53 | int bytes_waiting; |
54 | int r = ioctl(fd, TIOCOUTQ0x5411, &bytes_waiting); |
55 | |
56 | if (!r && !bytes_waiting) /* See note about close(2) below. */ |
57 | return; |
58 | if (r < 0 && errno(*__errno_location ()) == EINTR4) |
59 | continue; |
60 | |
61 | break; |
62 | } |
63 | |
64 | if (UNLIKELY(shutdown(fd, SHUT_WR) < 0)__builtin_expect(((shutdown(fd, SHUT_WR) < 0)), (0))) { |
65 | if (UNLIKELY(errno == ENOTCONN)__builtin_expect((((*__errno_location ()) == 107)), (0))) |
66 | return; |
67 | } |
68 | |
69 | for (int tries = 0; tries < 20; tries++) { |
70 | ssize_t r = read(fd, buffer, DEFAULT_BUFFER_SIZE4096); |
71 | |
72 | if (!r) |
73 | break; |
74 | |
75 | if (r < 0) { |
76 | switch (errno(*__errno_location ())) { |
77 | case EAGAIN11: |
78 | break; |
79 | case EINTR4: |
80 | continue; |
81 | default: |
82 | return; |
83 | } |
84 | } |
85 | |
86 | coro_yield(conn->coro, CONN_CORO_WANT_READ); |
87 | } |
88 | |
89 | /* close(2) will be called when the coroutine yields with CONN_CORO_ABORT */ |
90 | } |
91 | |
92 | __attribute__((noreturn)) static int process_request_coro(struct coro *coro, |
93 | void *data) |
94 | { |
95 | /* NOTE: This function should not return; coro_yield should be used |
96 | * instead. This ensures the storage for `strbuf` is alive when the |
97 | * coroutine ends and lwan_strbuf_free() is called. */ |
98 | struct lwan_connection *conn = data; |
99 | struct lwan *lwan = conn->thread->lwan; |
100 | int fd = lwan_connection_get_fd(lwan, conn); |
101 | enum lwan_request_flags flags = lwan->config.request_flags; |
102 | struct lwan_strbuf strbuf = LWAN_STRBUF_STATIC_INIT(struct lwan_strbuf) { .buffer = "" }; |
103 | char request_buffer[DEFAULT_BUFFER_SIZE4096]; |
104 | struct lwan_value buffer = {.value = request_buffer, .len = 0}; |
105 | char *next_request = NULL((void*)0); |
106 | char *header_start[N_HEADER_START64]; |
107 | struct lwan_proxy proxy; |
108 | const int error_when_n_packets = lwan_calculate_n_packets(DEFAULT_BUFFER_SIZE4096); |
109 | |
110 | coro_defer(coro, lwan_strbuf_free_defer, &strbuf); |
111 | |
112 | const size_t init_gen = 1; /* 1 call to coro_defer() */ |
113 | assert(init_gen == coro_deferred_get_generation(coro))((void) sizeof ((init_gen == coro_deferred_get_generation(coro )) ? 1 : 0), __extension__ ({ if (init_gen == coro_deferred_get_generation (coro)) ; else __assert_fail ("init_gen == coro_deferred_get_generation(coro)" , "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 113, __extension__ __PRETTY_FUNCTION__); })); |
114 | |
115 | while (true1) { |
116 | struct lwan_request_parser_helper helper = { |
117 | .buffer = &buffer, |
118 | .next_request = next_request, |
119 | .error_when_n_packets = error_when_n_packets, |
120 | .header_start = header_start, |
121 | }; |
122 | struct lwan_request request = {.conn = conn, |
123 | .global_response_headers = &lwan->headers, |
124 | .fd = fd, |
125 | .response = {.buffer = &strbuf}, |
126 | .flags = flags, |
127 | .proxy = &proxy, |
128 | .helper = &helper}; |
129 | |
130 | lwan_process_request(lwan, &request); |
131 | |
132 | /* Run the deferred instructions now (except those used to initialize |
133 | * the coroutine), so that if the connection is gracefully closed, |
134 | * the storage for ``helper'' is still there. */ |
135 | coro_deferred_run(coro, init_gen); |
136 | |
137 | if (UNLIKELY(!(conn->flags & CONN_IS_KEEP_ALIVE))__builtin_expect(((!(conn->flags & CONN_IS_KEEP_ALIVE) )), (0))) { |
138 | graceful_close(lwan, conn, request_buffer); |
139 | break; |
140 | } |
141 | |
142 | if (next_request && *next_request) { |
143 | conn->flags |= CONN_CORK; |
144 | |
145 | if (!(conn->flags & CONN_EVENTS_WRITE)) |
146 | coro_yield(coro, CONN_CORO_WANT_WRITE); |
147 | } else { |
148 | conn->flags &= ~CONN_CORK; |
149 | coro_yield(coro, CONN_CORO_WANT_READ); |
150 | } |
151 | |
152 | /* Ensure string buffer is reset between requests, and that the backing |
153 | * store isn't over 2KB. */ |
154 | lwan_strbuf_reset_trim(&strbuf, 2048); |
155 | |
156 | /* Only allow flags from config. */ |
157 | flags = request.flags & (REQUEST_PROXIED | REQUEST_ALLOW_CORS); |
158 | next_request = helper.next_request; |
159 | } |
160 | |
161 | coro_yield(coro, CONN_CORO_ABORT); |
162 | __builtin_unreachable(); |
163 | } |
164 | |
165 | static ALWAYS_INLINEinline __attribute__((always_inline)) uint32_t |
166 | conn_flags_to_epoll_events(enum lwan_connection_flags flags) |
167 | { |
168 | static const uint32_t map[CONN_EVENTS_MASK + 1] = { |
169 | [0 /* Suspended (timer or await) */] = EPOLLRDHUPEPOLLRDHUP, |
170 | [CONN_EVENTS_WRITE] = EPOLLOUTEPOLLOUT | EPOLLRDHUPEPOLLRDHUP, |
171 | [CONN_EVENTS_READ] = EPOLLINEPOLLIN | EPOLLRDHUPEPOLLRDHUP, |
172 | [CONN_EVENTS_READ_WRITE] = EPOLLINEPOLLIN | EPOLLOUTEPOLLOUT | EPOLLRDHUPEPOLLRDHUP, |
173 | }; |
174 | |
175 | return map[flags & CONN_EVENTS_MASK]; |
176 | } |
177 | |
178 | static void update_epoll_flags(int fd, |
179 | struct lwan_connection *conn, |
180 | int epoll_fd, |
181 | enum lwan_connection_coro_yield yield_result) |
182 | { |
183 | static const enum lwan_connection_flags or_mask[CONN_CORO_MAX] = { |
184 | [CONN_CORO_YIELD] = 0, |
185 | |
186 | [CONN_CORO_WANT_READ_WRITE] = CONN_EVENTS_READ_WRITE, |
187 | [CONN_CORO_WANT_READ] = CONN_EVENTS_READ, |
188 | [CONN_CORO_WANT_WRITE] = CONN_EVENTS_WRITE, |
189 | |
190 | /* While the coro is suspended, we're not interested in either EPOLLIN |
191 | * or EPOLLOUT events. We still want to track this fd in epoll, though, |
192 | * so unset both so that only EPOLLRDHUP (plus the implicitly-set ones) |
193 | * are set. */ |
194 | [CONN_CORO_SUSPEND] = CONN_SUSPENDED, |
195 | |
196 | /* Ideally, when suspending a coroutine, the current flags&CONN_EVENTS_MASK |
197 | * would have to be stored and restored -- however, resuming as if the |
198 | * client coroutine is interested in a write event always guarantees that |
199 | * they'll be resumed as they're TCP sockets. There's a good chance that |
200 | * trying to read from a socket after resuming a coroutine will succeed, |
201 | * but if it doesn't because read() returns -EAGAIN, the I/O wrappers will |
202 | * yield with CONN_CORO_WANT_READ anyway. */ |
203 | [CONN_CORO_RESUME] = CONN_EVENTS_WRITE, |
204 | }; |
205 | static const enum lwan_connection_flags and_mask[CONN_CORO_MAX] = { |
206 | [CONN_CORO_YIELD] = ~0, |
207 | |
208 | [CONN_CORO_WANT_READ_WRITE] = ~0, |
209 | [CONN_CORO_WANT_READ] = ~CONN_EVENTS_WRITE, |
210 | [CONN_CORO_WANT_WRITE] = ~CONN_EVENTS_READ, |
211 | |
212 | [CONN_CORO_SUSPEND] = ~CONN_EVENTS_READ_WRITE, |
213 | [CONN_CORO_RESUME] = ~CONN_SUSPENDED, |
214 | }; |
215 | enum lwan_connection_flags prev_flags = conn->flags; |
216 | |
217 | conn->flags |= or_mask[yield_result]; |
218 | conn->flags &= and_mask[yield_result]; |
219 | |
220 | if (conn->flags == prev_flags) |
221 | return; |
222 | |
223 | struct epoll_event event = { |
224 | .events = conn_flags_to_epoll_events(conn->flags), |
225 | .data.ptr = conn, |
226 | }; |
227 | |
228 | if (UNLIKELY(epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0)__builtin_expect(((epoll_ctl(epoll_fd, 3, fd, &event) < 0)), (0))) |
229 | lwan_status_perror("epoll_ctl")lwan_status_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 229, __FUNCTION__, "epoll_ctl"); |
230 | } |
231 | |
232 | static void clear_async_await_flag(void *data) |
233 | { |
234 | struct lwan_connection *async_fd_conn = data; |
235 | |
236 | async_fd_conn->flags &= ~CONN_ASYNC_AWAIT; |
237 | } |
238 | |
239 | static enum lwan_connection_coro_yield |
240 | resume_async(struct timeout_queue *tq, |
241 | enum lwan_connection_coro_yield yield_result, |
242 | int64_t from_coro, |
243 | struct lwan_connection *conn, |
244 | int epoll_fd) |
245 | { |
246 | static const enum lwan_connection_flags to_connection_flags[] = { |
247 | [CONN_CORO_ASYNC_AWAIT_READ] = CONN_EVENTS_READ, |
248 | [CONN_CORO_ASYNC_AWAIT_WRITE] = CONN_EVENTS_WRITE, |
249 | [CONN_CORO_ASYNC_AWAIT_READ_WRITE] = CONN_EVENTS_READ_WRITE, |
250 | }; |
251 | int await_fd = (int)((uint64_t)from_coro >> 32); |
252 | enum lwan_connection_flags flags; |
253 | int op; |
254 | |
255 | assert(await_fd >= 0)((void) sizeof ((await_fd >= 0) ? 1 : 0), __extension__ ({ if (await_fd >= 0) ; else __assert_fail ("await_fd >= 0" , "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 255, __extension__ __PRETTY_FUNCTION__); })); |
256 | assert(yield_result >= CONN_CORO_ASYNC_AWAIT_READ &&((void) sizeof ((yield_result >= CONN_CORO_ASYNC_AWAIT_READ && yield_result <= CONN_CORO_ASYNC_AWAIT_READ_WRITE ) ? 1 : 0), __extension__ ({ if (yield_result >= CONN_CORO_ASYNC_AWAIT_READ && yield_result <= CONN_CORO_ASYNC_AWAIT_READ_WRITE ) ; else __assert_fail ("yield_result >= CONN_CORO_ASYNC_AWAIT_READ && yield_result <= CONN_CORO_ASYNC_AWAIT_READ_WRITE" , "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 257, __extension__ __PRETTY_FUNCTION__); })) |
257 | yield_result <= CONN_CORO_ASYNC_AWAIT_READ_WRITE)((void) sizeof ((yield_result >= CONN_CORO_ASYNC_AWAIT_READ && yield_result <= CONN_CORO_ASYNC_AWAIT_READ_WRITE ) ? 1 : 0), __extension__ ({ if (yield_result >= CONN_CORO_ASYNC_AWAIT_READ && yield_result <= CONN_CORO_ASYNC_AWAIT_READ_WRITE ) ; else __assert_fail ("yield_result >= CONN_CORO_ASYNC_AWAIT_READ && yield_result <= CONN_CORO_ASYNC_AWAIT_READ_WRITE" , "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 257, __extension__ __PRETTY_FUNCTION__); })); |
258 | |
259 | flags = to_connection_flags[yield_result]; |
260 | |
261 | struct lwan_connection *await_fd_conn = &tq->lwan->conns[await_fd]; |
262 | if (LIKELY(await_fd_conn->flags & CONN_ASYNC_AWAIT)__builtin_expect((!!(await_fd_conn->flags & CONN_ASYNC_AWAIT )), (1))) { |
263 | if (LIKELY((await_fd_conn->flags & CONN_EVENTS_MASK) == flags)__builtin_expect((!!((await_fd_conn->flags & CONN_EVENTS_MASK ) == flags)), (1))) |
264 | return CONN_CORO_SUSPEND; |
265 | |
266 | op = EPOLL_CTL_MOD3; |
267 | } else { |
268 | op = EPOLL_CTL_ADD1; |
269 | flags |= CONN_ASYNC_AWAIT; |
270 | coro_defer(conn->coro, clear_async_await_flag, await_fd_conn); |
271 | } |
272 | |
273 | struct epoll_event event = {.events = conn_flags_to_epoll_events(flags), |
274 | .data.ptr = conn}; |
275 | if (LIKELY(!epoll_ctl(epoll_fd, op, await_fd, &event))__builtin_expect((!!(!epoll_ctl(epoll_fd, op, await_fd, & event))), (1))) { |
276 | await_fd_conn->flags &= ~CONN_EVENTS_MASK; |
277 | await_fd_conn->flags |= flags; |
278 | return CONN_CORO_SUSPEND; |
279 | } |
280 | |
281 | return CONN_CORO_ABORT; |
282 | } |
283 | |
284 | static ALWAYS_INLINEinline __attribute__((always_inline)) void resume_coro(struct timeout_queue *tq, |
285 | struct lwan_connection *conn, |
286 | int epoll_fd) |
287 | { |
288 | assert(conn->coro)((void) sizeof ((conn->coro) ? 1 : 0), __extension__ ({ if (conn->coro) ; else __assert_fail ("conn->coro", "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 288, __extension__ __PRETTY_FUNCTION__); })); |
289 | |
290 | int64_t from_coro = coro_resume(conn->coro); |
291 | enum lwan_connection_coro_yield yield_result = from_coro & 0xffffffff; |
292 | |
293 | if (UNLIKELY(yield_result >= CONN_CORO_ASYNC)__builtin_expect(((yield_result >= CONN_CORO_ASYNC)), (0))) |
294 | yield_result = resume_async(tq, yield_result, from_coro, conn, epoll_fd); |
295 | |
296 | if (UNLIKELY(yield_result == CONN_CORO_ABORT)__builtin_expect(((yield_result == CONN_CORO_ABORT)), (0))) |
297 | return timeout_queue_expire(tq, conn); |
298 | |
299 | return update_epoll_flags(lwan_connection_get_fd(tq->lwan, conn), conn, |
300 | epoll_fd, yield_result); |
301 | } |
302 | |
303 | static void update_date_cache(struct lwan_thread *thread) |
304 | { |
305 | time_t now = time(NULL((void*)0)); |
306 | |
307 | lwan_format_rfc_time(now, thread->date.date); |
308 | lwan_format_rfc_time(now + (time_t)thread->lwan->config.expires, |
309 | thread->date.expires); |
310 | } |
311 | |
312 | static ALWAYS_INLINEinline __attribute__((always_inline)) void spawn_coro(struct lwan_connection *conn, |
313 | struct coro_switcher *switcher, |
314 | struct timeout_queue *tq) |
315 | { |
316 | struct lwan_thread *t = conn->thread; |
317 | |
318 | assert(!conn->coro)((void) sizeof ((!conn->coro) ? 1 : 0), __extension__ ({ if (!conn->coro) ; else __assert_fail ("!conn->coro", "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 318, __extension__ __PRETTY_FUNCTION__); })); |
319 | assert(!(conn->flags & CONN_ASYNC_AWAIT))((void) sizeof ((!(conn->flags & CONN_ASYNC_AWAIT)) ? 1 : 0), __extension__ ({ if (!(conn->flags & CONN_ASYNC_AWAIT )) ; else __assert_fail ("!(conn->flags & CONN_ASYNC_AWAIT)" , "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 319, __extension__ __PRETTY_FUNCTION__); })); |
320 | assert(t)((void) sizeof ((t) ? 1 : 0), __extension__ ({ if (t) ; else __assert_fail ("t", "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 320, __extension__ __PRETTY_FUNCTION__); })); |
321 | assert((uintptr_t)t >= (uintptr_t)tq->lwan->thread.threads)((void) sizeof (((uintptr_t)t >= (uintptr_t)tq->lwan-> thread.threads) ? 1 : 0), __extension__ ({ if ((uintptr_t)t >= (uintptr_t)tq->lwan->thread.threads) ; else __assert_fail ("(uintptr_t)t >= (uintptr_t)tq->lwan->thread.threads" , "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 321, __extension__ __PRETTY_FUNCTION__); })); |
322 | assert((uintptr_t)t <((void) sizeof (((uintptr_t)t < (uintptr_t)(tq->lwan-> thread.threads + tq->lwan->thread.count)) ? 1 : 0), __extension__ ({ if ((uintptr_t)t < (uintptr_t)(tq->lwan->thread. threads + tq->lwan->thread.count)) ; else __assert_fail ("(uintptr_t)t < (uintptr_t)(tq->lwan->thread.threads + tq->lwan->thread.count)" , "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 323, __extension__ __PRETTY_FUNCTION__); })) |
323 | (uintptr_t)(tq->lwan->thread.threads + tq->lwan->thread.count))((void) sizeof (((uintptr_t)t < (uintptr_t)(tq->lwan-> thread.threads + tq->lwan->thread.count)) ? 1 : 0), __extension__ ({ if ((uintptr_t)t < (uintptr_t)(tq->lwan->thread. threads + tq->lwan->thread.count)) ; else __assert_fail ("(uintptr_t)t < (uintptr_t)(tq->lwan->thread.threads + tq->lwan->thread.count)" , "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 323, __extension__ __PRETTY_FUNCTION__); })); |
324 | |
325 | *conn = (struct lwan_connection) { |
326 | .coro = coro_new(switcher, process_request_coro, conn), |
327 | .flags = CONN_EVENTS_READ, |
328 | .time_to_expire = tq->current_time + tq->move_to_last_bump, |
329 | .thread = t, |
330 | }; |
331 | if (LIKELY(conn->coro)__builtin_expect((!!(conn->coro)), (1))) { |
332 | timeout_queue_insert(tq, conn); |
333 | return; |
334 | } |
335 | |
336 | /* FIXME: send a "busy" response to this client? we don't have a coroutine |
337 | * at this point, can't use lwan_send() here */ |
338 | lwan_status_error("Could not create coroutine, dropping connection")lwan_status_error_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 338, __FUNCTION__, "Could not create coroutine, dropping connection" ); |
339 | |
340 | conn->flags = 0; |
341 | |
342 | int fd = lwan_connection_get_fd(tq->lwan, conn); |
343 | shutdown(fd, SHUT_RDWRSHUT_RDWR); |
344 | close(fd); |
345 | } |
346 | |
347 | static bool_Bool process_pending_timers(struct timeout_queue *tq, |
348 | struct lwan_thread *t, |
349 | int epoll_fd) |
350 | { |
351 | struct timeout *timeout; |
352 | bool_Bool should_expire_timers = false0; |
353 | |
354 | while ((timeout = timeouts_get(t->wheel))) { |
355 | struct lwan_request *request; |
356 | |
357 | if (timeout == &tq->timeout) { |
358 | should_expire_timers = true1; |
359 | continue; |
360 | } |
361 | |
362 | request = container_of(timeout, struct lwan_request, timeout)((struct lwan_request *) ((char *)(timeout) - __builtin_offsetof (struct lwan_request, timeout)) + ((typeof(*(timeout)) *)0 != (typeof(((struct lwan_request *)0)->timeout) *)0)); |
363 | |
364 | update_epoll_flags(request->fd, request->conn, epoll_fd, |
365 | CONN_CORO_RESUME); |
366 | } |
367 | |
368 | if (should_expire_timers) { |
369 | timeout_queue_expire_waiting(tq); |
370 | |
371 | /* tq timeout expires every 1000ms if there are connections, so |
372 | * update the date cache at this point as well. */ |
373 | update_date_cache(t); |
374 | |
375 | if (!timeout_queue_empty(tq)) { |
376 | timeouts_add(t->wheel, &tq->timeout, 1000); |
377 | return true1; |
378 | } |
379 | |
380 | timeouts_del(t->wheel, &tq->timeout); |
381 | } |
382 | |
383 | return false0; |
384 | } |
385 | |
386 | static int |
387 | turn_timer_wheel(struct timeout_queue *tq, struct lwan_thread *t, int epoll_fd) |
388 | { |
389 | const int infinite_timeout = -1; |
390 | timeout_t wheel_timeout; |
391 | struct timespec now; |
392 | |
393 | if (UNLIKELY(clock_gettime(monotonic_clock_id, &now) < 0)__builtin_expect(((clock_gettime(monotonic_clock_id, &now ) < 0)), (0))) |
394 | lwan_status_critical("Could not get monotonic time")lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 394, __FUNCTION__, "Could not get monotonic time"); |
395 | |
396 | timeouts_update(t->wheel, |
397 | (timeout_t)(now.tv_sec * 1000 + now.tv_nsec / 1000000)); |
398 | |
399 | /* Check if there's an expired timer. */ |
400 | wheel_timeout = timeouts_timeout(t->wheel); |
401 | if (wheel_timeout > 0) { |
402 | return (int)wheel_timeout; /* No, but will soon. Wake us up in |
403 | wheel_timeout ms. */ |
404 | } |
405 | |
406 | if (UNLIKELY((int64_t)wheel_timeout < 0)__builtin_expect((((int64_t)wheel_timeout < 0)), (0))) |
407 | return infinite_timeout; /* None found. */ |
408 | |
409 | if (!process_pending_timers(tq, t, epoll_fd)) |
410 | return infinite_timeout; /* No more timers to process. */ |
411 | |
412 | /* After processing pending timers, determine when to wake up. */ |
413 | return (int)timeouts_timeout(t->wheel); |
414 | } |
415 | |
416 | static bool_Bool accept_waiting_clients(const struct lwan_thread *t) |
417 | { |
418 | const struct lwan_connection *conns = t->lwan->conns; |
419 | |
420 | while (true1) { |
421 | int fd = |
422 | accept4(t->listen_fd, NULL((void*)0), NULL((void*)0), SOCK_NONBLOCKSOCK_NONBLOCK | SOCK_CLOEXECSOCK_CLOEXEC); |
423 | |
424 | if (LIKELY(fd >= 0)__builtin_expect((!!(fd >= 0)), (1))) { |
425 | const struct lwan_connection *conn = &conns[fd]; |
426 | struct epoll_event ev = { |
427 | .data.ptr = (void *)conn, |
428 | .events = conn_flags_to_epoll_events(CONN_EVENTS_READ), |
429 | }; |
430 | int r = epoll_ctl(conn->thread->epoll_fd, EPOLL_CTL_ADD1, fd, &ev); |
431 | |
432 | if (UNLIKELY(r < 0)__builtin_expect(((r < 0)), (0))) { |
433 | /* FIXME: send a "busy" response here? No coroutine has been |
434 | * created at this point to use the usual stuff, though. */ |
435 | lwan_status_perror("Could not add file descriptor %d to epoll "lwan_status_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 437, __FUNCTION__, "Could not add file descriptor %d to epoll " "set %d. Dropping connection", fd, conn->thread->epoll_fd ) |
436 | "set %d. Dropping connection",lwan_status_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 437, __FUNCTION__, "Could not add file descriptor %d to epoll " "set %d. Dropping connection", fd, conn->thread->epoll_fd ) |
437 | fd, conn->thread->epoll_fd)lwan_status_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 437, __FUNCTION__, "Could not add file descriptor %d to epoll " "set %d. Dropping connection", fd, conn->thread->epoll_fd ); |
438 | shutdown(fd, SHUT_RDWRSHUT_RDWR); |
439 | close(fd); |
440 | } |
441 | |
442 | if (SO_INCOMING_CPU_SUPPORTED1) { |
443 | /* Ignore errors here, as this is just a hint */ |
444 | (void)setsockopt(fd, SOL_SOCKET1, SO_INCOMING_CPU49, &t->cpu, sizeof(t->cpu)); |
445 | } |
446 | |
447 | continue; |
448 | } |
449 | |
450 | switch (errno(*__errno_location ())) { |
451 | default: |
452 | lwan_status_perror("Unexpected error while accepting connections")lwan_status_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 452, __FUNCTION__, "Unexpected error while accepting connections" ); |
453 | /* fallthrough */ |
454 | |
455 | case EAGAIN11: |
456 | return true1; |
457 | |
458 | case EBADF9: |
459 | case ECONNABORTED103: |
460 | case EINVAL22: |
461 | lwan_status_info("Listening socket closed")lwan_status_info_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 461, __FUNCTION__, "Listening socket closed"); |
462 | return false0; |
463 | } |
464 | } |
465 | |
466 | __builtin_unreachable(); |
467 | } |
468 | |
469 | static int create_listen_socket(struct lwan_thread *t, bool_Bool print_listening_msg) |
470 | { |
471 | int listen_fd; |
472 | |
473 | listen_fd = lwan_create_listen_socket(t->lwan, print_listening_msg); |
474 | if (listen_fd < 0) |
475 | lwan_status_critical("Could not create listen_fd")lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 475, __FUNCTION__, "Could not create listen_fd"); |
476 | |
477 | if (SO_INCOMING_CPU_SUPPORTED1) { |
478 | /* Ignore errors here, as this is just a hint */ |
479 | (void)setsockopt(listen_fd, SOL_SOCKET1, SO_INCOMING_CPU49, &t->cpu, sizeof(t->cpu)); |
480 | } |
481 | |
482 | struct epoll_event event = { |
483 | .events = EPOLLINEPOLLIN | EPOLLETEPOLLET | EPOLLERREPOLLERR, |
484 | .data.ptr = NULL((void*)0), |
485 | }; |
486 | if (epoll_ctl(t->epoll_fd, EPOLL_CTL_ADD1, listen_fd, &event) < 0) |
487 | lwan_status_critical_perror("Could not add socket to epoll")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 487, __FUNCTION__, "Could not add socket to epoll"); |
488 | |
489 | return listen_fd; |
490 | } |
491 | |
492 | static void *thread_io_loop(void *data) |
493 | { |
494 | struct lwan_thread *t = data; |
495 | int epoll_fd = t->epoll_fd; |
496 | const int max_events = LWAN_MIN((int)t->lwan->thread.max_fd, 1024)({ const __typeof__(((int)t->lwan->thread.max_fd) + 0) lwan_tmp_id4 = ((int)t->lwan->thread.max_fd); const __typeof__((1024 ) + 0) lwan_tmp_id5 = (1024); lwan_tmp_id4 > lwan_tmp_id5 ? lwan_tmp_id5 : lwan_tmp_id4; }); |
497 | struct lwan *lwan = t->lwan; |
498 | struct epoll_event *events; |
499 | struct coro_switcher switcher; |
500 | struct timeout_queue tq; |
501 | |
502 | lwan_status_debug("Worker thread #%zd starting",lwan_status_debug_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 503, __FUNCTION__, "Worker thread #%zd starting", t - t-> lwan->thread.threads + 1) |
503 | t - t->lwan->thread.threads + 1)lwan_status_debug_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 503, __FUNCTION__, "Worker thread #%zd starting", t - t-> lwan->thread.threads + 1); |
504 | lwan_set_thread_name("worker"); |
505 | |
506 | events = calloc((size_t)max_events, sizeof(*events)); |
507 | if (UNLIKELY(!events)__builtin_expect(((!events)), (0))) |
508 | lwan_status_critical("Could not allocate memory for events")lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 508, __FUNCTION__, "Could not allocate memory for events"); |
509 | |
510 | update_date_cache(t); |
511 | |
512 | timeout_queue_init(&tq, lwan); |
513 | |
514 | pthread_barrier_wait(&lwan->thread.barrier); |
515 | |
516 | for (;;) { |
517 | int timeout = turn_timer_wheel(&tq, t, epoll_fd); |
518 | int n_fds = epoll_wait(epoll_fd, events, max_events, timeout); |
519 | bool_Bool accepted_connections = false0; |
520 | |
521 | if (UNLIKELY(n_fds < 0)__builtin_expect(((n_fds < 0)), (0))) { |
522 | if (errno(*__errno_location ()) == EBADF9 || errno(*__errno_location ()) == EINVAL22) |
523 | break; |
524 | continue; |
525 | } |
526 | |
527 | for (struct epoll_event *event = events; n_fds--; event++) { |
528 | struct lwan_connection *conn; |
529 | |
530 | if (!event->data.ptr) { |
531 | if (LIKELY(accept_waiting_clients(t))__builtin_expect((!!(accept_waiting_clients(t))), (1))) { |
532 | accepted_connections = true1; |
533 | continue; |
534 | } |
535 | close(epoll_fd); |
536 | epoll_fd = -1; |
537 | break; |
538 | } |
539 | |
540 | conn = event->data.ptr; |
541 | |
542 | if (UNLIKELY(event->events & (EPOLLRDHUP | EPOLLHUP))__builtin_expect(((event->events & (EPOLLRDHUP | EPOLLHUP ))), (0))) { |
543 | timeout_queue_expire(&tq, conn); |
544 | continue; |
545 | } |
546 | |
547 | if (!conn->coro) { |
548 | spawn_coro(conn, &switcher, &tq); |
549 | continue; |
550 | } |
551 | |
552 | resume_coro(&tq, conn, epoll_fd); |
553 | timeout_queue_move_to_last(&tq, conn); |
554 | } |
555 | |
556 | if (accepted_connections) { |
557 | timeouts_add(t->wheel, &tq.timeout, 1000); |
558 | accepted_connections = false0; |
Value stored to 'accepted_connections' is never read | |
559 | } |
560 | } |
561 | |
562 | pthread_barrier_wait(&lwan->thread.barrier); |
563 | |
564 | timeout_queue_expire_all(&tq); |
565 | free(events); |
566 | |
567 | return NULL((void*)0); |
568 | } |
569 | |
570 | static void create_thread(struct lwan *l, struct lwan_thread *thread) |
571 | { |
572 | int ignore; |
573 | pthread_attr_t attr; |
574 | |
575 | thread->lwan = l; |
576 | |
577 | thread->wheel = timeouts_open(&ignore); |
578 | if (!thread->wheel) |
579 | lwan_status_critical("Could not create timer wheel")lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 579, __FUNCTION__, "Could not create timer wheel"); |
580 | |
581 | if ((thread->epoll_fd = epoll_create1(EPOLL_CLOEXECEPOLL_CLOEXEC)) < 0) |
582 | lwan_status_critical_perror("epoll_create")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 582, __FUNCTION__, "epoll_create"); |
583 | |
584 | if (pthread_attr_init(&attr)) |
585 | lwan_status_critical_perror("pthread_attr_init")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 585, __FUNCTION__, "pthread_attr_init"); |
586 | |
587 | if (pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEMPTHREAD_SCOPE_SYSTEM)) |
588 | lwan_status_critical_perror("pthread_attr_setscope")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 588, __FUNCTION__, "pthread_attr_setscope"); |
589 | |
590 | if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLEPTHREAD_CREATE_JOINABLE)) |
591 | lwan_status_critical_perror("pthread_attr_setdetachstate")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 591, __FUNCTION__, "pthread_attr_setdetachstate"); |
592 | |
593 | if (pthread_create(&thread->self, &attr, thread_io_loop, thread)) |
594 | lwan_status_critical_perror("pthread_create")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 594, __FUNCTION__, "pthread_create"); |
595 | |
596 | if (pthread_attr_destroy(&attr)) |
597 | lwan_status_critical_perror("pthread_attr_destroy")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 597, __FUNCTION__, "pthread_attr_destroy"); |
598 | } |
599 | |
600 | #if defined(__linux__1) && defined(__x86_64__1) |
601 | static bool_Bool read_cpu_topology(struct lwan *l, uint32_t siblings[]) |
602 | { |
603 | char path[PATH_MAX4096]; |
604 | |
605 | for (uint32_t i = 0; i < l->available_cpus; i++) |
606 | siblings[i] = 0xbebacafe; |
607 | |
608 | for (unsigned int i = 0; i < l->available_cpus; i++) { |
609 | FILE *sib; |
610 | uint32_t id, sibling; |
611 | char separator; |
612 | |
613 | snprintf(path, sizeof(path), |
614 | "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", |
615 | i); |
616 | |
617 | sib = fopen(path, "re"); |
618 | if (!sib) { |
619 | lwan_status_warning("Could not open `%s` to determine CPU topology",lwan_status_warning_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 620, __FUNCTION__, "Could not open `%s` to determine CPU topology" , path) |
620 | path)lwan_status_warning_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 620, __FUNCTION__, "Could not open `%s` to determine CPU topology" , path); |
621 | return false0; |
622 | } |
623 | |
624 | switch (fscanf(sib, "%u%c%u", &id, &separator, &sibling)) { |
625 | case 2: /* No SMT */ |
626 | siblings[i] = id; |
627 | break; |
628 | case 3: /* SMT */ |
629 | if (!(separator == ',' || separator == '-')) { |
630 | lwan_status_critical("Expecting either ',' or '-' for sibling separator")lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 630, __FUNCTION__, "Expecting either ',' or '-' for sibling separator" ); |
631 | __builtin_unreachable(); |
632 | } |
633 | |
634 | siblings[i] = sibling; |
635 | break; |
636 | default: |
637 | lwan_status_critical("%s has invalid format", path)lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 637, __FUNCTION__, "%s has invalid format", path); |
638 | __builtin_unreachable(); |
639 | } |
640 | |
641 | fclose(sib); |
642 | } |
643 | |
644 | /* Perform a sanity check here, as some systems seem to filter out the |
645 | * result of sysconf() to obtain the number of configured and online |
646 | * CPUs but don't bother changing what's available through sysfs as far |
647 | * as the CPU topology information goes. It's better to fall back to a |
648 | * possibly non-optimal setup than just crash during startup while |
649 | * trying to perform an out-of-bounds array access. */ |
650 | for (unsigned int i = 0; i < l->available_cpus; i++) { |
651 | if (siblings[i] == 0xbebacafe) { |
652 | lwan_status_warning("Could not determine sibling for CPU %d", i)lwan_status_warning_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 652, __FUNCTION__, "Could not determine sibling for CPU %d" , i); |
653 | return false0; |
654 | } |
655 | |
656 | if (siblings[i] >= l->available_cpus) { |
657 | lwan_status_warning("CPU information topology says CPU %d exists, "lwan_status_warning_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 660, __FUNCTION__, "CPU information topology says CPU %d exists, " "but max available CPUs is %d (online CPUs: %d). " "Is Lwan running in a (broken) container?" , siblings[i], l->available_cpus, l->online_cpus) |
658 | "but max available CPUs is %d (online CPUs: %d). "lwan_status_warning_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 660, __FUNCTION__, "CPU information topology says CPU %d exists, " "but max available CPUs is %d (online CPUs: %d). " "Is Lwan running in a (broken) container?" , siblings[i], l->available_cpus, l->online_cpus) |
659 | "Is Lwan running in a (broken) container?",lwan_status_warning_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 660, __FUNCTION__, "CPU information topology says CPU %d exists, " "but max available CPUs is %d (online CPUs: %d). " "Is Lwan running in a (broken) container?" , siblings[i], l->available_cpus, l->online_cpus) |
660 | siblings[i], l->available_cpus, l->online_cpus)lwan_status_warning_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 660, __FUNCTION__, "CPU information topology says CPU %d exists, " "but max available CPUs is %d (online CPUs: %d). " "Is Lwan running in a (broken) container?" , siblings[i], l->available_cpus, l->online_cpus); |
661 | return false0; |
662 | } |
663 | } |
664 | |
665 | return true1; |
666 | } |
667 | |
668 | static void |
669 | siblings_to_schedtbl(struct lwan *l, uint32_t siblings[], uint32_t schedtbl[]) |
670 | { |
671 | int *seen = alloca(l->available_cpus * sizeof(int))__builtin_alloca (l->available_cpus * sizeof(int)); |
672 | unsigned int n_schedtbl = 0; |
673 | |
674 | for (uint32_t i = 0; i < l->available_cpus; i++) |
675 | seen[i] = -1; |
676 | |
677 | for (uint32_t i = 0; i < l->available_cpus; i++) { |
678 | if (seen[siblings[i]] < 0) { |
679 | seen[siblings[i]] = (int)i; |
680 | } else { |
681 | schedtbl[n_schedtbl++] = (uint32_t)seen[siblings[i]]; |
682 | schedtbl[n_schedtbl++] = i; |
683 | } |
684 | } |
685 | |
686 | if (n_schedtbl != l->available_cpus) |
687 | memcpy(schedtbl, seen, l->available_cpus * sizeof(int)); |
688 | } |
689 | |
690 | static bool_Bool |
691 | topology_to_schedtbl(struct lwan *l, uint32_t schedtbl[], uint32_t n_threads) |
692 | { |
693 | uint32_t *siblings = alloca(l->available_cpus * sizeof(uint32_t))__builtin_alloca (l->available_cpus * sizeof(uint32_t)); |
694 | |
695 | if (read_cpu_topology(l, siblings)) { |
696 | uint32_t *affinity = alloca(l->available_cpus * sizeof(uint32_t))__builtin_alloca (l->available_cpus * sizeof(uint32_t)); |
697 | |
698 | siblings_to_schedtbl(l, siblings, affinity); |
699 | |
700 | for (uint32_t i = 0; i < n_threads; i++) |
701 | schedtbl[i] = affinity[i % l->available_cpus]; |
702 | return true1; |
703 | } |
704 | |
705 | for (uint32_t i = 0; i < n_threads; i++) |
706 | schedtbl[i] = (i / 2) % l->thread.count; |
707 | return false0; |
708 | } |
709 | |
710 | static void |
711 | adjust_threads_affinity(struct lwan *l, uint32_t *schedtbl, uint32_t mask) |
712 | { |
713 | for (uint32_t i = 0; i < l->thread.count; i++) { |
714 | cpu_set_t set; |
715 | |
716 | CPU_ZERO(&set)do __builtin_memset (&set, '\0', sizeof (cpu_set_t)); while (0); |
717 | CPU_SET(schedtbl[i & mask], &set)(__extension__ ({ size_t __cpu = (schedtbl[i & mask]); __cpu / 8 < (sizeof (cpu_set_t)) ? (((__cpu_mask *) ((&set) ->__bits))[((__cpu) / (8 * sizeof (__cpu_mask)))] |= ((__cpu_mask ) 1 << ((__cpu) % (8 * sizeof (__cpu_mask))))) : 0; })); |
718 | |
719 | if (pthread_setaffinity_np(l->thread.threads[i].self, sizeof(set), |
720 | &set)) |
721 | lwan_status_warning("Could not set affinity for thread %d", i)lwan_status_warning_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 721, __FUNCTION__, "Could not set affinity for thread %d", i ); |
722 | } |
723 | } |
724 | #elif defined(__x86_64__1) |
725 | static bool_Bool |
726 | topology_to_schedtbl(struct lwan *l, uint32_t schedtbl[], uint32_t n_threads) |
727 | { |
728 | for (uint32_t i = 0; i < n_threads; i++) |
729 | schedtbl[i] = (i / 2) % l->thread.count; |
730 | return false0; |
731 | } |
732 | |
/* Intentionally a no-op: topology-aware thread pinning is only implemented
 * in the branch above; on this platform the scheduler places threads freely.
 * Parameters are kept so callers compile identically on both branches. */
static void
adjust_threads_affinity(struct lwan *l, uint32_t *schedtbl, uint32_t n)
{
}
737 | #endif |
738 | |
739 | void lwan_thread_init(struct lwan *l) |
740 | { |
741 | const unsigned int total_conns = l->thread.max_fd * l->thread.count; |
742 | |
743 | if (pthread_barrier_init(&l->thread.barrier, NULL((void*)0), |
744 | (unsigned)l->thread.count + 1)) |
745 | lwan_status_critical("Could not create barrier")lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 745, __FUNCTION__, "Could not create barrier"); |
746 | |
747 | lwan_status_debug("Initializing threads")lwan_status_debug_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 747, __FUNCTION__, "Initializing threads"); |
748 | |
749 | l->thread.threads = |
750 | calloc((size_t)l->thread.count, sizeof(struct lwan_thread)); |
751 | if (!l->thread.threads) |
752 | lwan_status_critical("Could not allocate memory for threads")lwan_status_critical_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 752, __FUNCTION__, "Could not allocate memory for threads"); |
753 | |
754 | #ifdef __x86_64__1 |
755 | static_assert(sizeof(struct lwan_connection) == 32,extern int (*__Static_assert_function (void)) [!!sizeof (struct { int __error_if_negative: (sizeof(struct lwan_connection) == 32) ? 2 : -1; })] |
756 | "Two connections per cache line")extern int (*__Static_assert_function (void)) [!!sizeof (struct { int __error_if_negative: (sizeof(struct lwan_connection) == 32) ? 2 : -1; })]; |
757 | #ifdef _SC_LEVEL1_DCACHE_LINESIZE_SC_LEVEL1_DCACHE_LINESIZE |
758 | assert(sysconf(_SC_LEVEL1_DCACHE_LINESIZE) == 64)((void) sizeof ((sysconf(_SC_LEVEL1_DCACHE_LINESIZE) == 64) ? 1 : 0), __extension__ ({ if (sysconf(_SC_LEVEL1_DCACHE_LINESIZE ) == 64) ; else __assert_fail ("sysconf(_SC_LEVEL1_DCACHE_LINESIZE) == 64" , "/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 758, __extension__ __PRETTY_FUNCTION__); })); |
759 | #endif |
760 | |
761 | lwan_status_debug("%d CPUs of %d are online. "lwan_status_debug_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 763, __FUNCTION__, "%d CPUs of %d are online. " "Reading topology to pre-schedule clients" , l->online_cpus, l->available_cpus) |
762 | "Reading topology to pre-schedule clients",lwan_status_debug_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 763, __FUNCTION__, "%d CPUs of %d are online. " "Reading topology to pre-schedule clients" , l->online_cpus, l->available_cpus) |
763 | l->online_cpus, l->available_cpus)lwan_status_debug_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 763, __FUNCTION__, "%d CPUs of %d are online. " "Reading topology to pre-schedule clients" , l->online_cpus, l->available_cpus); |
764 | |
765 | /* |
766 | * Pre-schedule each file descriptor, to reduce some operations in the |
767 | * fast path. |
768 | * |
769 | * Since struct lwan_connection is guaranteed to be 32-byte long, two of |
770 | * them can fill up a cache line. Assume siblings share cache lines and |
771 | * use the CPU topology to group two connections per cache line in such |
772 | * a way that false sharing is avoided. |
773 | */ |
774 | uint32_t n_threads = (uint32_t)lwan_nextpow2((size_t)((l->thread.count - 1) * 2)); |
775 | uint32_t *schedtbl = alloca(n_threads * sizeof(uint32_t))__builtin_alloca (n_threads * sizeof(uint32_t)); |
776 | |
777 | bool_Bool adj_affinity = topology_to_schedtbl(l, schedtbl, n_threads); |
778 | |
779 | n_threads--; /* Transform count into mask for AND below */ |
780 | |
781 | if (adj_affinity) { |
782 | /* Save which CPU this tread will be pinned at so we can use |
783 | * SO_INCOMING_CPU later. */ |
784 | for (unsigned int i = 0; i < l->thread.count; i++) |
785 | l->thread.threads[i].cpu = schedtbl[i & n_threads]; |
786 | } |
787 | |
788 | for (unsigned int i = 0; i < total_conns; i++) |
789 | l->conns[i].thread = &l->thread.threads[schedtbl[i & n_threads]]; |
790 | #else |
791 | for (unsigned int i = 0; i < l->thread.count; i++) |
792 | l->thread.threads[i].cpu = i % l->thread.count; |
793 | for (unsigned int i = 0; i < total_conns; i++) |
794 | l->conns[i].thread = &l->thread.threads[i % l->thread.count]; |
795 | #endif |
796 | |
797 | for (unsigned int i = 0; i < l->thread.count; i++) { |
798 | struct lwan_thread *thread = &l->thread.threads[i]; |
799 | |
800 | create_thread(l, thread); |
801 | |
802 | if ((thread->listen_fd = create_listen_socket(thread, i == 0)) < 0) |
803 | lwan_status_critical_perror("Could not create listening socket")lwan_status_critical_perror_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 803, __FUNCTION__, "Could not create listening socket"); |
804 | } |
805 | |
806 | #ifdef __x86_64__1 |
807 | if (adj_affinity) |
808 | adjust_threads_affinity(l, schedtbl, n_threads); |
809 | #endif |
810 | |
811 | pthread_barrier_wait(&l->thread.barrier); |
812 | |
813 | lwan_status_debug("Worker threads created and ready to serve")lwan_status_debug_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 813, __FUNCTION__, "Worker threads created and ready to serve" ); |
814 | } |
815 | |
816 | void lwan_thread_shutdown(struct lwan *l) |
817 | { |
818 | lwan_status_debug("Shutting down threads")lwan_status_debug_debug("/home/buildbot/lwan-worker/clang-analyze/build/src/lib/lwan-thread.c" , 818, __FUNCTION__, "Shutting down threads"); |
819 | |
820 | for (unsigned int i = 0; i < l->thread.count; i++) { |
821 | struct lwan_thread *t = &l->thread.threads[i]; |
822 | int epoll_fd = t->epoll_fd; |
823 | int listen_fd = t->listen_fd; |
824 | |
825 | t->listen_fd = -1; |
826 | t->epoll_fd = -1; |
827 | close(epoll_fd); |
828 | close(listen_fd); |
829 | } |
830 | |
831 | pthread_barrier_wait(&l->thread.barrier); |
832 | pthread_barrier_destroy(&l->thread.barrier); |
833 | |
834 | for (unsigned int i = 0; i < l->thread.count; i++) { |
835 | struct lwan_thread *t = &l->thread.threads[i]; |
836 | |
837 | pthread_join(l->thread.threads[i].self, NULL((void*)0)); |
838 | timeouts_close(t->wheel); |
839 | } |
840 | |
841 | free(l->thread.threads); |
842 | } |