/*
 * Copyright (C) 2012-2020 all contributors <cmogstored-public@yhbt.net>
 * License: GPL-3.0+ <https://www.gnu.org/licenses/gpl-3.0.txt>
 */
#include "cmogstored.h"
/*
 * Attempt to swap the currently-active client for a more recently-ready
 * one.  Returns the mog_fd this thread should service next: either a
 * freshly-ready client pulled off the idle queue, or mfd itself when
 * nothing new is pending.
 */
static struct mog_fd *queue_xchg_maybe(struct mog_queue *q, struct mog_fd *mfd)
{
	/*
	 * idle, just-ready clients are the most important.
	 * We use a zero timeout here since epoll_wait() is
	 * optimized for the non-blocking case.
	 */
	struct mog_fd *recent_mfd = mog_idleq_wait(q, 0);

	if (recent_mfd) {
		/*
		 * We got a more important client; push mfd into the
		 * active queue for another thread to service while
		 * we service the more recently-active client.
		 */
		mog_activeq_push(q, mfd);

		return recent_mfd;
	}

	/*
	 * keep processing the currently-active mfd in this thread
	 * if no new work came up
	 */
	return mfd;
}
34 /* passed as a start_routine to pthread_create */
35 void * mog_queue_loop(void *arg
)
37 struct mog_queue
*q
= arg
;
38 struct mog_fd
*mfd
= NULL
;
40 syslog(LOG_DEBUG
, "mog_queue_loop[%lx] thread ready",
41 (unsigned long)pthread_self());
45 mfd
= mog_idleq_wait(q
, -1);
46 switch (mog_queue_step(mfd
)) {
48 mfd
= queue_xchg_maybe(q
, mfd
);
50 case MOG_NEXT_WAIT_RD
:
51 mfd
= mog_queue_xchg(q
, mfd
, MOG_QEV_RD
);
53 case MOG_NEXT_WAIT_WR
:
54 mfd
= mog_queue_xchg(q
, mfd
, MOG_QEV_WR
);
58 /* already hanndled */
59 mfd
= mog_idleq_wait(q
, -1);
66 static void queue_quit_step(struct mog_fd
*mfd
)
68 switch (mfd
->fd_type
) {
69 case MOG_FD_TYPE_MGMT
: mog_mgmt_quit_step(mfd
); return;
70 case MOG_FD_TYPE_HTTP
:
71 case MOG_FD_TYPE_HTTPGET
:
72 mog_http_quit_step(mfd
); return;
73 case MOG_FD_TYPE_FILE
:
74 case MOG_FD_TYPE_QUEUE
:
76 assert(0 && "invalid fd_type in queue_quit_step");
82 /* called at shutdown when only one thread is active */
83 void mog_queue_quit_loop(struct mog_queue
*queue
)
87 while (mog_nr_active_at_quit
) {
88 assert(mog_nr_active_at_quit
<= (size_t)INT_MAX
89 && "mog_nr_active_at_quit underflow");
91 if ((mfd
= mog_idleq_wait_intr(queue
, -1)))