1 /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
5 * Copyright (C) 2001-2010, Eduardo Silva P. <edsiper@gmail.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU Library General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
26 #include <sys/epoll.h>
28 #include <sys/syscall.h>
#include <stdint.h>
32 #include "connection.h"
33 #include "scheduler.h"
44 /* Register thread information */
/*
 * mk_sched_register_thread(): build a sched_list_node for a new worker
 * thread 'tid' that owns epoll fd 'efd'.  Allocates the node and its
 * connection queue (config->worker_capacity slots), pre-allocates a
 * 16-byte textual-IPv4 buffer per slot, marks every slot available and
 * gives the node a sequential index (previous node's idx + 1).
 *
 * NOTE(review): the declarations/assignments of 'i' and 'aux' are on
 * lines elided from this view; 'aux' presumably points at the current
 * tail of the scheduler list -- confirm against the full source.
 */
45 int mk_sched_register_thread(pthread_t tid
, int efd
)
/* New node plus a cursor over the existing scheduler list. */
48 struct sched_list_node
*sr
, *aux
;
/* Zeroed allocation: every field starts NULL/0. */
50 sr
= mk_mem_malloc_z(sizeof(struct sched_list_node
));
/* Per-worker connection queue: one slot per possible client. */
54 sr
->queue
= mk_mem_malloc_z(sizeof(struct sched_connection
) *
55 config
->worker_capacity
);
56 sr
->request_handler
= NULL
;
/* Initialize every queue slot as free. */
59 for (i
= 0; i
< config
->worker_capacity
; i
++) {
60 /* Pre alloc IPv4 memory buffer */
/* 16 bytes: enough for "255.255.255.255" plus the NUL terminator. */
61 sr
->queue
[i
].ipv4
.data
= mk_mem_malloc_z(16);
62 sr
->queue
[i
].status
= MK_SCHEDULER_CONN_AVAILABLE
;
/* Sequential worker index: one past the previous node's. */
75 sr
->idx
= aux
->idx
+ 1;
82 * Create thread which will be listening
83 * for incomings file descriptors
/*
 * mk_sched_launch_thread(): spawn one detached worker thread running
 * mk_sched_launch_epoll_loop() and register it with the scheduler.
 *
 * A local mutex is locked before pthread_create() and released only
 * after mk_sched_register_thread() completes, so the new thread can
 * synchronize on its sched_list_node existing before serving events.
 *
 * NOTE(review): 'efd', 'tid' and 'attr' are declared on lines elided
 * from this view, as is the error/return path after perror().
 */
85 int mk_sched_launch_thread(int max_events
)
90 sched_thread_conf
*thconf
;
/* Guards the window between thread creation and registration. */
91 pthread_mutex_t mutex_wait_register
;
93 /* Creating epoll file descriptor */
94 efd
= mk_epoll_create(max_events
);
100 pthread_mutex_init(&mutex_wait_register
, (pthread_mutexattr_t
*) NULL
);
101 pthread_mutex_lock(&mutex_wait_register
);
/*
 * Per-thread configuration handed to the worker; note mk_mem_malloc
 * (not the zeroing variant) -- every field is assigned below.
 * Ownership appears to pass to the worker thread -- confirm who frees.
 */
103 thconf
= mk_mem_malloc(sizeof(sched_thread_conf
));
104 thconf
->epoll_fd
= efd
;
105 thconf
->max_events
= max_events
;
/* Detached thread: it is never joined. */
107 pthread_attr_init(&attr
);
108 pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
109 if (pthread_create(&tid
, &attr
, mk_sched_launch_epoll_loop
,
110 (void *) thconf
) != 0) {
111 perror("pthread_create");
115 /* Register working thread */
116 mk_sched_register_thread(tid
, efd
);
/* Registration done: let the worker proceed. */
117 pthread_mutex_unlock(&mutex_wait_register
);
122 /* created thread, all this calls are in the thread context */
/*
 * mk_sched_launch_epoll_loop(): worker-thread entry point.  Receives a
 * sched_thread_conf* (epoll fd + max events), sets up per-thread state
 * (SIGPIPE handling, cache, plugins, epoll handlers, TLS epoll fd) and
 * then enters the epoll event loop via mk_epoll_init().
 *
 * NOTE(review): the return statement and some setup lines are elided
 * from this view.
 */
123 void *mk_sched_launch_epoll_loop(void *thread_conf
)
125 sched_thread_conf
*thconf
;
126 struct sched_list_node
*thinfo
;
127 mk_epoll_handlers
*handler
;
129 /* Avoid SIGPIPE signals */
130 mk_signal_thread_sigpipe_safe();
132 thconf
= thread_conf
;
134 /* Init specific thread cache */
135 mk_cache_thread_init();
136 mk_plugin_worker_startup();
138 /* Epoll event handlers */
/* Callbacks the epoll loop invokes per event type. */
139 handler
= mk_epoll_set_handlers((void *) mk_conn_read
,
140 (void *) mk_conn_write
,
141 (void *) mk_conn_error
,
142 (void *) mk_conn_close
,
143 (void *) mk_conn_timeout
)
145 /* Nasty way to export task id */
/*
 * NOTE(review): thinfo is fetched twice (original lines 147 and 149);
 * elided lines in between may loop/wait until registration completes --
 * otherwise the first fetch is redundant.  Confirm against full source.
 */
147 thinfo
= mk_sched_get_thread_conf();
149 thinfo
= mk_sched_get_thread_conf();
152 /* Glibc doesn't export to user space the gettid() syscall */
153 thinfo
->pid
= syscall(__NR_gettid
);
/* Publish this worker's epoll fd in thread-local storage. */
155 mk_sched_set_thread_poll(thconf
->epoll_fd
);
/* Enter the event loop (does not normally return). */
156 mk_epoll_init(thconf
->epoll_fd
, handler
, thconf
->max_events
);
161 struct request_idx
*mk_sched_get_request_index()
163 return pthread_getspecific(request_index
);
166 void mk_sched_set_request_index(struct request_idx
*ri
)
168 pthread_setspecific(request_index
, (void *) ri
);
171 void mk_sched_set_thread_poll(int epoll
)
173 pthread_setspecific(epoll_fd
, (void *) epoll
);
176 int mk_sched_get_thread_poll()
178 return (int) pthread_getspecific(epoll_fd
);
181 struct sched_list_node
/*
 * mk_sched_get_thread_conf(): find the sched_list_node whose registered
 * thread id matches the calling thread (pthread_self()).
 *
 * NOTE(review): the list-walk loop around the pthread_equal() check,
 * the declaration of 'current' and the not-found return are on lines
 * elided from this view.
 */
*mk_sched_get_thread_conf()
183 struct sched_list_node
*node
;
186 current
= pthread_self();
/* pthread_equal() returns non-zero when the two ids are equal. */
189 if (pthread_equal(node
->tid
, current
) != 0) {
/*
 * mk_sched_add_client(): place an accepted socket 'remote_fd' into the
 * first free slot of 'sched's connection queue, record its IPv4 text
 * address and arrival time, run plugin stage 20, and register the fd
 * with the worker's epoll instance for read events.
 *
 * Returns MK_PLUGIN_RET_CLOSE_CONX if a stage-20 plugin vetoes the
 * connection; 'i' and 'ret' declarations plus the remaining return
 * paths (including the queue-full case) are on lines elided from this
 * view.
 */
198 int mk_sched_add_client(struct sched_list_node
*sched
, int remote_fd
)
202 /* Look for an available slot */
203 for (i
= 0; i
< config
->worker_capacity
; i
++) {
204 if (sched
->queue
[i
].status
== MK_SCHEDULER_CONN_AVAILABLE
) {
/* Fill the slot's pre-allocated buffer with the peer's IPv4 text form. */
206 mk_socket_get_ip(remote_fd
, sched
->queue
[i
].ipv4
.data
);
207 mk_pointer_set( &sched
->queue
[i
].ipv4
, sched
->queue
[i
].ipv4
.data
);
209 /* Before to continue, we need to run plugin stage 20 */
210 ret
= mk_plugin_stage_run(MK_PLUGIN_STAGE_20
,
212 &sched
->queue
[i
], NULL
, NULL
);
214 /* Close connection, otherwise continue */
215 if (ret
== MK_PLUGIN_RET_CLOSE_CONX
) {
216 mk_conn_close(remote_fd
);
217 return MK_PLUGIN_RET_CLOSE_CONX
;
220 /* Socket and status */
/* Claim the slot: fd, pending status, arrival timestamp. */
221 sched
->queue
[i
].socket
= remote_fd
;
222 sched
->queue
[i
].status
= MK_SCHEDULER_CONN_PENDING
;
223 sched
->queue
[i
].arrive_time
= log_current_utime
;
/* Hand the fd to this worker's epoll loop for read events. */
225 mk_epoll_add_client(sched
->epoll_fd
, remote_fd
, MK_EPOLL_READ
,
226 MK_EPOLL_BEHAVIOR_TRIGGERED
);
/*
 * mk_sched_remove_client(): release the queue slot associated with
 * 'remote_fd' by marking it available again.  The socket close and the
 * return value are on lines elided from this view.
 *
 * NOTE(review): 'sc' is dereferenced without a visible NULL check --
 * mk_sched_get_connection() may fail to find the fd; confirm a guard
 * exists on the elided lines.
 */
234 int mk_sched_remove_client(struct sched_list_node
*sched
, int remote_fd
)
236 struct sched_connection
*sc
;
238 sc
= mk_sched_get_connection(sched
, remote_fd
);
240 /* Close socket and change status */
242 sc
->status
= MK_SCHEDULER_CONN_AVAILABLE
;
/*
 * mk_sched_get_connection(): linear search of 'sched's queue for the
 * slot holding 'remote_fd'; returns a pointer to it.  The not-found
 * return path is on lines elided from this view.
 */
248 struct sched_connection
*mk_sched_get_connection(struct sched_list_node
249 *sched
, int remote_fd
)
/*
 * NOTE(review): 'sched' is re-fetched here from TLS; presumably this is
 * guarded by an if (!sched) on an elided line so a caller-supplied node
 * is not discarded -- confirm against the full source.
 */
254 sched
= mk_sched_get_thread_conf();
261 for (i
= 0; i
< config
->worker_capacity
; i
++) {
262 if (sched
->queue
[i
].socket
== remote_fd
) {
263 return &sched
->queue
[i
];
/*
 * mk_sched_check_timeouts(): sweep for expired connections.
 * Pass 1: pending (accepted, not yet processed) connections whose
 * arrive_time + config->timeout has elapsed are removed.
 * Pass 2: incomplete in-flight requests past their timeout get closed.
 *
 * NOTE(review): declarations of 'i', the loop around the req_cl walk,
 * and the return value are on lines elided from this view.
 */
270 int mk_sched_check_timeouts(struct sched_list_node
*sched
)
273 struct request_idx
*req_idx
;
274 struct client_request
*req_cl
;
276 /* PENDING CONN TIMEOUT */
277 for (i
= 0; i
< config
->worker_capacity
; i
++) {
278 if (sched
->queue
[i
].status
== MK_SCHEDULER_CONN_PENDING
) {
/* Expired when arrival + timeout is at or before "now" (elided RHS). */
279 if (sched
->queue
[i
].arrive_time
+ config
->timeout
<=
282 MK_TRACE("Scheduler, closing fd %i due TIMEOUT",
283 sched
->queue
[i
].socket
);
285 mk_sched_remove_client(sched
, sched
->queue
[i
].socket
);
290 /* PROCESSING CONN TIMEOUT */
291 req_idx
= mk_sched_get_request_index();
292 req_cl
= req_idx
->first
;
295 if (req_cl
->status
== MK_REQUEST_STATUS_INCOMPLETE
) {
/*
 * NOTE(review): this '>=' looks inverted relative to the pass-1 check
 * above -- as written it fires while the request is still WITHIN its
 * timeout window.  Likely should be '<='; confirm before changing.
 */
296 if ((req_cl
->init_time
+ config
->timeout
) >= log_current_utime
) {
298 MK_TRACE("Scheduler, closing fd %i due to timeout (incomplete)",
301 close(req_cl
->socket
);
/* Advance the linked-list walk. */
304 req_cl
= req_cl
->next
;
/*
 * mk_sched_update_conn_status(): find 'remote_fd' in 'sched's queue and
 * overwrite its status with 'status'.  The function continues past the
 * end of this view (break/return elided).
 *
 * NOTE(review): this loop bounds on config->workers while every other
 * queue scan in this file uses config->worker_capacity -- looks like a
 * bug (the queue has worker_capacity slots); confirm and align.
 */
310 int mk_sched_update_conn_status(struct sched_list_node
*sched
,
311 int remote_fd
, int status
)
319 for (i
= 0; i
< config
->workers
; i
++) {
320 if (sched
->queue
[i
].socket
== remote_fd
) {
321 sched
->queue
[i
].status
= status
;