/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

/* Monkey HTTP Daemon
 * ------------------
 * Copyright (C) 2001-2010, Eduardo Silva P. <edsiper@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Library General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>
#include <sys/epoll.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <string.h>

#include "monkey.h"
#include "connection.h"
#include "scheduler.h"
#include "memory.h"
#include "epoll.h"
#include "request.h"
#include "cache.h"
#include "config.h"
#include "clock.h"
#include "signals.h"
#include "plugin.h"
#include "utils.h"

/* Register thread information */
int mk_sched_register_thread(pthread_t tid, int efd)
{
    int i;
    struct sched_list_node *sr, *aux;

    sr = mk_mem_malloc_z(sizeof(struct sched_list_node));
    sr->tid = tid;
    sr->pid = -1;
    sr->epoll_fd = efd;
    sr->queue = mk_mem_malloc_z(sizeof(struct sched_connection) *
                                config->worker_capacity);
    sr->request_handler = NULL;
    sr->next = NULL;

    for (i = 0; i < config->worker_capacity; i++) {
        /* Pre alloc IPv4 memory buffer */
        sr->queue[i].ipv4.data = mk_mem_malloc_z(16);
        sr->queue[i].status = MK_SCHEDULER_CONN_AVAILABLE;
    }

    if (!sched_list) {
        sr->idx = 1;
        sched_list = sr;
        return 0;
    }

    aux = sched_list;
    while (aux->next) {
        aux = aux->next;
    }
    sr->idx = aux->idx + 1;
    aux->next = sr;

    return 0;
}

/*
 * Create thread which will be listening
 * for incoming file descriptors
 */
int mk_sched_launch_thread(int max_events)
{
    int efd;
    pthread_t tid;
    pthread_attr_t attr;
    pthread_mutex_t mutex_wait_register;
    sched_thread_conf *thconf;

    /* Creating epoll file descriptor */
    efd = mk_epoll_create(max_events);
    if (efd < 1) {
        return -1;
    }

    /* Thread stuff */
    pthread_mutex_init(&mutex_wait_register, (pthread_mutexattr_t *) NULL);
    pthread_mutex_lock(&mutex_wait_register);

    thconf = mk_mem_malloc(sizeof(sched_thread_conf));
    thconf->epoll_fd = efd;
    thconf->max_events = max_events;

    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    if (pthread_create(&tid, &attr, mk_sched_launch_epoll_loop,
                       (void *) thconf) != 0) {
        perror("pthread_create");
        return -1;
    }

    /* Register working thread */
    mk_sched_register_thread(tid, efd);
    pthread_mutex_unlock(&mutex_wait_register);

    return 0;
}

/* Created thread: all these calls run in the thread context */
void *mk_sched_launch_epoll_loop(void *thread_conf)
{
    sched_thread_conf *thconf;
    struct sched_list_node *thinfo;
    mk_epoll_handlers *handler;

    /* Avoid SIGPIPE signals */
    mk_signal_thread_sigpipe_safe();

    thconf = thread_conf;

    /* Init specific thread cache */
    mk_cache_thread_init();
    mk_plugin_worker_startup();

    /* Epoll event handlers */
    handler = mk_epoll_set_handlers((void *) mk_conn_read,
                                    (void *) mk_conn_write,
                                    (void *) mk_conn_error,
                                    (void *) mk_conn_close,
                                    (void *) mk_conn_timeout);

    /* Nasty way to export task id */
    usleep(1000);
    thinfo = mk_sched_get_thread_conf();
    while (!thinfo) {
        thinfo = mk_sched_get_thread_conf();
    }

    /* Glibc doesn't export to user space the gettid() syscall */
    thinfo->pid = syscall(__NR_gettid);

    mk_sched_set_thread_poll(thconf->epoll_fd);
    mk_epoll_init(thconf->epoll_fd, handler, thconf->max_events);

    return 0;
}

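/*
 * Thread-local accessors: each worker keeps its request index and its
 * epoll file descriptor in pthread-specific keys
 */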
struct request_idx *mk_sched_get_request_index()
{
    return pthread_getspecific(request_index);
}

void mk_sched_set_request_index(struct request_idx *ri)
{
    pthread_setspecific(request_index, (void *) ri);
}

void mk_sched_set_thread_poll(int epoll)
{
    pthread_setspecific(epoll_fd, (void *) epoll);
}

int mk_sched_get_thread_poll()
{
    return (int) pthread_getspecific(epoll_fd);
}

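/* Return the scheduler node that belongs to the calling thread */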
struct sched_list_node *mk_sched_get_thread_conf()
{
    struct sched_list_node *node;
    pthread_t current;

    current = pthread_self();
    node = sched_list;
    while (node) {
        if (pthread_equal(node->tid, current) != 0) {
            return node;
        }
        node = node->next;
    }

    return NULL;
}

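/*
 * Place an incoming connection in a free slot of the worker queue and
 * register the socket in the worker epoll interface
 */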
int mk_sched_add_client(struct sched_list_node *sched, int remote_fd)
{
    unsigned int i, ret;

    /* Look for an available slot */
    for (i = 0; i < config->worker_capacity; i++) {
        if (sched->queue[i].status == MK_SCHEDULER_CONN_AVAILABLE) {
            /* Set IP */
            mk_socket_get_ip(remote_fd, sched->queue[i].ipv4.data);
            mk_pointer_set(&sched->queue[i].ipv4, sched->queue[i].ipv4.data);

            /* Before continuing, we need to run plugin stage 20 */
            ret = mk_plugin_stage_run(MK_PLUGIN_STAGE_20,
                                      remote_fd,
                                      &sched->queue[i], NULL, NULL);

            /* Close connection, otherwise continue */
            if (ret == MK_PLUGIN_RET_CLOSE_CONX) {
                mk_conn_close(remote_fd);
                return MK_PLUGIN_RET_CLOSE_CONX;
            }

            /* Socket and status */
            sched->queue[i].socket = remote_fd;
            sched->queue[i].status = MK_SCHEDULER_CONN_PENDING;
            sched->queue[i].arrive_time = log_current_utime;

            mk_epoll_add_client(sched->epoll_fd, remote_fd, MK_EPOLL_READ,
                                MK_EPOLL_BEHAVIOR_TRIGGERED);
            return 0;
        }
    }

    return -1;
}

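/* Close remote_fd and mark its queue slot as available again */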
int mk_sched_remove_client(struct sched_list_node *sched, int remote_fd)
{
    struct sched_connection *sc;

    sc = mk_sched_get_connection(sched, remote_fd);
    if (sc) {
        /* Close socket and change status */
        close(remote_fd);
        sc->status = MK_SCHEDULER_CONN_AVAILABLE;
        return 0;
    }

    return -1;
}

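/*
 * Find the queue entry that holds remote_fd; if no scheduler node is
 * given, look up the one that belongs to the calling thread
 */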
struct sched_connection *mk_sched_get_connection(struct sched_list_node
                                                 *sched, int remote_fd)
{
    int i;

    if (!sched) {
        sched = mk_sched_get_thread_conf();
        if (!sched) {
            close(remote_fd);
            return NULL;
        }
    }

    for (i = 0; i < config->worker_capacity; i++) {
        if (sched->queue[i].socket == remote_fd) {
            return &sched->queue[i];
        }
    }

    return NULL;
}

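/* Close pending connections and incomplete requests whose timeout expired */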
int mk_sched_check_timeouts(struct sched_list_node *sched)
{
    int i;
    struct request_idx *req_idx;
    struct client_request *req_cl;

    /* PENDING CONN TIMEOUT */
    for (i = 0; i < config->worker_capacity; i++) {
        if (sched->queue[i].status == MK_SCHEDULER_CONN_PENDING) {
            if (sched->queue[i].arrive_time + config->timeout <=
                log_current_utime) {
#ifdef TRACE
                MK_TRACE("Scheduler, closing fd %i due TIMEOUT",
                         sched->queue[i].socket);
#endif
                mk_sched_remove_client(sched, sched->queue[i].socket);
            }
        }
    }

    /* PROCESSING CONN TIMEOUT */
    req_idx = mk_sched_get_request_index();
    req_cl = req_idx->first;

    while (req_cl) {
        if (req_cl->status == MK_REQUEST_STATUS_INCOMPLETE) {
            if ((req_cl->init_time + config->timeout) <= log_current_utime) {
#ifdef TRACE
                MK_TRACE("Scheduler, closing fd %i due to timeout (incomplete)",
                         req_cl->socket);
#endif
                close(req_cl->socket);
            }
        }
        req_cl = req_cl->next;
    }

    return 0;
}

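/* Update the status of the queue entry that matches remote_fd */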
int mk_sched_update_conn_status(struct sched_list_node *sched,
                                int remote_fd, int status)
{
    int i;

    if (!sched) {
        return -1;
    }

    for (i = 0; i < config->worker_capacity; i++) {
        if (sched->queue[i].socket == remote_fd) {
            sched->queue[i].status = status;
            return 0;
        }
    }

    return 0;
}