2 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * $FreeBSD: src/usr.sbin/nscd/nscd.c,v 1.7 2008/10/23 00:27:35 delphij Exp $
#include <sys/types.h>
#include <sys/event.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <sys/un.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libutil.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "agents/passwd.h"
#include "agents/group.h"
#include "agents/services.h"
#include "cachelib.h"
#include "configuration.h"
#include "debug.h"
#include "log.h"
#include "nscdcli.h"
#include "parser.h"
#include "pidfile.h"
#include "query.h"
#include "singletons.h"
60 #define CONFIG_PATH "/etc/nscd.conf"
62 #define DEFAULT_CONFIG_PATH "nscd.conf"
64 #define MAX_SOCKET_IO_SIZE 4096
66 struct processing_thread_args
{
68 struct configuration
*the_configuration
;
69 struct runtime_env
*the_runtime_env
;
72 static void accept_connection(struct kevent
*, struct runtime_env
*,
73 struct configuration
*);
74 static void destroy_cache_(cache
);
75 static void destroy_runtime_env(struct runtime_env
*);
76 static cache
init_cache_(struct configuration
*);
77 static struct runtime_env
*init_runtime_env(struct configuration
*);
78 static void processing_loop(cache
, struct runtime_env
*,
79 struct configuration
*);
80 static void process_socket_event(struct kevent
*, struct runtime_env
*,
81 struct configuration
*);
82 static void process_timer_event(struct kevent
*, struct runtime_env
*,
83 struct configuration
*);
84 static void *processing_thread(void *);
85 static void usage(void);
87 void get_time_func(struct timeval
*);
/*
 * Print the command-line synopsis to stderr and terminate.
 */
static void
usage(void)
{
	fprintf(stderr,
	    "usage: nscd [-dnst] [-i cachename] [-I cachename]\n");
	exit(1);
}
98 init_cache_(struct configuration
*config
)
100 struct cache_params params
;
103 struct configuration_entry
*config_entry
;
107 TRACE_IN(init_cache_
);
109 memset(¶ms
, 0, sizeof(struct cache_params
));
110 params
.get_time_func
= get_time_func
;
111 retval
= init_cache(¶ms
);
113 size
= configuration_get_entries_size(config
);
114 for (i
= 0; i
< size
; ++i
) {
115 config_entry
= configuration_get_entry(config
, i
);
117 * We should register common entries now - multipart entries
118 * would be registered automatically during the queries.
120 res
= register_cache_entry(retval
, (struct cache_entry_params
*)
121 &config_entry
->positive_cache_params
);
122 config_entry
->positive_cache_entry
= find_cache_entry(retval
,
123 config_entry
->positive_cache_params
.entry_name
);
124 assert(config_entry
->positive_cache_entry
!=
125 INVALID_CACHE_ENTRY
);
127 res
= register_cache_entry(retval
, (struct cache_entry_params
*)
128 &config_entry
->negative_cache_params
);
129 config_entry
->negative_cache_entry
= find_cache_entry(retval
,
130 config_entry
->negative_cache_params
.entry_name
);
131 assert(config_entry
->negative_cache_entry
!=
132 INVALID_CACHE_ENTRY
);
135 LOG_MSG_2("cache", "cache was successfully initialized");
136 TRACE_OUT(init_cache_
);
141 destroy_cache_(cache the_cache
)
143 TRACE_IN(destroy_cache_
);
144 destroy_cache(the_cache
);
145 TRACE_OUT(destroy_cache_
);
/*
 * Socket and kqueues are prepared here. We have one global queue for both
 * socket and timers events.
 * Returns the allocated runtime environment, or NULL on failure (socket
 * creation or bind error); on failure nothing is leaked.
 */
static struct runtime_env *
init_runtime_env(struct configuration *config)
{
	int serv_addr_len;
	struct sockaddr_un serv_addr;

	struct kevent eventlist;
	struct timespec timeout;

	struct runtime_env *retval;

	TRACE_IN(init_runtime_env);
	retval = calloc(1, sizeof(*retval));
	assert(retval != NULL);

	retval->sockfd = socket(PF_LOCAL, SOCK_STREAM, 0);
	/* FIX: check socket() result before using the descriptor */
	if (retval->sockfd == -1) {
		free(retval);
		LOG_ERR_2("runtime environment", "can't create socket");
		TRACE_OUT(init_runtime_env);
		return (NULL);
	}

	if (config->force_unlink == 1)
		unlink(config->socket_path);

	memset(&serv_addr, 0, sizeof(struct sockaddr_un));
	serv_addr.sun_family = PF_LOCAL;
	strlcpy(serv_addr.sun_path, config->socket_path,
		sizeof(serv_addr.sun_path));
	serv_addr_len = sizeof(serv_addr.sun_family) +
		strlen(serv_addr.sun_path) + 1;

	if (bind(retval->sockfd, (struct sockaddr *)&serv_addr,
		serv_addr_len) == -1) {
		close(retval->sockfd);
		free(retval);

		LOG_ERR_2("runtime environment", "can't bind socket to path: "
			"%s", config->socket_path);
		TRACE_OUT(init_runtime_env);
		return (NULL);
	}
	LOG_MSG_2("runtime environment", "using socket %s",
		config->socket_path);

	/*
	 * Here we're marking socket as non-blocking and setting its backlog
	 * to the maximum value
	 */
	chmod(config->socket_path, config->socket_mode);
	listen(retval->sockfd, -1);
	fcntl(retval->sockfd, F_SETFL, O_NONBLOCK);

	retval->queue = kqueue();
	assert(retval->queue != -1);

	/* one-shot read filter on the listening socket; re-armed in
	 * processing_loop() after each batch of accepts */
	EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT,
		0, 0, 0);
	memset(&timeout, 0, sizeof(struct timespec));
	kevent(retval->queue, &eventlist, 1, NULL, 0, &timeout);

	LOG_MSG_2("runtime environment", "successfully initialized");
	TRACE_OUT(init_runtime_env);
	return (retval);
}
214 destroy_runtime_env(struct runtime_env
*env
)
216 TRACE_IN(destroy_runtime_env
);
220 TRACE_OUT(destroy_runtime_env
);
/*
 * Accept one pending client connection, identify the peer via
 * getpeereid(), create its query_state and register a one-shot read
 * filter plus a one-shot lifetime timer for it on the global kqueue.
 * FIX: the error paths after a successful accept() now close the
 * descriptor - previously it leaked when getpeereid() or
 * init_query_state() failed.
 */
static void
accept_connection(struct kevent *event_data, struct runtime_env *env,
	struct configuration *config)
{
	struct kevent eventlist[2];
	struct timespec timeout;
	struct query_state *qstate;

	int fd;
	int res;

	uid_t euid;
	gid_t egid;

	TRACE_IN(accept_connection);
	fd = accept(event_data->ident, NULL, NULL);
	if (fd == -1) {
		LOG_ERR_2("accept_connection", "error %d during accept()",
			errno);
		TRACE_OUT(accept_connection);
		return;
	}

	if (getpeereid(fd, &euid, &egid) != 0) {
		LOG_ERR_2("accept_connection", "error %d during getpeereid()",
			errno);
		close(fd);	/* FIX: don't leak the accepted descriptor */
		TRACE_OUT(accept_connection);
		return;
	}

	qstate = init_query_state(fd, sizeof(int), euid, egid);
	if (qstate == NULL) {
		LOG_ERR_2("accept_connection", "can't init query_state");
		close(fd);	/* FIX: don't leak the accepted descriptor */
		TRACE_OUT(accept_connection);
		return;
	}

	memset(&timeout, 0, sizeof(struct timespec));
	/* lifetime timer (ms) and low-watermark read filter, both one-shot */
	EV_SET(&eventlist[0], fd, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
		0, qstate->timeout.tv_sec * 1000, qstate);
	EV_SET(&eventlist[1], fd, EVFILT_READ, EV_ADD | EV_ONESHOT,
		NOTE_LOWAT, qstate->kevent_watermark, qstate);
	res = kevent(env->queue, eventlist, 2, NULL, 0, &timeout);
	if (res < 0)
		LOG_ERR_2("accept_connection", "kevent error");

	TRACE_OUT(accept_connection);
}
/*
 * Drive one step of a client conversation.  Cancels the pending lifetime
 * timer, performs the socket IO / protocol processing step, then either
 * destroys the query_state (done, error, or EOF) or re-arms a one-shot
 * filter and a refreshed lifetime timer for the next step.
 * FIX: "res" is now initialized - in the dropped-line original it was
 * read at the teardown test even on paths where process_func was never
 * invoked.
 */
static void
process_socket_event(struct kevent *event_data, struct runtime_env *env,
	struct configuration *config)
{
	struct kevent eventlist[2];
	struct timeval query_timeout;
	struct timespec kevent_timeout;
	int nevents;
	int eof_res, res;
	ssize_t	io_res;
	struct query_state *qstate;

	TRACE_IN(process_socket_event);
	eof_res = event_data->flags & EV_EOF ? 1 : 0;
	res = 0;

	/* cancel the lifetime timer for the duration of this step */
	memset(&kevent_timeout, 0, sizeof(struct timespec));
	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_DELETE,
		0, 0, NULL);
	nevents = kevent(env->queue, eventlist, 1, NULL, 0, &kevent_timeout);
	if (nevents == -1) {
		if (errno == ENOENT) {
			/* the timer is already handling this event */
			TRACE_OUT(process_socket_event);
			return;
		} else {
			/* some other error happened */
			LOG_ERR_2("process_socket_event", "kevent error, errno"
				" is %d", errno);
			TRACE_OUT(process_socket_event);
			return;
		}
	}
	qstate = (struct query_state *)event_data->udata;

	/*
	 * If the buffer that is to be send/received is too large,
	 * we send it implicitly, by using query_io_buffer_read and
	 * query_io_buffer_write functions in the query_state. These functions
	 * use the temporary buffer, which is later send/received in parts.
	 * The code below implements buffer splitting/merging for send/receive
	 * operations. It also does the actual socket IO operations.
	 */
	if (((qstate->use_alternate_io == 0) &&
		(qstate->kevent_watermark <= event_data->data)) ||
		((qstate->use_alternate_io != 0) &&
		(qstate->io_buffer_watermark <= event_data->data))) {
		if (qstate->use_alternate_io != 0) {
			switch (qstate->io_buffer_filter) {
			case EVFILT_READ:
				/* pull the next chunk into the big buffer */
				io_res = query_socket_read(qstate,
					qstate->io_buffer_p,
					qstate->io_buffer_watermark);
				if (io_res < 0) {
					qstate->use_alternate_io = 0;
					qstate->process_func = NULL;
				} else {
					qstate->io_buffer_p += io_res;
					if (qstate->io_buffer_p ==
						qstate->io_buffer +
						qstate->io_buffer_size) {
						qstate->io_buffer_p =
							qstate->io_buffer;
						qstate->use_alternate_io = 0;
					}
				}
				break;
			default:
				break;
			}
		}

		if (qstate->use_alternate_io == 0) {
			/* run protocol steps until one blocks or finishes */
			do {
				res = qstate->process_func(qstate);
			} while ((qstate->kevent_watermark == 0) &&
				(qstate->process_func != NULL) &&
				(res == 0));

			if (res != 0)
				qstate->process_func = NULL;
		}

		if ((qstate->use_alternate_io != 0) &&
			(qstate->io_buffer_filter == EVFILT_WRITE)) {
			/* push the next chunk of the big buffer out */
			io_res = query_socket_write(qstate, qstate->io_buffer_p,
				qstate->io_buffer_watermark);
			if (io_res < 0) {
				qstate->use_alternate_io = 0;
				qstate->process_func = NULL;
			} else
				qstate->io_buffer_p += io_res;
		}
	} else {
		/* assuming that socket was closed */
		qstate->process_func = NULL;
		qstate->use_alternate_io = 0;
	}

	/* conversation finished, failed, or peer hung up - tear down */
	if (((qstate->process_func == NULL) &&
		(qstate->use_alternate_io == 0)) ||
		(eof_res != 0) || (res != 0)) {
		destroy_query_state(qstate);
		close(event_data->ident);
		TRACE_OUT(process_socket_event);
		return;
	}

	/* updating the query_state lifetime variable */
	get_time_func(&query_timeout);
	query_timeout.tv_usec = 0;
	query_timeout.tv_sec -= qstate->creation_time.tv_sec;
	if (query_timeout.tv_sec > qstate->timeout.tv_sec)
		query_timeout.tv_sec = 0;
	else
		query_timeout.tv_sec = qstate->timeout.tv_sec -
			query_timeout.tv_sec;

	if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_p ==
		qstate->io_buffer + qstate->io_buffer_size))
		qstate->use_alternate_io = 0;

	if (qstate->use_alternate_io == 0) {
		/*
		 * If we must send/receive the large block of data,
		 * we should prepare the query_state's io_XXX fields.
		 * We should also substitute its write_func and read_func
		 * with the query_io_buffer_write and query_io_buffer_read,
		 * which will allow us to implicitly send/receive this large
		 * buffer later (in the subsequent calls to the
		 * process_socket_event).
		 */
		if (qstate->kevent_watermark > MAX_SOCKET_IO_SIZE) {
			if (qstate->io_buffer != NULL)
				free(qstate->io_buffer);

			qstate->io_buffer = calloc(1,
				qstate->kevent_watermark);
			assert(qstate->io_buffer != NULL);

			qstate->io_buffer_p = qstate->io_buffer;
			qstate->io_buffer_size = qstate->kevent_watermark;
			qstate->io_buffer_filter = qstate->kevent_filter;

			qstate->write_func = query_io_buffer_write;
			qstate->read_func = query_io_buffer_read;

			if (qstate->kevent_filter == EVFILT_READ)
				qstate->use_alternate_io = 1;

			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
			EV_SET(&eventlist[1], event_data->ident,
				qstate->kevent_filter, EV_ADD | EV_ONESHOT,
				NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
		} else {
			EV_SET(&eventlist[1], event_data->ident,
				qstate->kevent_filter, EV_ADD | EV_ONESHOT,
				NOTE_LOWAT, qstate->kevent_watermark, qstate);
		}
	} else {
		/* continue the chunked transfer of the big buffer */
		if (qstate->io_buffer + qstate->io_buffer_size -
			qstate->io_buffer_p <
			MAX_SOCKET_IO_SIZE) {
			qstate->io_buffer_watermark = qstate->io_buffer +
				qstate->io_buffer_size - qstate->io_buffer_p;
			EV_SET(&eventlist[1], event_data->ident,
				qstate->io_buffer_filter,
				EV_ADD | EV_ONESHOT, NOTE_LOWAT,
				qstate->io_buffer_watermark,
				qstate);
		} else {
			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
			EV_SET(&eventlist[1], event_data->ident,
				qstate->io_buffer_filter, EV_ADD | EV_ONESHOT,
				NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
		}
	}
	/* re-arm the lifetime timer with the remaining time budget */
	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER,
		EV_ADD | EV_ONESHOT, 0, query_timeout.tv_sec * 1000, qstate);
	kevent(env->queue, eventlist, 2, NULL, 0, &kevent_timeout);

	TRACE_OUT(process_socket_event);
}
/*
 * This routine is called if timer event has been signaled in the kqueue. It
 * just closes the socket and destroys the query_state.
 */
static void
process_timer_event(struct kevent *event_data, struct runtime_env *env,
	struct configuration *config)
{
	struct query_state *qstate;

	TRACE_IN(process_timer_event);
	qstate = (struct query_state *)event_data->udata;
	destroy_query_state(qstate);
	close(event_data->ident);
	TRACE_OUT(process_timer_event);
}
/*
 * Processing loop is the basic processing routine, that forms a body of each
 * processing thread: wait on the shared kqueue, dispatch accepts on the
 * listening socket, and socket/timer events for client conversations.
 * NOTE(review): the listening-socket re-arm below uses the s_runtime_env
 * global rather than the env parameter, as in the original; they refer to
 * the same object in practice - confirm before unifying.
 */
static void
processing_loop(cache the_cache, struct runtime_env *env,
	struct configuration *config)
{
	struct timespec timeout;
	const int eventlist_size = 1;
	struct kevent eventlist[eventlist_size];
	int nevents, i;

	TRACE_MSG("=> processing_loop");
	memset(&timeout, 0, sizeof(struct timespec));
	memset(&eventlist, 0, sizeof(struct kevent) * eventlist_size);

	for (;;) {
		nevents = kevent(env->queue, NULL, 0, eventlist,
			eventlist_size, NULL);
		/*
		 * we can only receive 1 event on success
		 */
		if (nevents == 1) {
			struct kevent *event_data;
			event_data = &eventlist[0];

			if (event_data->ident == env->sockfd) {
				/* accept every pending connection, then
				 * re-arm the one-shot read filter */
				for (i = 0; i < event_data->data; ++i)
					accept_connection(event_data, env,
						config);

				EV_SET(eventlist, s_runtime_env->sockfd,
					EVFILT_READ, EV_ADD | EV_ONESHOT,
					0, 0, 0);
				memset(&timeout, 0,
					sizeof(struct timespec));
				kevent(s_runtime_env->queue, eventlist,
					1, NULL, 0, &timeout);
			} else {
				switch (event_data->filter) {
				case EVFILT_READ:
				case EVFILT_WRITE:
					process_socket_event(event_data,
						env, config);
					break;
				case EVFILT_TIMER:
					process_timer_event(event_data,
						env, config);
					break;
				default:
					break;
				}
			}
		} else {
			/* this branch shouldn't be currently executed */
		}
	}

	TRACE_MSG("<= processing_loop");
}
536 * Wrapper above the processing loop function. It sets the thread signal mask
537 * to avoid SIGPIPE signals (which can happen if the client works incorrectly).
540 processing_thread(void *data
)
542 struct processing_thread_args
*args
;
545 TRACE_MSG("=> processing_thread");
546 args
= (struct processing_thread_args
*)data
;
549 sigaddset(&new, SIGPIPE
);
550 if (pthread_sigmask(SIG_BLOCK
, &new, NULL
) != 0)
551 LOG_ERR_1("processing thread",
552 "thread can't block the SIGPIPE signal");
554 processing_loop(args
->the_cache
, args
->the_runtime_env
,
555 args
->the_configuration
);
557 TRACE_MSG("<= processing_thread");
/*
 * Time source installed into the cache (see init_cache_): whole-second
 * monotonic time, so cache entry lifetimes are immune to wall-clock steps.
 * The tv_usec field is always zeroed.
 */
void
get_time_func(struct timeval *time)
{
	struct timespec res;

	memset(&res, 0, sizeof(struct timespec));
	clock_gettime(CLOCK_MONOTONIC, &res);

	time->tv_sec = res.tv_sec;
	time->tv_usec = 0;
}
/*
 * The idea of _nss_cache_cycle_prevention_function is that nsdispatch will
 * search for this symbol in the executable. This symbol is the attribute of
 * the caching daemon. So, if it exists, nsdispatch won't try to connect to
 * the caching daemon and will just ignore the 'cache' source in the
 * nsswitch.conf. This method helps to avoid cycles and organize
 * self-performing requests.
 */
void
_nss_cache_cycle_prevention_function(void)
{
}
587 main(int argc
, char *argv
[])
589 struct processing_thread_args
*thread_args
;
592 struct pidfh
*pidfile
;
595 char const *config_file
;
596 char const *error_str
;
600 int trace_mode_enabled
;
601 int force_single_threaded
;
602 int do_not_daemonize
;
603 int clear_user_cache_entries
, clear_all_cache_entries
;
604 char *user_config_entry_name
, *global_config_entry_name
;
606 int daemon_mode
, interactive_mode
;
609 /* by default all debug messages are omitted */
612 /* parsing command line arguments */
613 trace_mode_enabled
= 0;
614 force_single_threaded
= 0;
615 do_not_daemonize
= 0;
616 clear_user_cache_entries
= 0;
617 clear_all_cache_entries
= 0;
619 user_config_entry_name
= NULL
;
620 global_config_entry_name
= NULL
;
621 while ((res
= getopt(argc
, argv
, "nstdi:I:")) != -1) {
624 do_not_daemonize
= 1;
627 force_single_threaded
= 1;
630 trace_mode_enabled
= 1;
633 clear_user_cache_entries
= 1;
635 if (strcmp(optarg
, "all") != 0)
636 user_config_entry_name
= strdup(optarg
);
639 clear_all_cache_entries
= 1;
641 if (strcmp(optarg
, "all") != 0)
642 global_config_entry_name
=
655 daemon_mode
= do_not_daemonize
| force_single_threaded
|
657 interactive_mode
= clear_user_cache_entries
| clear_all_cache_entries
|
660 if ((daemon_mode
!= 0) && (interactive_mode
!= 0)) {
661 LOG_ERR_1("main", "daemon mode and interactive_mode arguments "
662 "can't be used together");
666 if (interactive_mode
!= 0) {
667 FILE *pidfin
= fopen(DEFAULT_PIDFILE_PATH
, "r");
670 struct nscd_connection_params connection_params
;
671 nscd_connection connection
;
676 errx(EXIT_FAILURE
, "There is no daemon running.");
678 memset(pidbuf
, 0, sizeof(pidbuf
));
679 fread(pidbuf
, sizeof(pidbuf
) - 1, 1, pidfin
);
682 if (ferror(pidfin
) != 0)
683 errx(EXIT_FAILURE
, "Can't read from pidfile.");
685 if (sscanf(pidbuf
, "%d", &pid
) != 1)
686 errx(EXIT_FAILURE
, "Invalid pidfile.");
687 LOG_MSG_1("main", "daemon PID is %d", pid
);
690 memset(&connection_params
, 0,
691 sizeof(struct nscd_connection_params
));
692 connection_params
.socket_path
= DEFAULT_SOCKET_PATH
;
693 connection
= open_nscd_connection__(&connection_params
);
694 if (connection
== INVALID_NSCD_CONNECTION
)
695 errx(EXIT_FAILURE
, "Can't connect to the daemon.");
697 if (clear_user_cache_entries
!= 0) {
698 result
= nscd_transform__(connection
,
699 user_config_entry_name
, TT_USER
);
702 "user cache transformation failed");
705 "user cache_transformation "
709 if (clear_all_cache_entries
!= 0) {
711 errx(EXIT_FAILURE
, "Only root can initiate "
712 "global cache transformation.");
714 result
= nscd_transform__(connection
,
715 global_config_entry_name
, TT_ALL
);
718 "global cache transformation "
722 "global cache transformation "
726 close_nscd_connection__(connection
);
728 free(user_config_entry_name
);
729 free(global_config_entry_name
);
730 return (EXIT_SUCCESS
);
733 pidfile
= pidfile_open(DEFAULT_PIDFILE_PATH
, 0644, &pid
);
734 if (pidfile
== NULL
) {
736 errx(EXIT_FAILURE
, "Daemon already running, pid: %d.",
738 warn("Cannot open or create pidfile");
741 if (trace_mode_enabled
== 1)
744 /* blocking the main thread from receiving SIGPIPE signal */
745 sigblock(sigmask(SIGPIPE
));
748 if (do_not_daemonize
== 0) {
749 res
= daemon(0, trace_mode_enabled
== 0 ? 0 : 1);
751 LOG_ERR_1("main", "can't daemonize myself: %s",
753 pidfile_remove(pidfile
);
756 LOG_MSG_1("main", "successfully daemonized");
759 pidfile_write(pidfile
);
761 s_agent_table
= init_agent_table();
762 register_agent(s_agent_table
, init_passwd_agent());
763 register_agent(s_agent_table
, init_passwd_mp_agent());
764 register_agent(s_agent_table
, init_group_agent());
765 register_agent(s_agent_table
, init_group_mp_agent());
766 register_agent(s_agent_table
, init_services_agent());
767 register_agent(s_agent_table
, init_services_mp_agent());
768 LOG_MSG_1("main", "request agents registered successfully");
771 * Hosts agent can't work properly until we have access to the
772 * appropriate dtab structures, which are used in nsdispatch
775 register_agent(s_agent_table, init_hosts_agent());
778 /* configuration initialization */
779 s_configuration
= init_configuration();
780 fill_configuration_defaults(s_configuration
);
784 config_file
= CONFIG_PATH
;
786 res
= parse_config_file(s_configuration
, config_file
, &error_str
,
788 if ((res
!= 0) && (error_str
== NULL
)) {
789 config_file
= DEFAULT_CONFIG_PATH
;
790 res
= parse_config_file(s_configuration
, config_file
,
791 &error_str
, &error_line
);
795 if (error_str
!= NULL
) {
796 LOG_ERR_1("main", "error in configuration file(%s, %d): %s\n",
797 config_file
, error_line
, error_str
);
799 LOG_ERR_1("main", "no configuration file found "
800 "- was looking for %s and %s",
801 CONFIG_PATH
, DEFAULT_CONFIG_PATH
);
803 destroy_configuration(s_configuration
);
807 if (force_single_threaded
== 1)
808 s_configuration
->threads_num
= 1;
810 /* cache initialization */
811 s_cache
= init_cache_(s_configuration
);
812 if (s_cache
== NULL
) {
813 LOG_ERR_1("main", "can't initialize the cache");
814 destroy_configuration(s_configuration
);
818 /* runtime environment initialization */
819 s_runtime_env
= init_runtime_env(s_configuration
);
820 if (s_runtime_env
== NULL
) {
821 LOG_ERR_1("main", "can't initialize the runtime environment");
822 destroy_configuration(s_configuration
);
823 destroy_cache_(s_cache
);
827 if (s_configuration
->threads_num
> 1) {
828 threads
= (pthread_t
*)calloc(1, sizeof(pthread_t
) *
829 s_configuration
->threads_num
);
830 for (i
= 0; i
< s_configuration
->threads_num
; ++i
) {
831 thread_args
= (struct processing_thread_args
*)malloc(
832 sizeof(struct processing_thread_args
));
833 thread_args
->the_cache
= s_cache
;
834 thread_args
->the_runtime_env
= s_runtime_env
;
835 thread_args
->the_configuration
= s_configuration
;
837 LOG_MSG_1("main", "thread #%d was successfully created",
839 pthread_create(&threads
[i
], NULL
, processing_thread
,
845 for (i
= 0; i
< s_configuration
->threads_num
; ++i
)
846 pthread_join(threads
[i
], NULL
);
848 LOG_MSG_1("main", "working in single-threaded mode");
849 processing_loop(s_cache
, s_runtime_env
, s_configuration
);
853 /* runtime environment destruction */
854 destroy_runtime_env(s_runtime_env
);
856 /* cache destruction */
857 destroy_cache_(s_cache
);
859 /* configuration destruction */
860 destroy_configuration(s_configuration
);
862 /* agents table destruction */
863 destroy_agent_table(s_agent_table
);
865 pidfile_remove(pidfile
);
866 return (EXIT_SUCCESS
);