Accept make versions 4.0 and greater
[glibc.git] / nscd / nscd_helper.c
blob8587e4549c88c6b8da7f09e2313728bd1b1bf714
1 /* Copyright (C) 1998-2013 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
3 Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
19 #include <assert.h>
20 #include <errno.h>
21 #include <fcntl.h>
22 #include <stdbool.h>
23 #include <stddef.h>
24 #include <string.h>
25 #include <time.h>
26 #include <unistd.h>
27 #include <stdint.h>
28 #include <sys/mman.h>
29 #include <sys/poll.h>
30 #include <sys/socket.h>
31 #include <sys/stat.h>
32 #include <sys/time.h>
33 #include <sys/uio.h>
34 #include <sys/un.h>
35 #include <not-cancel.h>
36 #include <nis/rpcsvc/nis.h>
37 #include <kernel-features.h>
39 #include "nscd-client.h"
42 /* Extra time we wait if the socket is still receiving data. This
43 value is in milliseconds. Note that the other side is nscd on the
44 local machine and it is already transmitting data. So the wait
45 time need not be long. */
46 #define EXTRA_RECEIVE_TIME 200
49 static int
50 wait_on_socket (int sock, long int usectmo)
52 struct pollfd fds[1];
53 fds[0].fd = sock;
54 fds[0].events = POLLIN | POLLERR | POLLHUP;
55 int n = __poll (fds, 1, usectmo);
56 if (n == -1 && __builtin_expect (errno == EINTR, 0))
58 /* Handle the case where the poll() call is interrupted by a
59 signal. We cannot just use TEMP_FAILURE_RETRY since it might
60 lead to infinite loops. */
61 struct timeval now;
62 (void) __gettimeofday (&now, NULL);
63 long int end = now.tv_sec * 1000 + usectmo + (now.tv_usec + 500) / 1000;
64 long int timeout = usectmo;
65 while (1)
67 n = __poll (fds, 1, timeout);
68 if (n != -1 || errno != EINTR)
69 break;
71 /* Recompute the timeout time. */
72 (void) __gettimeofday (&now, NULL);
73 timeout = end - (now.tv_sec * 1000 + (now.tv_usec + 500) / 1000);
77 return n;
81 ssize_t
82 __readall (int fd, void *buf, size_t len)
84 size_t n = len;
85 ssize_t ret;
88 again:
89 ret = TEMP_FAILURE_RETRY (__read (fd, buf, n));
90 if (ret <= 0)
92 if (__builtin_expect (ret < 0 && errno == EAGAIN, 0)
93 /* The socket is still receiving data. Wait a bit more. */
94 && wait_on_socket (fd, EXTRA_RECEIVE_TIME) > 0)
95 goto again;
97 break;
99 buf = (char *) buf + ret;
100 n -= ret;
102 while (n > 0);
103 return ret < 0 ? ret : len - n;
/* Scatter-read into the IOVCNT buffers of IOV until all of them are
   full, tolerating short reads and transient EAGAIN from the
   nonblocking nscd socket.  Returns the total number of bytes read,
   0 on immediate EOF, or a negative value on error.  */
107 ssize_t
108 __readvall (int fd, const struct iovec *iov, int iovcnt)
110 ssize_t ret = TEMP_FAILURE_RETRY (__readv (fd, iov, iovcnt));
111 if (ret <= 0)
113 if (__builtin_expect (ret == 0 || errno != EAGAIN, 1))
114 /* A genuine error or no data to read. */
115 return ret;
117 /* The data has not all yet been received. Do as if we have not
118 read anything yet. */
119 ret = 0;
/* Total number of bytes the caller expects across all iovec slots.  */
122 size_t total = 0;
123 for (int i = 0; i < iovcnt; ++i)
124 total += iov[i].iov_len;
/* RET is non-negative here (negative returns were taken above), so
   the implicit signed/unsigned conversion in this comparison against
   the size_t TOTAL is safe.  */
126 if (ret < total)
/* Work on a local copy of the iovec array so we can advance
   base/len fields without mutating the caller's (const) IOV.  */
128 struct iovec iov_buf[iovcnt];
129 ssize_t r = ret;
131 struct iovec *iovp = memcpy (iov_buf, iov, iovcnt * sizeof (*iov));
/* Skip the iovec entries already completely filled; R holds the
   bytes consumed so far and is >= 0, so the unsigned comparison
   with iov_len behaves as expected.  NOTE(review): assumes TOTAL > 0
   (nonempty iovecs), otherwise this skip loop would run off the end
   — callers in this file always pass nonempty buffers.  */
134 while (iovp->iov_len <= r)
136 r -= iovp->iov_len;
137 --iovcnt;
138 ++iovp;
/* Trim the partially-filled entry to its unread remainder.  */
140 iovp->iov_base = (char *) iovp->iov_base + r;
141 iovp->iov_len -= r;
142 again:
143 r = TEMP_FAILURE_RETRY (__readv (fd, iovp, iovcnt));
144 if (r <= 0)
146 if (__builtin_expect (r < 0 && errno == EAGAIN, 0)
147 /* The socket is still receiving data. Wait a bit more. */
148 && wait_on_socket (fd, EXTRA_RECEIVE_TIME) > 0)
149 goto again;
151 break;
153 ret += r;
/* Loop until every byte has been delivered (RET stays >= 0 here).  */
155 while (ret < total);
/* If the loop ended on an error (not EOF/EAGAIN), propagate it.  */
156 if (r < 0)
157 ret = r;
159 return ret;
/* Open a nonblocking AF_UNIX stream socket to the nscd daemon at
   _PATH_NSCDSOCKET, connect, and transmit a request of TYPE with the
   KEYLEN-byte KEY appended to the request header.  Returns the
   connected socket on success, or -1 (after closing the socket) on
   any failure.  Waits up to ~5 seconds for a busy daemon to accept
   the request.  */
163 static int
164 open_socket (request_type type, const char *key, size_t keylen)
166 int sock;
/* Prefer creating the socket with close-on-exec and nonblocking mode
   atomically.  On kernels where SOCK_CLOEXEC support is uncertain,
   __have_sock_cloexec caches the probe result (0 = unknown,
   1 = supported, -1 = unsupported).  */
168 #ifdef SOCK_CLOEXEC
169 # ifndef __ASSUME_SOCK_CLOEXEC
170 if (__have_sock_cloexec >= 0)
171 # endif
173 sock = __socket (PF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0);
174 # ifndef __ASSUME_SOCK_CLOEXEC
175 if (__have_sock_cloexec == 0)
176 __have_sock_cloexec = sock != -1 || errno != EINVAL ? 1 : -1;
177 # endif
179 #endif
/* Fallback path: plain socket, made nonblocking via fcntl below.  */
180 #ifndef __ASSUME_SOCK_CLOEXEC
181 # ifdef SOCK_CLOEXEC
182 if (__have_sock_cloexec < 0)
183 # endif
184 sock = __socket (PF_UNIX, SOCK_STREAM, 0);
185 #endif
186 if (sock < 0)
187 return -1;
/* Request header plus the key in one contiguous buffer (the key
   member is a VLA), so the whole request goes out in one send.  */
189 struct
191 request_header req;
192 char key[keylen];
193 } reqdata;
194 size_t real_sizeof_reqdata = sizeof (request_header) + keylen;
196 #ifndef __ASSUME_SOCK_CLOEXEC
197 # ifdef SOCK_NONBLOCK
198 if (__have_sock_cloexec < 0)
199 # endif
200 /* Make socket non-blocking. */
201 __fcntl (sock, F_SETFL, O_RDWR | O_NONBLOCK);
202 #endif
/* sun_path is fully overwritten by the fixed socket path; EINPROGRESS
   is expected for a nonblocking connect and not treated as failure.  */
204 struct sockaddr_un sun;
205 sun.sun_family = AF_UNIX;
206 strcpy (sun.sun_path, _PATH_NSCDSOCKET);
207 if (__connect (sock, (struct sockaddr *) &sun, sizeof (sun)) < 0
208 && errno != EINPROGRESS)
209 goto out;
211 reqdata.req.version = NSCD_VERSION;
212 reqdata.req.type = type;
213 reqdata.req.key_len = keylen;
215 memcpy (reqdata.key, key, keylen);
217 bool first_try = true;
218 struct timeval tvend;
/* tvend is only read after first_try set it; the asm merely keeps the
   compiler from warning about a (provably impossible) uninitialized
   use.  */
219 /* Fake initializing tvend. */
220 asm ("" : "=m" (tvend));
221 while (1)
223 #ifndef MSG_NOSIGNAL
224 # define MSG_NOSIGNAL 0
225 #endif
226 ssize_t wres = TEMP_FAILURE_RETRY (__send (sock, &reqdata,
227 real_sizeof_reqdata,
228 MSG_NOSIGNAL));
229 if (__builtin_expect (wres == (ssize_t) real_sizeof_reqdata, 1))
230 /* We managed to send the request. */
231 return sock;
233 if (wres != -1 || errno != EAGAIN)
234 /* Something is really wrong, no chance to continue. */
235 break;
/* EAGAIN: the daemon cannot accept data yet.  Poll for writability
   until the 5-second deadline established on the first retry.  */
237 /* The daemon is busy wait for it. */
238 int to;
239 struct timeval now;
240 (void) __gettimeofday (&now, NULL);
241 if (first_try)
243 tvend.tv_usec = now.tv_usec;
244 tvend.tv_sec = now.tv_sec + 5;
245 to = 5 * 1000;
246 first_try = false;
248 else
249 to = ((tvend.tv_sec - now.tv_sec) * 1000
250 + (tvend.tv_usec - now.tv_usec) / 1000);
252 struct pollfd fds[1];
253 fds[0].fd = sock;
254 fds[0].events = POLLOUT | POLLERR | POLLHUP;
255 if (__poll (fds, 1, to) <= 0)
256 /* The connection timed out or broke down. */
257 break;
259 /* We try to write again. */
/* Shared failure exit: close the socket and report failure.  */
262 out:
263 close_not_cancel_no_status (sock);
265 return -1;
269 void
270 __nscd_unmap (struct mapped_database *mapped)
272 assert (mapped->counter == 0);
273 __munmap ((void *) mapped->head, mapped->mapsize);
274 free (mapped);
278 /* Try to get a file descriptor for the shared memory segment
279 containing the database. */
/* Ask the nscd daemon (request TYPE, database name KEY) for the file
   descriptor of its persistent database, received via SCM_RIGHTS
   ancillary data, and mmap it read-only after validating the header.
   On success the new record (refcount 1) is stored through MAPPEDP and
   returned; on failure NO_MAPPING is stored/returned.  The previous
   value of *MAPPEDP is dereferenced and unmapped when its count hits
   zero.  errno is preserved across the whole operation.  */
280 struct mapped_database *
281 __nscd_get_mapping (request_type type, const char *key,
282 struct mapped_database **mappedp)
284 struct mapped_database *result = NO_MAPPING;
/* Without SCM_RIGHTS there is no way to receive the descriptor, so
   the whole body compiles away and NO_MAPPING is returned.  */
285 #ifdef SCM_RIGHTS
286 const size_t keylen = strlen (key) + 1;
287 int saved_errno = errno;
289 int mapfd = -1;
290 char resdata[keylen];
292 /* Open a socket and send the request. */
293 int sock = open_socket (type, key, keylen);
294 if (sock < 0)
295 goto out;
297 /* Room for the data sent along with the file descriptor. We expect
298 the key name back. */
299 uint64_t mapsize;
300 struct iovec iov[2];
301 iov[0].iov_base = resdata;
302 iov[0].iov_len = keylen;
303 iov[1].iov_base = &mapsize;
304 iov[1].iov_len = sizeof (mapsize);
/* Control-message buffer sized and aligned (via the union with
   struct cmsghdr) for exactly one file descriptor.  */
306 union
308 struct cmsghdr hdr;
309 char bytes[CMSG_SPACE (sizeof (int))];
310 } buf;
311 struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2,
312 .msg_control = buf.bytes,
313 .msg_controllen = sizeof (buf) };
314 struct cmsghdr *cmsg = CMSG_FIRSTHDR (&msg);
316 cmsg->cmsg_level = SOL_SOCKET;
317 cmsg->cmsg_type = SCM_RIGHTS;
318 cmsg->cmsg_len = CMSG_LEN (sizeof (int));
320 /* This access is well-aligned since BUF is correctly aligned for an
321 int and CMSG_DATA preserves this alignment. */
/* Pre-fill the fd slot with -1 bytes so a reply that carries no
   descriptor cannot leave a stale value behind.  */
322 memset (CMSG_DATA (cmsg), '\xff', sizeof (int));
324 msg.msg_controllen = cmsg->cmsg_len;
326 if (wait_on_socket (sock, 5 * 1000) <= 0)
327 goto out_close2;
329 # ifndef MSG_CMSG_CLOEXEC
330 # define MSG_CMSG_CLOEXEC 0
331 # endif
332 ssize_t n = TEMP_FAILURE_RETRY (__recvmsg (sock, &msg, MSG_CMSG_CLOEXEC));
/* Reject replies whose control data is missing or not exactly one
   descriptor; note mapfd has not been extracted yet, so we exit via
   out_close2 (socket only).  */
334 if (__builtin_expect (CMSG_FIRSTHDR (&msg) == NULL
335 || (CMSG_FIRSTHDR (&msg)->cmsg_len
336 != CMSG_LEN (sizeof (int))), 0))
337 goto out_close2;
339 int *ip = (void *) CMSG_DATA (cmsg);
340 mapfd = *ip;
/* The payload must be either just the key (old daemons) or the key
   plus a 64-bit mapping size.  */
342 if (__builtin_expect (n != keylen && n != keylen + sizeof (mapsize), 0))
343 goto out_close;
345 if (__builtin_expect (strcmp (resdata, key) != 0, 0))
346 goto out_close;
/* No size in the reply: determine it from the file itself.  */
348 if (__builtin_expect (n == keylen, 0))
350 struct stat64 st;
351 if (__builtin_expect (fstat64 (mapfd, &st) != 0, 0)
352 || __builtin_expect (st.st_size < sizeof (struct database_pers_head),
354 goto out_close;
356 mapsize = st.st_size;
359 /* The file is large enough, map it now. */
360 void *mapping = __mmap (NULL, mapsize, PROT_READ, MAP_SHARED, mapfd, 0);
361 if (__builtin_expect (mapping != MAP_FAILED, 1))
363 /* Check whether the database is correct and up-to-date. */
364 struct database_pers_head *head = mapping;
366 if (__builtin_expect (head->version != DB_VERSION, 0)
367 || __builtin_expect (head->header_size != sizeof (*head), 0)
368 /* Catch some misconfiguration. The server should catch
369 them now but some older versions did not. */
370 || __builtin_expect (head->module == 0, 0)
371 /* This really should not happen but who knows, maybe the update
372 thread got stuck. */
373 || __builtin_expect (! head->nscd_certainly_running
374 && (head->timestamp + MAPPING_TIMEOUT
375 < time (NULL)), 0))
377 out_unmap:
378 __munmap (mapping, mapsize);
379 goto out_close;
/* Minimum size the mapping must have according to its own header:
   header + hash array (rounded) + data area.  */
382 size_t size = (sizeof (*head) + roundup (head->module * sizeof (ref_t),
383 ALIGN)
384 + head->data_size);
386 if (__builtin_expect (mapsize < size, 0))
387 goto out_unmap;
389 /* Allocate a record for the mapping. */
390 struct mapped_database *newp = malloc (sizeof (*newp));
391 if (newp == NULL)
392 /* Ugh, after all we went through the memory allocation failed. */
393 goto out_unmap;
395 newp->head = mapping;
396 newp->data = ((char *) mapping + head->header_size
397 + roundup (head->module * sizeof (ref_t), ALIGN));
398 newp->mapsize = size;
399 newp->datasize = head->data_size;
400 /* Set counter to 1 to show it is usable. */
401 newp->counter = 1;
403 result = newp;
/* Layered cleanup: the received fd is no longer needed once (and
   whether or not) the mapping exists; the socket is always closed.  */
406 out_close:
407 __close (mapfd);
408 out_close2:
409 __close (sock);
410 out:
411 __set_errno (saved_errno);
412 #endif /* SCM_RIGHTS */
/* Publish RESULT and drop the reference previously held through
   *MAPPEDP, unmapping it if we held the last reference.  */
414 struct mapped_database *oldval = *mappedp;
415 *mappedp = result;
417 if (oldval != NULL && atomic_decrement_val (&oldval->counter) == 0)
418 __nscd_unmap (oldval);
420 return result;
/* Obtain a referenced-counted handle on the shared mapping for the
   database NAME, refreshing it via __nscd_get_mapping when it is
   missing, stale, or too small.  Stores the current GC cycle counter
   through GC_CYCLEP.  Returns NO_MAPPING when the mapping is unusable
   (including while garbage collection is in progress, indicated by an
   odd gc_cycle value); otherwise returns the mapping with its
   reference count incremented — the caller must drop it later.  */
423 struct mapped_database *
424 __nscd_get_map_ref (request_type type, const char *name,
425 volatile struct locked_map_ptr *mapptr, int *gc_cyclep)
/* Quick unlocked check: once a database is known to be unmapped
   (NO_MAPPING) we never retry.  */
427 struct mapped_database *cur = mapptr->mapped;
428 if (cur == NO_MAPPING)
429 return cur;
431 if (!__nscd_acquire_maplock (mapptr))
432 return NO_MAPPING;
/* Re-read under the lock; another thread may have changed it.  */
434 cur = mapptr->mapped;
436 if (__builtin_expect (cur != NO_MAPPING, 1))
438 /* If not mapped or timestamp not updated, request new map. */
439 if (cur == NULL
440 || (cur->head->nscd_certainly_running == 0
441 && cur->head->timestamp + MAPPING_TIMEOUT < time (NULL))
442 || cur->head->data_size > cur->datasize)
443 cur = __nscd_get_mapping (type, name,
444 (struct mapped_database **) &mapptr->mapped);
446 if (__builtin_expect (cur != NO_MAPPING, 1))
/* An odd gc_cycle means nscd is currently garbage-collecting;
   the data cannot be trusted, so do not hand out a reference.  */
448 if (__builtin_expect (((*gc_cyclep = cur->head->gc_cycle) & 1) != 0,
450 cur = NO_MAPPING;
451 else
452 atomic_increment (&cur->counter);
/* Release the map lock acquired above.  */
456 mapptr->lock = 0;
458 return cur;
462 /* Using sizeof (hashentry) is not always correct to determine the size of
463 the data structure as found in the nscd cache. The program could be
464 a 64-bit process and nscd could be a 32-bit process. In this case
465 sizeof (hashentry) would overestimate the size. The following is
466 the minimum size of such an entry, good enough for our tests here. */
467 #define MINIMUM_HASHENTRY_SIZE \
468 (offsetof (struct hashentry, dellist) + sizeof (int32_t))
471 /* Don't return const struct datahead *, as even though the record
472 is normally constant, it can change arbitrarily during nscd
473 garbage collection. */
/* Look up (TYPE, KEY) in the shared-memory hash table of MAPPED.
   Every offset read from the shared segment is range-checked against
   DATASIZE before use, because nscd's garbage collector may rewrite
   the data concurrently.  Returns the datahead for the entry (with at
   least DATALEN bytes of payload in bounds) or NULL if not found or
   if the table appears corrupted/misaligned.  */
474 struct datahead *
475 __nscd_cache_search (request_type type, const char *key, size_t keylen,
476 const struct mapped_database *mapped, size_t datalen)
478 unsigned long int hash = __nis_hash (key, keylen) % mapped->head->module;
479 size_t datasize = mapped->datasize;
/* TRAIL lags behind WORK, advancing every other iteration (see TICK
   below), so a cycle in the chain makes WORK catch up with TRAIL.
   LOOP_CNT additionally bounds the walk by the maximum number of
   entries that could possibly fit in the data area.  */
481 ref_t trail = mapped->head->array[hash];
482 trail = atomic_forced_read (trail);
483 ref_t work = trail;
484 size_t loop_cnt = datasize / (MINIMUM_HASHENTRY_SIZE
485 + offsetof (struct datahead, data) / 2);
486 int tick = 0;
488 while (work != ENDREF && work + MINIMUM_HASHENTRY_SIZE <= datasize)
490 struct hashentry *here = (struct hashentry *) (mapped->data + work);
491 ref_t here_key, here_packet;
493 #ifndef _STRING_ARCH_unaligned
494 /* Although during garbage collection when moving struct hashentry
495 records around we first copy from old to new location and then
496 adjust pointer from previous hashentry to it, there is no barrier
497 between those memory writes. It is very unlikely to hit it,
498 so check alignment only if a misaligned load can crash the
499 application. */
500 if ((uintptr_t) here & (__alignof__ (*here) - 1))
501 return NULL;
502 #endif
/* Match on type and key; the key and packet offsets are read once
   (atomic_forced_read) and bounds-checked before dereferencing.  */
504 if (type == here->type
505 && keylen == here->len
506 && (here_key = atomic_forced_read (here->key)) + keylen <= datasize
507 && memcmp (key, mapped->data + here_key, keylen) == 0
508 && ((here_packet = atomic_forced_read (here->packet))
509 + sizeof (struct datahead) <= datasize))
511 /* We found the entry. Increment the appropriate counter. */
512 struct datahead *dh
513 = (struct datahead *) (mapped->data + here_packet);
515 #ifndef _STRING_ARCH_unaligned
516 if ((uintptr_t) dh & (__alignof__ (*dh) - 1))
517 return NULL;
518 #endif
520 /* See whether we must ignore the entry or whether something
521 is wrong because garbage collection is in progress. */
522 if (dh->usable
523 && here_packet + dh->allocsize <= datasize
524 && (here_packet + offsetof (struct datahead, data) + datalen
525 <= datasize))
526 return dh;
529 work = atomic_forced_read (here->next);
530 /* Prevent endless loops. This should never happen but perhaps
531 the database got corrupted, accidentally or deliberately. */
532 if (work == trail || loop_cnt-- == 0)
533 break;
/* Advance TRAIL only on every second iteration so WORK runs ahead
   at twice the speed (tortoise-and-hare cycle detection).  */
534 if (tick)
536 struct hashentry *trailelem;
537 trailelem = (struct hashentry *) (mapped->data + trail);
539 #ifndef _STRING_ARCH_unaligned
540 /* We have to redo the checks. Maybe the data changed. */
541 if ((uintptr_t) trailelem & (__alignof__ (*trailelem) - 1))
542 return NULL;
543 #endif
545 if (trail + MINIMUM_HASHENTRY_SIZE > datasize)
546 return NULL;
548 trail = atomic_forced_read (trailelem->next);
550 tick = 1 - tick;
/* Not found (or walk abandoned for safety).  */
553 return NULL;
557 /* Create a socket connected to a name. */
559 __nscd_open_socket (const char *key, size_t keylen, request_type type,
560 void *response, size_t responselen)
562 /* This should never happen and it is something the nscd daemon
563 enforces, too. He it helps to limit the amount of stack
564 used. */
565 if (keylen > MAXKEYLEN)
566 return -1;
568 int saved_errno = errno;
570 int sock = open_socket (type, key, keylen);
571 if (sock >= 0)
573 /* Wait for data. */
574 if (wait_on_socket (sock, 5 * 1000) > 0)
576 ssize_t nbytes = TEMP_FAILURE_RETRY (__read (sock, response,
577 responselen));
578 if (nbytes == (ssize_t) responselen)
579 return sock;
582 close_not_cancel_no_status (sock);
585 __set_errno (saved_errno);
587 return -1;