/* Copyright (C) 1998-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/poll.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <not-cancel.h>
#include <nis/rpcsvc/nis.h>
#include <kernel-features.h>

#include "nscd-client.h"
41 /* Extra time we wait if the socket is still receiving data. This
42 value is in milliseconds. Note that the other side is nscd on the
43 local machine and it is already transmitting data. So the wait
44 time need not be long. */
45 #define EXTRA_RECEIVE_TIME 200
49 wait_on_socket (int sock
, long int usectmo
)
53 fds
[0].events
= POLLIN
| POLLERR
| POLLHUP
;
54 int n
= __poll (fds
, 1, usectmo
);
55 if (n
== -1 && __builtin_expect (errno
== EINTR
, 0))
57 /* Handle the case where the poll() call is interrupted by a
58 signal. We cannot just use TEMP_FAILURE_RETRY since it might
59 lead to infinite loops. */
61 (void) __gettimeofday (&now
, NULL
);
62 long int end
= now
.tv_sec
* 1000 + usectmo
+ (now
.tv_usec
+ 500) / 1000;
63 long int timeout
= usectmo
;
66 n
= __poll (fds
, 1, timeout
);
67 if (n
!= -1 || errno
!= EINTR
)
70 /* Recompute the timeout time. */
71 (void) __gettimeofday (&now
, NULL
);
72 timeout
= end
- (now
.tv_sec
* 1000 + (now
.tv_usec
+ 500) / 1000);
81 __readall (int fd
, void *buf
, size_t len
)
88 ret
= TEMP_FAILURE_RETRY (__read (fd
, buf
, n
));
91 if (__builtin_expect (ret
< 0 && errno
== EAGAIN
, 0)
92 /* The socket is still receiving data. Wait a bit more. */
93 && wait_on_socket (fd
, EXTRA_RECEIVE_TIME
) > 0)
98 buf
= (char *) buf
+ ret
;
102 return ret
< 0 ? ret
: len
- n
;
107 __readvall (int fd
, const struct iovec
*iov
, int iovcnt
)
109 ssize_t ret
= TEMP_FAILURE_RETRY (__readv (fd
, iov
, iovcnt
));
112 if (__builtin_expect (ret
== 0 || errno
!= EAGAIN
, 1))
113 /* A genuine error or no data to read. */
116 /* The data has not all yet been received. Do as if we have not
117 read anything yet. */
122 for (int i
= 0; i
< iovcnt
; ++i
)
123 total
+= iov
[i
].iov_len
;
127 struct iovec iov_buf
[iovcnt
];
130 struct iovec
*iovp
= memcpy (iov_buf
, iov
, iovcnt
* sizeof (*iov
));
133 while (iovp
->iov_len
<= r
)
139 iovp
->iov_base
= (char *) iovp
->iov_base
+ r
;
142 r
= TEMP_FAILURE_RETRY (__readv (fd
, iovp
, iovcnt
));
145 if (__builtin_expect (r
< 0 && errno
== EAGAIN
, 0)
146 /* The socket is still receiving data. Wait a bit more. */
147 && wait_on_socket (fd
, EXTRA_RECEIVE_TIME
) > 0)
163 open_socket (request_type type
, const char *key
, size_t keylen
)
168 # ifndef __ASSUME_SOCK_CLOEXEC
169 if (__have_sock_cloexec
>= 0)
172 sock
= __socket (PF_UNIX
, SOCK_STREAM
| SOCK_CLOEXEC
| SOCK_NONBLOCK
, 0);
173 # ifndef __ASSUME_SOCK_CLOEXEC
174 if (__have_sock_cloexec
== 0)
175 __have_sock_cloexec
= sock
!= -1 || errno
!= EINVAL
? 1 : -1;
179 #ifndef __ASSUME_SOCK_CLOEXEC
181 if (__have_sock_cloexec
< 0)
183 sock
= __socket (PF_UNIX
, SOCK_STREAM
, 0);
193 size_t real_sizeof_reqdata
= sizeof (request_header
) + keylen
;
195 #ifndef __ASSUME_SOCK_CLOEXEC
196 # ifdef SOCK_NONBLOCK
197 if (__have_sock_cloexec
< 0)
199 /* Make socket non-blocking. */
200 __fcntl (sock
, F_SETFL
, O_RDWR
| O_NONBLOCK
);
203 struct sockaddr_un sun
;
204 sun
.sun_family
= AF_UNIX
;
205 strcpy (sun
.sun_path
, _PATH_NSCDSOCKET
);
206 if (__connect (sock
, (struct sockaddr
*) &sun
, sizeof (sun
)) < 0
207 && errno
!= EINPROGRESS
)
210 reqdata
.req
.version
= NSCD_VERSION
;
211 reqdata
.req
.type
= type
;
212 reqdata
.req
.key_len
= keylen
;
214 memcpy (reqdata
.key
, key
, keylen
);
216 bool first_try
= true;
217 struct timeval tvend
;
218 /* Fake initializing tvend. */
219 asm ("" : "=m" (tvend
));
223 # define MSG_NOSIGNAL 0
225 ssize_t wres
= TEMP_FAILURE_RETRY (__send (sock
, &reqdata
,
228 if (__builtin_expect (wres
== (ssize_t
) real_sizeof_reqdata
, 1))
229 /* We managed to send the request. */
232 if (wres
!= -1 || errno
!= EAGAIN
)
233 /* Something is really wrong, no chance to continue. */
236 /* The daemon is busy wait for it. */
239 (void) __gettimeofday (&now
, NULL
);
242 tvend
.tv_usec
= now
.tv_usec
;
243 tvend
.tv_sec
= now
.tv_sec
+ 5;
248 to
= ((tvend
.tv_sec
- now
.tv_sec
) * 1000
249 + (tvend
.tv_usec
- now
.tv_usec
) / 1000);
251 struct pollfd fds
[1];
253 fds
[0].events
= POLLOUT
| POLLERR
| POLLHUP
;
254 if (__poll (fds
, 1, to
) <= 0)
255 /* The connection timed out or broke down. */
258 /* We try to write again. */
262 close_not_cancel_no_status (sock
);
269 __nscd_unmap (struct mapped_database
*mapped
)
271 assert (mapped
->counter
== 0);
272 __munmap ((void *) mapped
->head
, mapped
->mapsize
);
277 /* Try to get a file descriptor for the shared meory segment
278 containing the database. */
279 struct mapped_database
*
280 __nscd_get_mapping (request_type type
, const char *key
,
281 struct mapped_database
**mappedp
)
283 struct mapped_database
*result
= NO_MAPPING
;
285 const size_t keylen
= strlen (key
) + 1;
286 int saved_errno
= errno
;
289 char resdata
[keylen
];
291 /* Open a socket and send the request. */
292 int sock
= open_socket (type
, key
, keylen
);
296 /* Room for the data sent along with the file descriptor. We expect
297 the key name back. */
300 iov
[0].iov_base
= resdata
;
301 iov
[0].iov_len
= keylen
;
302 iov
[1].iov_base
= &mapsize
;
303 iov
[1].iov_len
= sizeof (mapsize
);
308 char bytes
[CMSG_SPACE (sizeof (int))];
310 struct msghdr msg
= { .msg_iov
= iov
, .msg_iovlen
= 2,
311 .msg_control
= buf
.bytes
,
312 .msg_controllen
= sizeof (buf
) };
313 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR (&msg
);
315 cmsg
->cmsg_level
= SOL_SOCKET
;
316 cmsg
->cmsg_type
= SCM_RIGHTS
;
317 cmsg
->cmsg_len
= CMSG_LEN (sizeof (int));
319 /* This access is well-aligned since BUF is correctly aligned for an
320 int and CMSG_DATA preserves this alignment. */
321 memset (CMSG_DATA (cmsg
), '\xff', sizeof (int));
323 msg
.msg_controllen
= cmsg
->cmsg_len
;
325 if (wait_on_socket (sock
, 5 * 1000) <= 0)
328 # ifndef MSG_CMSG_CLOEXEC
329 # define MSG_CMSG_CLOEXEC 0
331 ssize_t n
= TEMP_FAILURE_RETRY (__recvmsg (sock
, &msg
, MSG_CMSG_CLOEXEC
));
333 if (__builtin_expect (CMSG_FIRSTHDR (&msg
) == NULL
334 || (CMSG_FIRSTHDR (&msg
)->cmsg_len
335 != CMSG_LEN (sizeof (int))), 0))
338 int *ip
= (void *) CMSG_DATA (cmsg
);
341 if (__builtin_expect (n
!= keylen
&& n
!= keylen
+ sizeof (mapsize
), 0))
344 if (__builtin_expect (strcmp (resdata
, key
) != 0, 0))
347 if (__builtin_expect (n
== keylen
, 0))
350 if (__builtin_expect (fstat64 (mapfd
, &st
) != 0, 0)
351 || __builtin_expect (st
.st_size
< sizeof (struct database_pers_head
),
355 mapsize
= st
.st_size
;
358 /* The file is large enough, map it now. */
359 void *mapping
= __mmap (NULL
, mapsize
, PROT_READ
, MAP_SHARED
, mapfd
, 0);
360 if (__builtin_expect (mapping
!= MAP_FAILED
, 1))
362 /* Check whether the database is correct and up-to-date. */
363 struct database_pers_head
*head
= mapping
;
365 if (__builtin_expect (head
->version
!= DB_VERSION
, 0)
366 || __builtin_expect (head
->header_size
!= sizeof (*head
), 0)
367 /* Catch some misconfiguration. The server should catch
368 them now but some older versions did not. */
369 || __builtin_expect (head
->module
== 0, 0)
370 /* This really should not happen but who knows, maybe the update
372 || __builtin_expect (! head
->nscd_certainly_running
373 && (head
->timestamp
+ MAPPING_TIMEOUT
377 __munmap (mapping
, mapsize
);
381 size_t size
= (sizeof (*head
) + roundup (head
->module
* sizeof (ref_t
),
385 if (__builtin_expect (mapsize
< size
, 0))
388 /* Allocate a record for the mapping. */
389 struct mapped_database
*newp
= malloc (sizeof (*newp
));
391 /* Ugh, after all we went through the memory allocation failed. */
394 newp
->head
= mapping
;
395 newp
->data
= ((char *) mapping
+ head
->header_size
396 + roundup (head
->module
* sizeof (ref_t
), ALIGN
));
397 newp
->mapsize
= size
;
398 newp
->datasize
= head
->data_size
;
399 /* Set counter to 1 to show it is usable. */
410 __set_errno (saved_errno
);
411 #endif /* SCM_RIGHTS */
413 struct mapped_database
*oldval
= *mappedp
;
416 if (oldval
!= NULL
&& atomic_decrement_val (&oldval
->counter
) == 0)
417 __nscd_unmap (oldval
);
422 struct mapped_database
*
423 __nscd_get_map_ref (request_type type
, const char *name
,
424 volatile struct locked_map_ptr
*mapptr
, int *gc_cyclep
)
426 struct mapped_database
*cur
= mapptr
->mapped
;
427 if (cur
== NO_MAPPING
)
430 if (!__nscd_acquire_maplock (mapptr
))
433 cur
= mapptr
->mapped
;
435 if (__builtin_expect (cur
!= NO_MAPPING
, 1))
437 /* If not mapped or timestamp not updated, request new map. */
439 || (cur
->head
->nscd_certainly_running
== 0
440 && cur
->head
->timestamp
+ MAPPING_TIMEOUT
< time (NULL
))
441 || cur
->head
->data_size
> cur
->datasize
)
442 cur
= __nscd_get_mapping (type
, name
,
443 (struct mapped_database
**) &mapptr
->mapped
);
445 if (__builtin_expect (cur
!= NO_MAPPING
, 1))
447 if (__builtin_expect (((*gc_cyclep
= cur
->head
->gc_cycle
) & 1) != 0,
451 atomic_increment (&cur
->counter
);
461 /* Using sizeof (hashentry) is not always correct to determine the size of
462 the data structure as found in the nscd cache. The program could be
463 a 64-bit process and nscd could be a 32-bit process. In this case
464 sizeof (hashentry) would overestimate the size. The following is
465 the minimum size of such an entry, good enough for our tests here. */
466 #define MINIMUM_HASHENTRY_SIZE \
467 (offsetof (struct hashentry, dellist) + sizeof (int32_t))
470 /* Don't return const struct datahead *, as eventhough the record
471 is normally constant, it can change arbitrarily during nscd
472 garbage collection. */
474 __nscd_cache_search (request_type type
, const char *key
, size_t keylen
,
475 const struct mapped_database
*mapped
, size_t datalen
)
477 unsigned long int hash
= __nis_hash (key
, keylen
) % mapped
->head
->module
;
478 size_t datasize
= mapped
->datasize
;
480 ref_t trail
= mapped
->head
->array
[hash
];
481 trail
= atomic_forced_read (trail
);
483 size_t loop_cnt
= datasize
/ (MINIMUM_HASHENTRY_SIZE
484 + offsetof (struct datahead
, data
) / 2);
487 while (work
!= ENDREF
&& work
+ MINIMUM_HASHENTRY_SIZE
<= datasize
)
489 struct hashentry
*here
= (struct hashentry
*) (mapped
->data
+ work
);
490 ref_t here_key
, here_packet
;
492 #ifndef _STRING_ARCH_unaligned
493 /* Although during garbage collection when moving struct hashentry
494 records around we first copy from old to new location and then
495 adjust pointer from previous hashentry to it, there is no barrier
496 between those memory writes. It is very unlikely to hit it,
497 so check alignment only if a misaligned load can crash the
499 if ((uintptr_t) here
& (__alignof__ (*here
) - 1))
503 if (type
== here
->type
504 && keylen
== here
->len
505 && (here_key
= atomic_forced_read (here
->key
)) + keylen
<= datasize
506 && memcmp (key
, mapped
->data
+ here_key
, keylen
) == 0
507 && ((here_packet
= atomic_forced_read (here
->packet
))
508 + sizeof (struct datahead
) <= datasize
))
510 /* We found the entry. Increment the appropriate counter. */
512 = (struct datahead
*) (mapped
->data
+ here_packet
);
514 #ifndef _STRING_ARCH_unaligned
515 if ((uintptr_t) dh
& (__alignof__ (*dh
) - 1))
519 /* See whether we must ignore the entry or whether something
520 is wrong because garbage collection is in progress. */
522 && here_packet
+ dh
->allocsize
<= datasize
523 && (here_packet
+ offsetof (struct datahead
, data
) + datalen
528 work
= atomic_forced_read (here
->next
);
529 /* Prevent endless loops. This should never happen but perhaps
530 the database got corrupted, accidentally or deliberately. */
531 if (work
== trail
|| loop_cnt
-- == 0)
535 struct hashentry
*trailelem
;
536 trailelem
= (struct hashentry
*) (mapped
->data
+ trail
);
538 #ifndef _STRING_ARCH_unaligned
539 /* We have to redo the checks. Maybe the data changed. */
540 if ((uintptr_t) trailelem
& (__alignof__ (*trailelem
) - 1))
544 if (trail
+ MINIMUM_HASHENTRY_SIZE
> datasize
)
547 trail
= atomic_forced_read (trailelem
->next
);
556 /* Create a socket connected to a name. */
558 __nscd_open_socket (const char *key
, size_t keylen
, request_type type
,
559 void *response
, size_t responselen
)
561 /* This should never happen and it is something the nscd daemon
562 enforces, too. He it helps to limit the amount of stack
564 if (keylen
> MAXKEYLEN
)
567 int saved_errno
= errno
;
569 int sock
= open_socket (type
, key
, keylen
);
573 if (wait_on_socket (sock
, 5 * 1000) > 0)
575 ssize_t nbytes
= TEMP_FAILURE_RETRY (__read (sock
, response
,
577 if (nbytes
== (ssize_t
) responselen
)
581 close_not_cancel_no_status (sock
);
584 __set_errno (saved_errno
);