/* Copyright (C) 1998-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
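
/* Helper functions used by the nscd client code in libc: waiting for
   and reading data from the nscd socket, sending requests, and
   mapping and searching the shared memory databases exported by the
   nscd daemon.  */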
#include <alloca.h>
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/poll.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <not-cancel.h>
#include <kernel-features.h>

#include "nscd-client.h"
#include "nscd_hash.h"

/* Extra time we wait if the socket is still receiving data.  This
   value is in milliseconds.  Note that the other side is nscd on the
   local machine and it is already transmitting data.  So the wait
   time need not be long.  */
#define EXTRA_RECEIVE_TIME 200
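
/* Wait at most USECTMO milliseconds for SOCK to become readable (or
   to report an error or hangup).  The EINTR handling keeps an
   absolute deadline so that repeated signal interruptions cannot
   extend the total wait.  The return value is that of the final
   __poll call.  */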
static int
wait_on_socket (int sock, long int usectmo)
{
  struct pollfd fds[1];
  fds[0].fd = sock;
  fds[0].events = POLLIN | POLLERR | POLLHUP;
  int n = __poll (fds, 1, usectmo);
  if (n == -1 && __builtin_expect (errno == EINTR, 0))
    {
      /* Handle the case where the poll() call is interrupted by a
         signal.  We cannot just use TEMP_FAILURE_RETRY since it might
         lead to infinite loops.  */
      struct timeval now;
      (void) __gettimeofday (&now, NULL);
      long int end = now.tv_sec * 1000 + usectmo + (now.tv_usec + 500) / 1000;
      long int timeout = usectmo;
      while (1)
        {
          n = __poll (fds, 1, timeout);
          if (n != -1 || errno != EINTR)
            break;

          /* Recompute the timeout time.  */
          (void) __gettimeofday (&now, NULL);
          timeout = end - (now.tv_sec * 1000 + (now.tv_usec + 500) / 1000);
        }
    }

  return n;
}
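
/* Read up to LEN bytes from FD into BUF, retrying short reads and
   waiting briefly (see EXTRA_RECEIVE_TIME) when the non-blocking
   socket reports EAGAIN while nscd is still sending.  Returns the
   number of bytes read, or the error return of __read.  */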
ssize_t
__readall (int fd, void *buf, size_t len)
{
  size_t n = len;
  ssize_t ret;
  do
    {
    again:
      ret = TEMP_FAILURE_RETRY (__read (fd, buf, n));
      if (ret <= 0)
        {
          if (__builtin_expect (ret < 0 && errno == EAGAIN, 0)
              /* The socket is still receiving data.  Wait a bit more.  */
              && wait_on_socket (fd, EXTRA_RECEIVE_TIME) > 0)
            goto again;

          break;
        }
      buf = (char *) buf + ret;
      n -= ret;
    }
  while (n > 0);
  return ret < 0 ? ret : len - n;
}
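
/* Like __readall, but scatter the data into the IOVCNT buffers
   described by IOV.  A local copy of the iovec array is advanced as
   data arrives so that partially filled buffers are continued rather
   than overwritten.  */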
ssize_t
__readvall (int fd, const struct iovec *iov, int iovcnt)
{
  ssize_t ret = TEMP_FAILURE_RETRY (__readv (fd, iov, iovcnt));
  if (ret <= 0)
    {
      if (__glibc_likely (ret == 0 || errno != EAGAIN))
        /* A genuine error or no data to read.  */
        return ret;

      /* The data has not all yet been received.  Do as if we have not
         read anything yet.  */
      ret = 0;
    }

  size_t total = 0;
  for (int i = 0; i < iovcnt; ++i)
    total += iov[i].iov_len;

  if (ret < total)
    {
      struct iovec iov_buf[iovcnt];
      ssize_t r = ret;

      struct iovec *iovp = memcpy (iov_buf, iov, iovcnt * sizeof (*iov));
      do
        {
          while (iovp->iov_len <= r)
            {
              r -= iovp->iov_len;
              --iovcnt;
              ++iovp;
            }
          iovp->iov_base = (char *) iovp->iov_base + r;
          iovp->iov_len -= r;
        again:
          r = TEMP_FAILURE_RETRY (__readv (fd, iovp, iovcnt));
          if (r <= 0)
            {
              if (__builtin_expect (r < 0 && errno == EAGAIN, 0)
                  /* The socket is still receiving data.  Wait a bit more.  */
                  && wait_on_socket (fd, EXTRA_RECEIVE_TIME) > 0)
                goto again;

              break;
            }
          ret += r;
        }
      while (ret < total);
      if (r < 0)
        ret = r;
    }
  return ret;
}
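
/* Connect a new non-blocking AF_UNIX socket to the nscd socket
   (_PATH_NSCDSOCKET) and transmit a request header of the given TYPE
   followed by the KEYLEN bytes of KEY.  Returns the connected socket
   on success; on failure the socket is closed and -1 is returned.  */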
static int
open_socket (request_type type, const char *key, size_t keylen)
{
  int sock;

  sock = __socket (PF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0);
  if (sock < 0)
    return -1;

  size_t real_sizeof_reqdata = sizeof (request_header) + keylen;
  struct
  {
    request_header req;
    char key[];
  } *reqdata = alloca (real_sizeof_reqdata);

  struct sockaddr_un sun;
  sun.sun_family = AF_UNIX;
  strcpy (sun.sun_path, _PATH_NSCDSOCKET);
  if (__connect (sock, (struct sockaddr *) &sun, sizeof (sun)) < 0
      && errno != EINPROGRESS)
    goto out;

  reqdata->req.version = NSCD_VERSION;
  reqdata->req.type = type;
  reqdata->req.key_len = keylen;

  memcpy (reqdata->key, key, keylen);

  bool first_try = true;
  struct timeval tvend;
  /* Fake initializing tvend.  */
  asm ("" : "=m" (tvend));
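  /* The send below can fail with EAGAIN while the daemon is busy.  In
     that case keep retrying for roughly five seconds; TVEND holds the
     absolute deadline and is initialized on the first failed
     attempt.  */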
  while (1)
    {
#ifndef MSG_NOSIGNAL
# define MSG_NOSIGNAL 0
#endif
      ssize_t wres = TEMP_FAILURE_RETRY (__send (sock, reqdata,
                                                 real_sizeof_reqdata,
                                                 MSG_NOSIGNAL));
      if (__glibc_likely (wres == (ssize_t) real_sizeof_reqdata))
        /* We managed to send the request.  */
        return sock;

      if (wres != -1 || errno != EAGAIN)
        /* Something is really wrong, no chance to continue.  */
        break;

      /* The daemon is busy; wait for it.  */
      int to;
      struct timeval now;
      (void) __gettimeofday (&now, NULL);
      if (first_try)
        {
          tvend.tv_usec = now.tv_usec;
          tvend.tv_sec = now.tv_sec + 5;
          to = 5 * 1000;
          first_try = false;
        }
      else
        to = ((tvend.tv_sec - now.tv_sec) * 1000
              + (tvend.tv_usec - now.tv_usec) / 1000);

      struct pollfd fds[1];
      fds[0].fd = sock;
      fds[0].events = POLLOUT | POLLERR | POLLHUP;
      if (__poll (fds, 1, to) <= 0)
        /* The connection timed out or broke down.  */
        break;

      /* We try to write again.  */
    }

 out:
  __close_nocancel_nostatus (sock);

  return -1;
}
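
/* Release a mapping whose reference counter has dropped to zero:
   unmap the database and free the bookkeeping record.  */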
void
__nscd_unmap (struct mapped_database *mapped)
{
  assert (mapped->counter == 0);
  __munmap ((void *) mapped->head, mapped->mapsize);
  free (mapped);
}

/* Try to get a file descriptor for the shared memory segment
   containing the database.  */
struct mapped_database *
__nscd_get_mapping (request_type type, const char *key,
                    struct mapped_database **mappedp)
{
  struct mapped_database *result = NO_MAPPING;
#ifdef SCM_RIGHTS
  const size_t keylen = strlen (key) + 1;
  int saved_errno = errno;

  int mapfd = -1;
  char resdata[keylen];

  /* Open a socket and send the request.  */
  int sock = open_socket (type, key, keylen);
  if (sock < 0)
    goto out;

  /* Room for the data sent along with the file descriptor.  We expect
     the key name back.  */
  uint64_t mapsize;
  struct iovec iov[2];
  iov[0].iov_base = resdata;
  iov[0].iov_len = keylen;
  iov[1].iov_base = &mapsize;
  iov[1].iov_len = sizeof (mapsize);

  union
  {
    struct cmsghdr hdr;
    char bytes[CMSG_SPACE (sizeof (int))];
  } buf;
  struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2,
                        .msg_control = buf.bytes,
                        .msg_controllen = sizeof (buf) };
  struct cmsghdr *cmsg = CMSG_FIRSTHDR (&msg);

  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN (sizeof (int));

  /* This access is well-aligned since BUF is correctly aligned for an
     int and CMSG_DATA preserves this alignment.  */
  memset (CMSG_DATA (cmsg), '\xff', sizeof (int));

  msg.msg_controllen = cmsg->cmsg_len;
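
  /* nscd answers by echoing the key, optionally followed by the size
     of the persistent database, and passes the descriptor of the
     database file as SCM_RIGHTS ancillary data.  Give the daemon up
     to five seconds to reply before receiving the message.  */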
  if (wait_on_socket (sock, 5 * 1000) <= 0)
    goto out_close2;

# ifndef MSG_CMSG_CLOEXEC
#  define MSG_CMSG_CLOEXEC 0
# endif
  ssize_t n = TEMP_FAILURE_RETRY (__recvmsg (sock, &msg, MSG_CMSG_CLOEXEC));

  if (__builtin_expect (CMSG_FIRSTHDR (&msg) == NULL
                        || (CMSG_FIRSTHDR (&msg)->cmsg_len
                            != CMSG_LEN (sizeof (int))), 0))
    goto out_close2;

  int *ip = (void *) CMSG_DATA (cmsg);
  mapfd = *ip;

  if (__glibc_unlikely (n != keylen && n != keylen + sizeof (mapsize)))
    goto out_close;

  if (__glibc_unlikely (strcmp (resdata, key) != 0))
    goto out_close;

  if (__glibc_unlikely (n == keylen))
    {
      struct stat64 st;
      if (__builtin_expect (fstat64 (mapfd, &st) != 0, 0)
          || __builtin_expect (st.st_size < sizeof (struct database_pers_head),
                               0))
        goto out_close;

      mapsize = st.st_size;
    }

  /* The file is large enough, map it now.  */
  void *mapping = __mmap (NULL, mapsize, PROT_READ, MAP_SHARED, mapfd, 0);
  if (__glibc_likely (mapping != MAP_FAILED))
    {
      /* Check whether the database is correct and up-to-date.  */
      struct database_pers_head *head = mapping;

      if (__builtin_expect (head->version != DB_VERSION, 0)
          || __builtin_expect (head->header_size != sizeof (*head), 0)
          /* Catch some misconfiguration.  The server should catch
             them now but some older versions did not.  */
          || __builtin_expect (head->module == 0, 0)
          /* This really should not happen but who knows, maybe the update
             thread got stuck.  */
          || __builtin_expect (! head->nscd_certainly_running
                               && (head->timestamp + MAPPING_TIMEOUT
                                   < time (NULL)), 0))
        {
        out_unmap:
          __munmap (mapping, mapsize);
          goto out_close;
        }

      size_t size = (sizeof (*head) + roundup (head->module * sizeof (ref_t),
                                               ALIGN)
                     + head->data_size);

      if (__glibc_unlikely (mapsize < size))
        goto out_unmap;

      /* Allocate a record for the mapping.  */
      struct mapped_database *newp = malloc (sizeof (*newp));
      if (newp == NULL)
        /* Ugh, after all we went through the memory allocation failed.  */
        goto out_unmap;

      newp->head = mapping;
      newp->data = ((char *) mapping + head->header_size
                    + roundup (head->module * sizeof (ref_t), ALIGN));
      newp->mapsize = size;
      newp->datasize = head->data_size;
      /* Set counter to 1 to show it is usable.  */
      newp->counter = 1;

      result = newp;
    }

 out_close:
  __close (mapfd);
 out_close2:
  __close (sock);
 out:
  __set_errno (saved_errno);
#endif /* SCM_RIGHTS */

  struct mapped_database *oldval = *mappedp;
  *mappedp = result;

  if (oldval != NULL && atomic_decrement_val (&oldval->counter) == 0)
    __nscd_unmap (oldval);

  return result;
}
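
/* Return a reference-counted pointer to the database mapping stored
   in MAPPTR, refreshing it via __nscd_get_mapping when it is missing,
   stale, or smaller than the amount of data the server now reports.
   While nscd's garbage collection is running (odd gc_cycle value) the
   cached records may move, so NO_MAPPING is returned instead.

   A rough sketch of how the lookup functions use this interface; the
   identifiers map_handle, gc_cycle, key, keylen and datalen are
   illustrative only and not defined in this file:

     struct mapped_database *db
       = __nscd_get_map_ref (GETFDPW, "passwd", &map_handle, &gc_cycle);
     if (db != NO_MAPPING)
       {
         struct datahead *found
           = __nscd_cache_search (GETPWBYNAME, key, keylen, db, datalen);
         ... use FOUND, then drop the reference again ...
       }
*/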
struct mapped_database *
__nscd_get_map_ref (request_type type, const char *name,
                    volatile struct locked_map_ptr *mapptr, int *gc_cyclep)
{
  struct mapped_database *cur = mapptr->mapped;
  if (cur == NO_MAPPING)
    return cur;

  if (!__nscd_acquire_maplock (mapptr))
    return NO_MAPPING;

  cur = mapptr->mapped;

  if (__glibc_likely (cur != NO_MAPPING))
    {
      /* If not mapped or timestamp not updated, request new map.  */
      if (cur == NULL
          || (cur->head->nscd_certainly_running == 0
              && cur->head->timestamp + MAPPING_TIMEOUT < time (NULL))
          || cur->head->data_size > cur->datasize)
        cur = __nscd_get_mapping (type, name,
                                  (struct mapped_database **) &mapptr->mapped);

      if (__glibc_likely (cur != NO_MAPPING))
        {
          if (__builtin_expect (((*gc_cyclep = cur->head->gc_cycle) & 1) != 0,
                                0))
            cur = NO_MAPPING;
          else
            atomic_increment (&cur->counter);
        }
    }

  /* Release the lock.  */
  mapptr->lock = 0;

  return cur;
}

/* Using sizeof (hashentry) is not always correct to determine the size of
   the data structure as found in the nscd cache.  The program could be
   a 64-bit process and nscd could be a 32-bit process.  In this case
   sizeof (hashentry) would overestimate the size.  The following is
   the minimum size of such an entry, good enough for our tests here.  */
#define MINIMUM_HASHENTRY_SIZE \
  (offsetof (struct hashentry, dellist) + sizeof (int32_t))

/* Don't return const struct datahead *, as even though the record
   is normally constant, it can change arbitrarily during nscd
   garbage collection.  */
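
/* Search the mapped database for an entry of the given TYPE matching
   KEY.  The key is hashed onto one of head->module buckets and the
   collision chain is walked inside the read-only mapping.  Every
   offset read from the shared memory is range-checked against the
   recorded data size before it is dereferenced, and a trailing
   pointer together with a loop counter guards against cycles in a
   corrupted chain.  Returns the datahead record or NULL.  */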
struct datahead *
__nscd_cache_search (request_type type, const char *key, size_t keylen,
                     const struct mapped_database *mapped, size_t datalen)
{
  unsigned long int hash = __nscd_hash (key, keylen) % mapped->head->module;
  size_t datasize = mapped->datasize;

  ref_t trail = mapped->head->array[hash];
  trail = atomic_forced_read (trail);
  ref_t work = trail;
  size_t loop_cnt = datasize / (MINIMUM_HASHENTRY_SIZE
                                + offsetof (struct datahead, data) / 2);
  int tick = 0;

  while (work != ENDREF && work + MINIMUM_HASHENTRY_SIZE <= datasize)
    {
      struct hashentry *here = (struct hashentry *) (mapped->data + work);
      ref_t here_key, here_packet;

#if !_STRING_ARCH_unaligned
      /* Although during garbage collection when moving struct hashentry
         records around we first copy from old to new location and then
         adjust pointer from previous hashentry to it, there is no barrier
         between those memory writes.  It is very unlikely to hit it,
         so check alignment only if a misaligned load can crash the
         application.  */
      if ((uintptr_t) here & (__alignof__ (*here) - 1))
        return NULL;
#endif

      if (type == here->type
          && keylen == here->len
          && (here_key = atomic_forced_read (here->key)) + keylen <= datasize
          && memcmp (key, mapped->data + here_key, keylen) == 0
          && ((here_packet = atomic_forced_read (here->packet))
              + sizeof (struct datahead) <= datasize))
        {
          /* We found the entry.  Increment the appropriate counter.  */
          struct datahead *dh
            = (struct datahead *) (mapped->data + here_packet);

#if !_STRING_ARCH_unaligned
          if ((uintptr_t) dh & (__alignof__ (*dh) - 1))
            return NULL;
#endif

          /* See whether we must ignore the entry or whether something
             is wrong because garbage collection is in progress.  */
          if (dh->usable
              && here_packet + dh->allocsize <= datasize
              && (here_packet + offsetof (struct datahead, data) + datalen
                  <= datasize))
            return dh;
        }

      work = atomic_forced_read (here->next);
      /* Prevent endless loops.  This should never happen but perhaps
         the database got corrupted, accidentally or deliberately.  */
      if (work == trail || loop_cnt-- == 0)
        break;
      if (tick)
        {
          /* Advance the trailing pointer every other round so that a
             cycle in the chain is caught by the WORK == TRAIL test
             above.  */
          struct hashentry *trailelem;
          trailelem = (struct hashentry *) (mapped->data + trail);

#if !_STRING_ARCH_unaligned
          /* We have to redo the checks.  Maybe the data changed.  */
          if ((uintptr_t) trailelem & (__alignof__ (*trailelem) - 1))
            return NULL;
#endif

          if (trail + MINIMUM_HASHENTRY_SIZE > datasize)
            return NULL;

          trail = atomic_forced_read (trailelem->next);
        }
      tick = 1 - tick;
    }

  return NULL;
}
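
/* Send a TYPE/KEY request to nscd, wait up to five seconds for the
   answer and read RESPONSELEN bytes into RESPONSE.  On success the
   still-open socket is returned so the caller can read any additional
   payload; on failure errno is restored and -1 is returned.  */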
/* Create a socket connected to a name.  */
int
__nscd_open_socket (const char *key, size_t keylen, request_type type,
                    void *response, size_t responselen)
{
  /* This should never happen and it is something the nscd daemon
     enforces, too.  Here it helps to limit the amount of stack
     used.  */
  if (keylen > MAXKEYLEN)
    return -1;

  int saved_errno = errno;

  int sock = open_socket (type, key, keylen);
  if (sock >= 0)
    {
      /* Wait for data.  */
      if (wait_on_socket (sock, 5 * 1000) > 0)
        {
          ssize_t nbytes = TEMP_FAILURE_RETRY (__read (sock, response,
                                                       responselen));
          if (nbytes == (ssize_t) responselen)
            return sock;
        }

      __close_nocancel_nostatus (sock);
    }

  __set_errno (saved_errno);

  return -1;
}