/* Copyright (C) 1998-2002,2003,2004,2005,2006,2007
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <not-cancel.h>
#include <nis/rpcsvc/nis.h>
#include "nscd-client.h"

/* Read LEN bytes from FD into BUF, looping over short reads; returns
   LEN, a short count on EOF, or a negative value on error.  */
static ssize_t
__readall (int fd, void *buf, size_t len)
{
  size_t n = len;
  ssize_t ret;
  do
    {
      ret = TEMP_FAILURE_RETRY (__read (fd, buf, n));
      if (ret <= 0)
        break;
      buf = (char *) buf + ret;
      n -= ret;
    }
  while (n > 0);
  return ret < 0 ? ret : len - n;
}

static ssize_t
__readvall (int fd, const struct iovec *iov, int iovcnt)
{
  ssize_t ret = TEMP_FAILURE_RETRY (__readv (fd, iov, iovcnt));
  if (ret <= 0)
    return ret;

  size_t total = 0;
  for (int i = 0; i < iovcnt; ++i)
    total += iov[i].iov_len;

  if (ret < total)
    {
      /* Short read: copy the vector so that fully consumed entries can
         be skipped and the partially filled one adjusted, then keep
         calling readv until everything has arrived.  */
      struct iovec iov_buf[iovcnt];
      ssize_t r = ret;

      struct iovec *iovp = memcpy (iov_buf, iov, iovcnt * sizeof (*iov));
      do
        {
          while (iovp->iov_len <= r)
            {
              r -= iovp->iov_len;
              --iovcnt;
              ++iovp;
            }
          iovp->iov_base = (char *) iovp->iov_base + r;
          iovp->iov_len -= r;
          r = TEMP_FAILURE_RETRY (__readv (fd, iovp, iovcnt));
          if (r <= 0)
            break;
          ret += r;
        }
      while (ret < total);
      if (r < 0)
        ret = r;
    }
  return ret;
}
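
/* Open a non-blocking PF_UNIX stream socket to the nscd daemon at
   _PATH_NSCDSOCKET and transmit the request header followed by the
   lookup key.  If the daemon is busy, the send is retried while
   polling for writability for up to about five seconds.  Returns the
   connected socket or -1 on failure.  */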

static int
open_socket (request_type type, const char *key, size_t keylen)
{
  int sock = __socket (PF_UNIX, SOCK_STREAM, 0);
  if (sock < 0)
    return -1;

  /* Request header followed immediately by the key.  */
  struct
  {
    request_header req;
    char key[keylen];
  } reqdata;
  size_t real_sizeof_reqdata = sizeof (request_header) + keylen;

  /* Make socket non-blocking.  */
  __fcntl (sock, F_SETFL, O_RDWR | O_NONBLOCK);

  struct sockaddr_un sun;
  sun.sun_family = AF_UNIX;
  strcpy (sun.sun_path, _PATH_NSCDSOCKET);
  if (__connect (sock, (struct sockaddr *) &sun, sizeof (sun)) < 0
      && errno != EINPROGRESS)
    goto out;

  reqdata.req.version = NSCD_VERSION;
  reqdata.req.type = type;
  reqdata.req.key_len = keylen;

  memcpy (reqdata.key, key, keylen);

  bool first_try = true;
  struct timeval tvend;
  /* Fake initializing tvend.  */
  asm ("" : "=m" (tvend));
  while (1)
    {
#ifndef MSG_NOSIGNAL
# define MSG_NOSIGNAL 0
#endif
      ssize_t wres = TEMP_FAILURE_RETRY (__send (sock, &reqdata,
                                                 real_sizeof_reqdata,
                                                 MSG_NOSIGNAL));
      if (__builtin_expect (wres == (ssize_t) real_sizeof_reqdata, 1))
        /* We managed to send the request.  */
        return sock;

      if (wres != -1 || errno != EAGAIN)
        /* Something is really wrong, no chance to continue.  */
        break;

      /* The daemon is busy.  Wait for it.  */
      int to;
      struct timeval now;
      (void) __gettimeofday (&now, NULL);
      if (first_try)
        {
          tvend.tv_usec = now.tv_usec;
          tvend.tv_sec = now.tv_sec + 5;
          to = 5 * 1000;
          first_try = false;
        }
      else
        to = ((tvend.tv_sec - now.tv_sec) * 1000
              + (tvend.tv_usec - now.tv_usec) / 1000);

      struct pollfd fds[1];
      fds[0].fd = sock;
      fds[0].events = POLLOUT | POLLERR | POLLHUP;
      if (__poll (fds, 1, to) <= 0)
        /* The connection timed out or broke down.  */
        break;

      /* We try to write again.  */
    }

 out:
  close_not_cancel_no_status (sock);

  return -1;
}
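
/* Drop a mapping whose reference counter has reached zero: unmap the
   shared segment and free the bookkeeping record.  */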

void
__nscd_unmap (struct mapped_database *mapped)
{
  assert (mapped->counter == 0);
  __munmap ((void *) mapped->head, mapped->mapsize);
  free (mapped);
}
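
/* Wait up to five seconds for SOCK to become readable.  The deadline
   is kept in milliseconds so that a poll interrupted by a signal can
   be restarted with only the remaining time, which is why plain
   TEMP_FAILURE_RETRY is not used here.  */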

static int
wait_on_socket (int sock)
{
  struct pollfd fds[1];
  fds[0].fd = sock;
  fds[0].events = POLLIN | POLLERR | POLLHUP;
  int n = __poll (fds, 1, 5 * 1000);
  if (n == -1 && __builtin_expect (errno == EINTR, 0))
    {
      /* Handle the case where the poll() call is interrupted by a
         signal.  We cannot just use TEMP_FAILURE_RETRY since it might
         lead to infinite loops.  */
      struct timeval now;
      (void) __gettimeofday (&now, NULL);
      long int end = (now.tv_sec + 5) * 1000 + (now.tv_usec + 500) / 1000;
      while (1)
        {
          long int timeout = end - (now.tv_sec * 1000
                                    + (now.tv_usec + 500) / 1000);
          n = __poll (fds, 1, timeout);
          if (n != -1 || errno != EINTR)
            break;
          (void) __gettimeofday (&now, NULL);
        }
    }

  return n;
}
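
/* The reply to a mapping request carries the echoed key and possibly
   the mapping size in the normal data stream, while the descriptor
   for the database file arrives as SCM_RIGHTS ancillary data.  If no
   size is sent, it is determined with fstat64 on the received
   descriptor.  */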

/* Try to get a file descriptor for the shared memory segment
   containing the database.  */
static struct mapped_database *
get_mapping (request_type type, const char *key,
             struct mapped_database **mappedp)
{
  struct mapped_database *result = NO_MAPPING;
#ifdef SCM_RIGHTS
  const size_t keylen = strlen (key) + 1;
  int saved_errno = errno;

  int mapfd = -1;
  char resdata[keylen];

  /* Open a socket and send the request.  */
  int sock = open_socket (type, key, keylen);
  if (sock < 0)
    goto out;

  /* Room for the data sent along with the file descriptor.  We expect
     the key name back.  */
  uint64_t mapsize;
  struct iovec iov[2];
  iov[0].iov_base = resdata;
  iov[0].iov_len = keylen;
  iov[1].iov_base = &mapsize;
  iov[1].iov_len = sizeof (mapsize);

  union
  {
    struct cmsghdr hdr;
    char bytes[CMSG_SPACE (sizeof (int))];
  } buf;
  struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2,
                        .msg_control = buf.bytes,
                        .msg_controllen = sizeof (buf) };
  struct cmsghdr *cmsg = CMSG_FIRSTHDR (&msg);

  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN (sizeof (int));

  /* This access is well-aligned since BUF is correctly aligned for an
     int and CMSG_DATA preserves this alignment.  */
  *(int *) CMSG_DATA (cmsg) = -1;

  msg.msg_controllen = cmsg->cmsg_len;

  if (wait_on_socket (sock) <= 0)
    goto out_close2;

#ifndef MSG_CMSG_CLOEXEC
# define MSG_CMSG_CLOEXEC 0
#endif
  ssize_t n = TEMP_FAILURE_RETRY (__recvmsg (sock, &msg, MSG_CMSG_CLOEXEC));

  if (__builtin_expect (CMSG_FIRSTHDR (&msg) == NULL
                        || (CMSG_FIRSTHDR (&msg)->cmsg_len
                            != CMSG_LEN (sizeof (int))), 0))
    goto out_close2;

  mapfd = *(int *) CMSG_DATA (cmsg);

  if (__builtin_expect (n != keylen && n != keylen + sizeof (mapsize), 0))
    goto out_close;

  if (__builtin_expect (strcmp (resdata, key) != 0, 0))
    goto out_close;

  if (__builtin_expect (n == keylen, 0))
    {
      /* The daemon did not send the size; determine it from the file.  */
      struct stat64 st;
      if (__builtin_expect (fstat64 (mapfd, &st) != 0, 0)
          || __builtin_expect (st.st_size < sizeof (struct database_pers_head),
                               0))
        goto out_close;

      mapsize = st.st_size;
    }

  /* The file is large enough, map it now.  */
  void *mapping = __mmap (NULL, mapsize, PROT_READ, MAP_SHARED, mapfd, 0);
  if (__builtin_expect (mapping != MAP_FAILED, 1))
    {
      /* Check whether the database is correct and up-to-date.  */
      struct database_pers_head *head = mapping;

      if (__builtin_expect (head->version != DB_VERSION, 0)
          || __builtin_expect (head->header_size != sizeof (*head), 0)
          /* This really should not happen but who knows, maybe the update
             thread got stuck.  */
          || __builtin_expect (! head->nscd_certainly_running
                               && (head->timestamp + MAPPING_TIMEOUT
                                   < time (NULL)), 0))
        {
        out_unmap:
          __munmap (mapping, mapsize);
          goto out_close;
        }

      size_t size = (sizeof (*head) + roundup (head->module * sizeof (ref_t),
                                               ALIGN)
                     + head->data_size);

      if (__builtin_expect (mapsize < size, 0))
        goto out_unmap;

      /* Allocate a record for the mapping.  */
      struct mapped_database *newp = malloc (sizeof (*newp));
      if (newp == NULL)
        /* Ugh, after all we went through the memory allocation failed.  */
        goto out_unmap;

      newp->head = mapping;
      newp->data = ((char *) mapping + head->header_size
                    + roundup (head->module * sizeof (ref_t), ALIGN));
      newp->mapsize = size;
      newp->datasize = head->data_size;
      /* Set counter to 1 to show it is usable.  */
      newp->counter = 1;

      result = newp;
    }

 out_close:
  __close (mapfd);
 out_close2:
  close_not_cancel_no_status (sock);
 out:
  __set_errno (saved_errno);
#endif  /* SCM_RIGHTS */

  struct mapped_database *oldval = *mappedp;
  *mappedp = result;

  if (oldval != NULL && atomic_decrement_val (&oldval->counter) == 0)
    __nscd_unmap (oldval);

  return result;
}
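
/* Get a reference to the current mapping of a database, serialized by
   the lock word in *MAPPTR.  A missing, stale, or outgrown mapping is
   refreshed via get_mapping, the garbage-collection cycle counter is
   reported through *GC_CYCLEP, and the reference counter of the
   mapping is incremented before it is handed out.  */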

struct mapped_database *
__nscd_get_map_ref (request_type type, const char *name,
                    volatile struct locked_map_ptr *mapptr, int *gc_cyclep)
{
  struct mapped_database *cur = mapptr->mapped;
  if (cur == NO_MAPPING)
    return cur;

  int cnt = 0;
  while (__builtin_expect (atomic_compare_and_exchange_val_acq (&mapptr->lock,
                                                                1, 0) != 0, 0))
    {
      // XXX Best number of rounds?
      if (__builtin_expect (++cnt > 5, 0))
        return NO_MAPPING;

      atomic_delay ();
    }

  cur = mapptr->mapped;

  if (__builtin_expect (cur != NO_MAPPING, 1))
    {
      /* If not mapped or timestamp not updated, request new map.  */
      if (cur == NULL
          || (cur->head->nscd_certainly_running == 0
              && cur->head->timestamp + MAPPING_TIMEOUT < time (NULL))
          || cur->head->data_size > cur->datasize)
        cur = get_mapping (type, name,
                           (struct mapped_database **) &mapptr->mapped);

      if (__builtin_expect (cur != NO_MAPPING, 1))
        {
          /* While the GC cycle counter is odd a collection is in
             progress and the mapping must not be used.  */
          if (__builtin_expect (((*gc_cyclep = cur->head->gc_cycle) & 1) != 0,
                                0))
            cur = NO_MAPPING;
          else
            atomic_increment (&cur->counter);
        }
    }

  mapptr->lock = 0;

  return cur;
}
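
/* The mapped database is a hash table: head->array holds one ref_t
   chain head per bucket, and every ref_t is a byte offset into the
   data area where a struct hashentry (key metadata plus the offset of
   its datahead packet) lives.  Each offset is validated against
   DATASIZE before it is dereferenced.  */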

/* Don't return const struct datahead *, as even though the record
   is normally constant, it can change arbitrarily during nscd
   garbage collection.  */
struct datahead *
__nscd_cache_search (request_type type, const char *key, size_t keylen,
                     const struct mapped_database *mapped)
{
  unsigned long int hash = __nis_hash (key, keylen) % mapped->head->module;
  size_t datasize = mapped->datasize;

  ref_t work = mapped->head->array[hash];
  while (work != ENDREF && work + sizeof (struct hashentry) <= datasize)
    {
      struct hashentry *here = (struct hashentry *) (mapped->data + work);

#ifndef _STRING_ARCH_unaligned
      /* Although during garbage collection when moving struct hashentry
         records around we first copy from old to new location and then
         adjust pointer from previous hashentry to it, there is no barrier
         between those memory writes.  It is very unlikely to hit it,
         so check alignment only if a misaligned load can crash the
         application.  */
      if ((uintptr_t) here & (__alignof__ (*here) - 1))
        return NULL;
#endif

      if (type == here->type
          && keylen == here->len
          && here->key + keylen <= datasize
          && memcmp (key, mapped->data + here->key, keylen) == 0
          && here->packet + sizeof (struct datahead) <= datasize)
        {
          /* We found the entry.  Increment the appropriate counter.  */
          struct datahead *dh
            = (struct datahead *) (mapped->data + here->packet);

#ifndef _STRING_ARCH_unaligned
          if ((uintptr_t) dh & (__alignof__ (*dh) - 1))
            return NULL;
#endif

          /* See whether we must ignore the entry or whether something
             is wrong because garbage collection is in progress.  */
          if (dh->usable && here->packet + dh->allocsize <= datasize)
            return dh;
        }

      work = here->next;
    }

  return NULL;
}

/* Create a socket connected to a name.  */
int
__nscd_open_socket (const char *key, size_t keylen, request_type type,
                    void *response, size_t responselen)
{
  /* This should never happen and it is something the nscd daemon
     enforces, too.  Here it helps to limit the amount of stack
     use.  */
  if (keylen > MAXKEYLEN)
    return -1;

  int saved_errno = errno;

  int sock = open_socket (type, key, keylen);
  if (sock >= 0)
    {
      /* Wait for data.  */
      if (wait_on_socket (sock) > 0)
        {
          ssize_t nbytes = TEMP_FAILURE_RETRY (__read (sock, response,
                                                       responselen));
          if (nbytes == (ssize_t) responselen)
            return sock;
        }

      close_not_cancel_no_status (sock);
    }

  __set_errno (saved_errno);

  return -1;
}
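
/* The nscd-aware lookup code elsewhere in libc uses these helpers in
   two ways: __nscd_open_socket to send a single request and read a
   fixed-size response, and __nscd_get_map_ref together with
   __nscd_cache_search to look a key up directly in the shared memory
   mapping without a round trip to the daemon.  */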