lib/libc/rpc/clnt_dg.c
/*-
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * @(#)clnt_dg.c 1.23 94/04/22 SMI; 1.19 89/03/16 Copyr 1988 Sun Micro
 * $NetBSD: clnt_dg.c,v 1.4 2000/07/14 08:40:41 fvdl Exp $
 * $FreeBSD: src/lib/libc/rpc/clnt_dg.c,v 1.18 2006/02/27 22:10:58 deischen Exp $
 */

/*
 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
 */

/*
 * Implements a connectionless client side RPC.
 */
40 #include "namespace.h"
41 #include "reentrant.h"
42 #include <sys/types.h>
43 #include <sys/event.h>
44 #include <sys/time.h>
45 #include <sys/socket.h>
46 #include <sys/ioctl.h>
47 #include <arpa/inet.h>
48 #include <rpc/rpc.h>
49 #include <errno.h>
50 #include <stdlib.h>
51 #include <string.h>
52 #include <signal.h>
53 #include <unistd.h>
54 #include <err.h>
55 #include "un-namespace.h"
56 #include "rpc_com.h"
57 #include "mt_misc.h"

#define RPC_MAX_BACKOFF         30 /* seconds */

static void clnt_dg_abort(CLIENT *);
static enum clnt_stat clnt_dg_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
            xdrproc_t, void *, struct timeval);
static bool_t clnt_dg_control(CLIENT *, u_int, void *);
static void clnt_dg_destroy(CLIENT *);
static bool_t clnt_dg_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_dg_geterr(CLIENT *, struct rpc_err *);
static struct clnt_ops *clnt_dg_ops(void);
static bool_t time_not_ok(struct timeval *);

/*
 * This machinery implements per-fd locks for MT-safety.  It is not
 * sufficient to do per-CLIENT handle locks for MT-safety because a
 * user may create more than one CLIENT handle with the same fd behind
 * it.  Therefore, we allocate an array of flags (dg_fd_locks), protected
 * by the clnt_fd_lock mutex, and an array (dg_cv) of condition variables
 * similarly protected.  Dg_fd_lock[fd] == 1 => a call is active on some
 * CLIENT handle created for that fd.
 * The current implementation holds locks across the entire RPC and reply,
 * including retransmissions.  Yes, this is silly, and as soon as this
 * code is proven to work, this should be the first thing fixed.  One step
 * at a time.
 */
static int *dg_fd_locks;
static cond_t *dg_cv;
#define release_fd_lock(fd, mask) { \
        mutex_lock(&clnt_fd_lock); \
        dg_fd_locks[fd] = 0; \
        mutex_unlock(&clnt_fd_lock); \
        thr_sigsetmask(SIG_SETMASK, &(mask), NULL); \
        cond_signal(&dg_cv[fd]); \
}
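
/*
 * Illustrative sketch (not part of the original source; names are
 * hypothetical): the lock is keyed on the fd because nothing stops a
 * caller from wrapping the same descriptor in two handles.  Calls made
 * on either handle, from any thread, serialize on dg_fd_locks[fd]
 * rather than on the individual CLIENT.
 *
 *      CLIENT *c1 = clnt_dg_create(fd, &svc, prog, vers, 0, 0);
 *      CLIENT *c2 = clnt_dg_create(fd, &svc, prog, vers, 0, 0);
 *      // concurrent calls on c1 and c2 contend for the same
 *      // dg_fd_locks[fd] slot and wait on dg_cv[fd]
 */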

static const char mem_err_clnt_dg[] = "clnt_dg_create: out of memory";

/* VARIABLES PROTECTED BY clnt_fd_lock: dg_fd_locks, dg_cv */

/*
 * Private data kept per client handle
 */
struct cu_data {
        int                     cu_fd;          /* connection's fd */
        bool_t                  cu_closeit;     /* opened by library */
        struct sockaddr_storage cu_raddr;       /* remote address */
        int                     cu_rlen;
        struct timeval          cu_wait;        /* retransmit interval */
        struct timeval          cu_total;       /* total time for the call */
        struct rpc_err          cu_error;
        XDR                     cu_outxdrs;
        u_int                   cu_xdrpos;
        u_int                   cu_sendsz;      /* send size */
        char                    *cu_outbuf;
        u_int                   cu_recvsz;      /* recv size */
        int                     cu_async;
        int                     cu_connect;     /* Use connect(). */
        int                     cu_connected;   /* Have done connect(). */
        struct kevent           cu_kin;
        int                     cu_kq;
        char                    cu_inbuf[1];
};
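
/*
 * Layout sketch (mirrors clnt_dg_create() below): cu_inbuf[1] is only a
 * placeholder; the structure is allocated with the receive and send
 * buffers appended, and cu_outbuf points just past the receive area:
 *
 *      cu = mem_alloc(sizeof (*cu) + sendsz + recvsz);
 *      cu->cu_outbuf = &cu->cu_inbuf[recvsz];
 */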

/*
 * Connectionless client creation returns a client handle with the given
 * parameters.  Default options are set, which the user can change using
 * clnt_control().
 * fd should be open and bound.
 * NB: The rpch->cl_auth is initialized to null authentication.
 *     Caller may wish to set this to something more useful.
 *
 * sendsz and recvsz are the maximum allowable packet sizes that can be
 * sent and received.  Normally they are the same, but they can be
 * changed to improve the program efficiency and buffer allocation.
 * If they are 0, use the transport default.
 *
 * If svcaddr is NULL, returns NULL.
 */
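
/*
 * Usage sketch (illustrative only; the address setup and the prog/vers
 * numbers are assumptions about the caller, not part of this file):
 *
 *      struct sockaddr_in sin;         // filled in with the server address
 *      struct netbuf svc;
 *      CLIENT *cl;
 *      int fd;
 *
 *      fd = socket(AF_INET, SOCK_DGRAM, 0);    // then bind() it
 *      svc.maxlen = svc.len = sizeof sin;
 *      svc.buf = (void *)&sin;
 *      cl = clnt_dg_create(fd, &svc, prog, vers, 0, 0);
 *      if (cl != NULL)
 *              clnt_control(cl, CLSET_FD_CLOSE, NULL);
 */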
CLIENT *
clnt_dg_create(int fd,                  /* open file descriptor */
        const struct netbuf *svcaddr,   /* server's address */
        rpcprog_t program,              /* program number */
        rpcvers_t version,              /* version number */
        u_int sendsz,                   /* buffer send size */
        u_int recvsz)                   /* buffer recv size */
{
        CLIENT *cl = NULL;              /* client handle */
        struct cu_data *cu = NULL;      /* private data */
        struct timeval now;
        struct rpc_msg call_msg;
        sigset_t mask;
        sigset_t newmask;
        struct __rpc_sockinfo si;
        int one = 1;

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        if (dg_fd_locks == NULL) {
                int cv_allocsz;
                size_t fd_allocsz;
                int dtbsize = __rpc_dtbsize();

                fd_allocsz = dtbsize * sizeof (int);
                dg_fd_locks = (int *) mem_alloc(fd_allocsz);
                if (dg_fd_locks == NULL) {
                        mutex_unlock(&clnt_fd_lock);
                        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
                        goto err1;
                } else
                        memset(dg_fd_locks, '\0', fd_allocsz);

                cv_allocsz = dtbsize * sizeof (cond_t);
                dg_cv = (cond_t *) mem_alloc(cv_allocsz);
                if (dg_cv == NULL) {
                        mem_free(dg_fd_locks, fd_allocsz);
                        dg_fd_locks = NULL;
                        mutex_unlock(&clnt_fd_lock);
                        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
                        goto err1;
                } else {
                        int i;

                        for (i = 0; i < dtbsize; i++)
                                cond_init(&dg_cv[i], 0, NULL);
                }
        }
        mutex_unlock(&clnt_fd_lock);
        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);

        if (svcaddr == NULL) {
                rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
                return (NULL);
        }

        if (!__rpc_fd2sockinfo(fd, &si)) {
                rpc_createerr.cf_stat = RPC_TLIERROR;
                rpc_createerr.cf_error.re_errno = 0;
                return (NULL);
        }
        /*
         * Find the receive and the send size
         */
        sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
        recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
        if ((sendsz == 0) || (recvsz == 0)) {
                rpc_createerr.cf_stat = RPC_TLIERROR; /* XXX */
                rpc_createerr.cf_error.re_errno = 0;
                return (NULL);
        }

        if ((cl = mem_alloc(sizeof (CLIENT))) == NULL)
                goto err1;
        /*
         * Should be multiple of 4 for XDR.
         */
        sendsz = ((sendsz + 3) / 4) * 4;
        recvsz = ((recvsz + 3) / 4) * 4;
        cu = mem_alloc(sizeof (*cu) + sendsz + recvsz);
        if (cu == NULL)
                goto err1;
        memcpy(&cu->cu_raddr, svcaddr->buf, (size_t)svcaddr->len);
        cu->cu_rlen = svcaddr->len;
        cu->cu_outbuf = &cu->cu_inbuf[recvsz];
        /* Other values can also be set through clnt_control() */
        cu->cu_wait.tv_sec = 15; /* heuristically chosen */
        cu->cu_wait.tv_usec = 0;
        cu->cu_total.tv_sec = -1;
        cu->cu_total.tv_usec = -1;
        cu->cu_sendsz = sendsz;
        cu->cu_recvsz = recvsz;
        cu->cu_async = FALSE;
        cu->cu_connect = FALSE;
        cu->cu_connected = FALSE;
        gettimeofday(&now, NULL);
        call_msg.rm_xid = __RPC_GETXID(&now);
        call_msg.rm_call.cb_prog = program;
        call_msg.rm_call.cb_vers = version;
        xdrmem_create(&(cu->cu_outxdrs), cu->cu_outbuf, sendsz, XDR_ENCODE);
        if (! xdr_callhdr(&(cu->cu_outxdrs), &call_msg)) {
                rpc_createerr.cf_stat = RPC_CANTENCODEARGS; /* XXX */
                rpc_createerr.cf_error.re_errno = 0;
                goto err2;
        }
        cu->cu_xdrpos = XDR_GETPOS(&(cu->cu_outxdrs));

        /* XXX fvdl - do we still want this? */
#if 0
        bindresvport_sa(fd, (struct sockaddr *)svcaddr->buf);
#endif
        _ioctl(fd, FIONBIO, (char *)(void *)&one);

        /*
         * By default, closeit is always FALSE.  It is the user's
         * responsibility to do a close on the fd, or the user may use
         * clnt_control to let clnt_destroy do it for him/her.
         */
        cu->cu_closeit = FALSE;
        cu->cu_fd = fd;
        cl->cl_ops = clnt_dg_ops();
        cl->cl_private = (caddr_t)(void *)cu;
        cl->cl_auth = authnone_create();
        cl->cl_tp = NULL;
        cl->cl_netid = NULL;
        cu->cu_kq = -1;
        EV_SET(&cu->cu_kin, cu->cu_fd, EVFILT_READ, EV_ADD, 0, 0, 0);
        return (cl);
err1:
        warnx(mem_err_clnt_dg);
        rpc_createerr.cf_stat = RPC_SYSTEMERROR;
        rpc_createerr.cf_error.re_errno = errno;
err2:
        if (cl) {
                mem_free(cl, sizeof (CLIENT));
                if (cu)
                        mem_free(cu, sizeof (*cu) + sendsz + recvsz);
        }
        return (NULL);
}

static enum clnt_stat
clnt_dg_call(CLIENT *cl,                /* client handle */
        rpcproc_t proc,                 /* procedure number */
        xdrproc_t xargs,                /* xdr routine for args */
        void *argsp,                    /* pointer to args */
        xdrproc_t xresults,             /* xdr routine for results */
        void *resultsp,                 /* pointer to results */
        struct timeval utimeout)        /* seconds to wait before giving up */
{
        struct cu_data *cu = (struct cu_data *)cl->cl_private;
        XDR *xdrs;
        size_t outlen = 0;
        struct rpc_msg reply_msg;
        XDR reply_xdrs;
        bool_t ok;
        int nrefreshes = 2;             /* number of times to refresh cred */
        struct timeval timeout;
        struct timeval retransmit_time;
        struct timeval next_sendtime, starttime, time_waited, tv;
        struct timespec ts;
        struct kevent kv;
        struct sockaddr *sa;
        sigset_t mask;
        sigset_t newmask;
        socklen_t salen;
        ssize_t recvlen = 0;
        int kin_len, n, rpc_lock_value;
        u_int32_t xid;

        outlen = 0;
        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        while (dg_fd_locks[cu->cu_fd])
                cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
        if (__isthreaded)
                rpc_lock_value = 1;
        else
                rpc_lock_value = 0;
        dg_fd_locks[cu->cu_fd] = rpc_lock_value;
        mutex_unlock(&clnt_fd_lock);
        if (cu->cu_total.tv_usec == -1) {
                timeout = utimeout;     /* use supplied timeout */
        } else {
                timeout = cu->cu_total; /* use default timeout */
        }

        if (cu->cu_connect && !cu->cu_connected) {
                if (_connect(cu->cu_fd, (struct sockaddr *)&cu->cu_raddr,
                    cu->cu_rlen) < 0) {
                        cu->cu_error.re_errno = errno;
                        cu->cu_error.re_status = RPC_CANTSEND;
                        goto out;
                }
                cu->cu_connected = 1;
        }
        if (cu->cu_connected) {
                sa = NULL;
                salen = 0;
        } else {
                sa = (struct sockaddr *)&cu->cu_raddr;
                salen = cu->cu_rlen;
        }
        time_waited.tv_sec = 0;
        time_waited.tv_usec = 0;
        retransmit_time = next_sendtime = cu->cu_wait;
        gettimeofday(&starttime, NULL);

        /* Clean up in case the last call ended in a longjmp(3) call. */
        if (cu->cu_kq >= 0)
                _close(cu->cu_kq);
        if ((cu->cu_kq = kqueue()) < 0) {
                cu->cu_error.re_errno = errno;
                cu->cu_error.re_status = RPC_CANTSEND;
                goto out;
        }
        kin_len = 1;

call_again:
        xdrs = &(cu->cu_outxdrs);
        if (cu->cu_async == TRUE && xargs == NULL)
                goto get_reply;
        xdrs->x_op = XDR_ENCODE;
        XDR_SETPOS(xdrs, cu->cu_xdrpos);
        /*
         * The transaction id (xid) is the first thing in the out buffer.
         * XXX Yes, and it's in network byte order, so we should be
         * careful when we increment it, shouldn't we.
         */
        xid = ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf));
        xid++;
        *(u_int32_t *)(void *)(cu->cu_outbuf) = htonl(xid);

        if ((! XDR_PUTINT32(xdrs, &proc)) ||
            (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
            (! (*xargs)(xdrs, argsp))) {
                cu->cu_error.re_status = RPC_CANTENCODEARGS;
                goto out;
        }
        outlen = (size_t)XDR_GETPOS(xdrs);

send_again:
        if (_sendto(cu->cu_fd, cu->cu_outbuf, outlen, 0, sa, salen) != outlen) {
                cu->cu_error.re_errno = errno;
                cu->cu_error.re_status = RPC_CANTSEND;
                goto out;
        }

        /*
         * Hack to provide rpc-based message passing
         */
        if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
                cu->cu_error.re_status = RPC_TIMEDOUT;
                goto out;
        }

get_reply:

        /*
         * Sub-optimal code appears here because we have
         * some clock time to spare while the packets are in flight.
         * (We assume that this is actually only executed once.)
         */
        reply_msg.acpted_rply.ar_verf = _null_auth;
        reply_msg.acpted_rply.ar_results.where = resultsp;
        reply_msg.acpted_rply.ar_results.proc = xresults;

        for (;;) {
                /* Decide how long to wait. */
                if (timercmp(&next_sendtime, &timeout, <))
                        timersub(&next_sendtime, &time_waited, &tv);
                else
                        timersub(&timeout, &time_waited, &tv);
                if (tv.tv_sec < 0 || tv.tv_usec < 0)
                        tv.tv_sec = tv.tv_usec = 0;
                TIMEVAL_TO_TIMESPEC(&tv, &ts);

                n = _kevent(cu->cu_kq, &cu->cu_kin, kin_len, &kv, 1, &ts);
                /* We don't need to register the event again. */
                kin_len = 0;

                if (n == 1) {
                        if (kv.flags & EV_ERROR) {
                                cu->cu_error.re_errno = kv.data;
                                cu->cu_error.re_status = RPC_CANTRECV;
                                goto out;
                        }
                        /* We have some data now */
                        do {
                                recvlen = _recvfrom(cu->cu_fd, cu->cu_inbuf,
                                    cu->cu_recvsz, 0, NULL, NULL);
                        } while (recvlen < 0 && errno == EINTR);
                        if (recvlen < 0 && errno != EWOULDBLOCK) {
                                cu->cu_error.re_errno = errno;
                                cu->cu_error.re_status = RPC_CANTRECV;
                                goto out;
                        }
                        if (recvlen >= sizeof(u_int32_t) &&
                            (cu->cu_async == TRUE ||
                            *((u_int32_t *)(void *)(cu->cu_inbuf)) ==
                            *((u_int32_t *)(void *)(cu->cu_outbuf)))) {
                                /* We now assume we have the proper reply. */
                                break;
                        }
                }
                if (n == -1 && errno != EINTR) {
                        cu->cu_error.re_errno = errno;
                        cu->cu_error.re_status = RPC_CANTRECV;
                        goto out;
                }
                gettimeofday(&tv, NULL);
                timersub(&tv, &starttime, &time_waited);

                /* Check for timeout. */
                if (timercmp(&time_waited, &timeout, >)) {
                        cu->cu_error.re_status = RPC_TIMEDOUT;
                        goto out;
                }

                /* Retransmit if necessary. */
                if (timercmp(&time_waited, &next_sendtime, >)) {
                        /* update retransmit_time */
                        if (retransmit_time.tv_sec < RPC_MAX_BACKOFF)
                                timeradd(&retransmit_time, &retransmit_time,
                                    &retransmit_time);
                        timeradd(&next_sendtime, &retransmit_time,
                            &next_sendtime);
                        goto send_again;
                }
        }

        /*
         * now decode and validate the response
         */

        xdrmem_create(&reply_xdrs, cu->cu_inbuf, (u_int)recvlen, XDR_DECODE);
        ok = xdr_replymsg(&reply_xdrs, &reply_msg);
        /* XDR_DESTROY(&reply_xdrs); save a few cycles on noop destroy */
        if (ok) {
                if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
                    (reply_msg.acpted_rply.ar_stat == SUCCESS))
                        cu->cu_error.re_status = RPC_SUCCESS;
                else
                        _seterr_reply(&reply_msg, &(cu->cu_error));

                if (cu->cu_error.re_status == RPC_SUCCESS) {
                        if (! AUTH_VALIDATE(cl->cl_auth,
                            &reply_msg.acpted_rply.ar_verf)) {
                                cu->cu_error.re_status = RPC_AUTHERROR;
                                cu->cu_error.re_why = AUTH_INVALIDRESP;
                        }
                        if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
                                xdrs->x_op = XDR_FREE;
                                xdr_opaque_auth(xdrs,
                                    &(reply_msg.acpted_rply.ar_verf));
                        }
                }               /* end successful completion */
                /*
                 * If unsuccessful AND error is an authentication error
                 * then refresh credentials and try again, else break
                 */
                else if (cu->cu_error.re_status == RPC_AUTHERROR)
                        /* maybe our credentials need to be refreshed ... */
                        if (nrefreshes > 0 &&
                            AUTH_REFRESH(cl->cl_auth, &reply_msg)) {
                                nrefreshes--;
                                goto call_again;
                        }
                /* end of unsuccessful completion */
        }                       /* end of valid reply message */
        else {
                cu->cu_error.re_status = RPC_CANTDECODERES;
        }
out:
        if (cu->cu_kq >= 0)
                _close(cu->cu_kq);
        cu->cu_kq = -1;
        release_fd_lock(cu->cu_fd, mask);
        return (cu->cu_error.re_status);
}
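
/*
 * Call sketch (illustrative; the NULLPROC ping and the 25 second total
 * timeout are assumptions about the caller, not part of this file).
 * With the default 15 second retransmit interval the request is resent
 * at roughly t = 15s, 45s, 75s, ... (the interval doubles until it
 * reaches RPC_MAX_BACKOFF) until the total timeout expires:
 *
 *      struct timeval tottimeout = { 25, 0 };
 *      enum clnt_stat st;
 *
 *      st = clnt_call(cl, NULLPROC, (xdrproc_t)xdr_void, NULL,
 *          (xdrproc_t)xdr_void, NULL, tottimeout);
 *      if (st != RPC_SUCCESS)
 *              clnt_perror(cl, "ping failed");
 */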

static void
clnt_dg_geterr(CLIENT *cl, struct rpc_err *errp)
{
        struct cu_data *cu = (struct cu_data *)cl->cl_private;

        *errp = cu->cu_error;
}

static bool_t
clnt_dg_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
{
        struct cu_data *cu = (struct cu_data *)cl->cl_private;
        XDR *xdrs = &(cu->cu_outxdrs);
        bool_t dummy;
        sigset_t mask;
        sigset_t newmask;

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        while (dg_fd_locks[cu->cu_fd])
                cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
        xdrs->x_op = XDR_FREE;
        dummy = (*xdr_res)(xdrs, res_ptr);
        mutex_unlock(&clnt_fd_lock);
        thr_sigsetmask(SIG_SETMASK, &mask, NULL);
        cond_signal(&dg_cv[cu->cu_fd]);
        return (dummy);
}
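
/*
 * Usage sketch (illustrative; the result type and its xdr routine are
 * assumptions): storage allocated while decoding a result is released
 * through clnt_freeres().
 *
 *      struct my_result res;   // decoded by an earlier clnt_call()
 *
 *      clnt_freeres(cl, (xdrproc_t)xdr_my_result, &res);
 */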

/*ARGSUSED*/
static void
clnt_dg_abort(CLIENT *h __unused)
{
}

static bool_t
clnt_dg_control(CLIENT *cl, u_int request, void *info)
{
        struct cu_data *cu = (struct cu_data *)cl->cl_private;
        struct netbuf *addr;
        sigset_t mask;
        sigset_t newmask;
        int rpc_lock_value;

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        while (dg_fd_locks[cu->cu_fd])
                cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
        if (__isthreaded)
                rpc_lock_value = 1;
        else
                rpc_lock_value = 0;
        dg_fd_locks[cu->cu_fd] = rpc_lock_value;
        mutex_unlock(&clnt_fd_lock);
        switch (request) {
        case CLSET_FD_CLOSE:
                cu->cu_closeit = TRUE;
                release_fd_lock(cu->cu_fd, mask);
                return (TRUE);
        case CLSET_FD_NCLOSE:
                cu->cu_closeit = FALSE;
                release_fd_lock(cu->cu_fd, mask);
                return (TRUE);
        }

        /* for other requests which use info */
        if (info == NULL) {
                release_fd_lock(cu->cu_fd, mask);
                return (FALSE);
        }
        switch (request) {
        case CLSET_TIMEOUT:
                if (time_not_ok((struct timeval *)info)) {
                        release_fd_lock(cu->cu_fd, mask);
                        return (FALSE);
                }
                cu->cu_total = *(struct timeval *)info;
                break;
        case CLGET_TIMEOUT:
                *(struct timeval *)info = cu->cu_total;
                break;
        case CLGET_SERVER_ADDR:         /* Give him the fd address */
                /* Now obsolete. Only for backward compatibility */
                memcpy(info, &cu->cu_raddr, (size_t)cu->cu_rlen);
                break;
        case CLSET_RETRY_TIMEOUT:
                if (time_not_ok((struct timeval *)info)) {
                        release_fd_lock(cu->cu_fd, mask);
                        return (FALSE);
                }
                cu->cu_wait = *(struct timeval *)info;
                break;
        case CLGET_RETRY_TIMEOUT:
                *(struct timeval *)info = cu->cu_wait;
                break;
        case CLGET_FD:
                *(int *)info = cu->cu_fd;
                break;
        case CLGET_SVC_ADDR:
                addr = (struct netbuf *)info;
                addr->buf = &cu->cu_raddr;
                addr->len = cu->cu_rlen;
                addr->maxlen = sizeof cu->cu_raddr;
                break;
        case CLSET_SVC_ADDR:            /* set to new address */
                addr = (struct netbuf *)info;
                if (addr->len < sizeof cu->cu_raddr) {
                        release_fd_lock(cu->cu_fd, mask);
                        return (FALSE);
                }
                memcpy(&cu->cu_raddr, addr->buf, addr->len);
                cu->cu_rlen = addr->len;
                break;
        case CLGET_XID:
                /*
                 * use the knowledge that xid is the
                 * first element in the call structure.
                 * This will get the xid of the PREVIOUS call
                 */
                *(u_int32_t *)info =
                    ntohl(*(u_int32_t *)(void *)cu->cu_outbuf);
                break;

        case CLSET_XID:
                /* This will set the xid of the NEXT call */
                *(u_int32_t *)(void *)cu->cu_outbuf =
                    htonl(*(u_int32_t *)info - 1);
                /* decrement by 1 as clnt_dg_call() increments once */
                break;

        case CLGET_VERS:
                /*
                 * This RELIES on the information that, in the call body,
                 * the version number field is the fifth field from the
                 * beginning of the RPC header. MUST be changed if the
                 * call_struct is changed
                 */
                *(u_int32_t *)info =
                    ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf +
                    4 * BYTES_PER_XDR_UNIT));
                break;

        case CLSET_VERS:
                *(u_int32_t *)(void *)(cu->cu_outbuf + 4 * BYTES_PER_XDR_UNIT)
                    = htonl(*(u_int32_t *)info);
                break;

        case CLGET_PROG:
                /*
                 * This RELIES on the information that, in the call body,
                 * the program number field is the fourth field from the
                 * beginning of the RPC header. MUST be changed if the
                 * call_struct is changed
                 */
                *(u_int32_t *)info =
                    ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf +
                    3 * BYTES_PER_XDR_UNIT));
                break;

        case CLSET_PROG:
                *(u_int32_t *)(void *)(cu->cu_outbuf + 3 * BYTES_PER_XDR_UNIT)
                    = htonl(*(u_int32_t *)info);
                break;
        case CLSET_ASYNC:
                cu->cu_async = *(int *)info;
                break;
        case CLSET_CONNECT:
                cu->cu_connect = *(int *)info;
                break;
        default:
                release_fd_lock(cu->cu_fd, mask);
                return (FALSE);
        }
        release_fd_lock(cu->cu_fd, mask);
        return (TRUE);
}
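
/*
 * Usage sketch (illustrative; the 3 and 20 second values are
 * assumptions): tighten the retransmit interval, fix the total timeout
 * (after CLSET_TIMEOUT the timeout passed to clnt_call() is ignored),
 * and enable connect() on the datagram socket:
 *
 *      struct timeval wait = { 3, 0 };
 *      struct timeval total = { 20, 0 };
 *      int one = 1;
 *
 *      clnt_control(cl, CLSET_RETRY_TIMEOUT, (char *)&wait);
 *      clnt_control(cl, CLSET_TIMEOUT, (char *)&total);
 *      clnt_control(cl, CLSET_CONNECT, (char *)&one);
 */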

static void
clnt_dg_destroy(CLIENT *cl)
{
        struct cu_data *cu = (struct cu_data *)cl->cl_private;
        int cu_fd = cu->cu_fd;
        sigset_t mask;
        sigset_t newmask;

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        while (dg_fd_locks[cu_fd])
                cond_wait(&dg_cv[cu_fd], &clnt_fd_lock);
        if (cu->cu_closeit)
                _close(cu_fd);
        if (cu->cu_kq >= 0)
                _close(cu->cu_kq);
        XDR_DESTROY(&(cu->cu_outxdrs));
        mem_free(cu, (sizeof (*cu) + cu->cu_sendsz + cu->cu_recvsz));
        if (cl->cl_netid && cl->cl_netid[0])
                mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
        if (cl->cl_tp && cl->cl_tp[0])
                mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
        mem_free(cl, sizeof (CLIENT));
        mutex_unlock(&clnt_fd_lock);
        thr_sigsetmask(SIG_SETMASK, &mask, NULL);
        cond_signal(&dg_cv[cu_fd]);
}
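
/*
 * Teardown sketch (illustrative): clnt_dg_destroy() does not free the
 * auth handle, so a caller that installed its own auth (the create
 * comment above suggests replacing the default) typically does
 *
 *      auth_destroy(cl->cl_auth);
 *      clnt_destroy(cl);
 *
 * and closes the fd itself unless CLSET_FD_CLOSE was set.
 */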

static struct clnt_ops *
clnt_dg_ops(void)
{
        static struct clnt_ops ops;
        sigset_t mask;
        sigset_t newmask;

/* VARIABLES PROTECTED BY ops_lock: ops */

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&ops_lock);
        if (ops.cl_call == NULL) {
                ops.cl_call = clnt_dg_call;
                ops.cl_abort = clnt_dg_abort;
                ops.cl_geterr = clnt_dg_geterr;
                ops.cl_freeres = clnt_dg_freeres;
                ops.cl_destroy = clnt_dg_destroy;
                ops.cl_control = clnt_dg_control;
        }
        mutex_unlock(&ops_lock);
        thr_sigsetmask(SIG_SETMASK, &mask, NULL);
        return (&ops);
}

/*
 * Make sure that the time is not garbage.  -1 value is allowed.
 */
static bool_t
time_not_ok(struct timeval *t)
{
        return (t->tv_sec < -1 || t->tv_sec > 100000000 ||
            t->tv_usec < -1 || t->tv_usec > 1000000);
}