/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>

#include <net/checksum.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list
 *	svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	SK_CONN, SK_DATA, can be set or cleared at any time.
 *		after a set, svc_sock_enqueue must be called.
 *		after a clear, the socket must be read/accepted;
 *		if this succeeds, it must be set again.
 *	SK_CLOSE can be set at any time. It is never cleared.
 */
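/*
 * An illustrative sketch of the flag protocol above (not verbatim code
 * from this file):
 *
 *	set_bit(SK_DATA, &svsk->sk_flags);	// producer: new data arrived
 *	svc_sock_enqueue(svsk);			// must follow every set
 *	...
 *	clear_bit(SK_DATA, &svsk->sk_flags);	// consumer: about to read
 *	// read/accept; if that finds data, set the bit again before returning
 */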
#define RPCDBG_FACILITY	RPCDBG_SVCSOCK
static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int *errp, int pmap_reg);
static void		svc_udp_data_ready(struct sock *, int);
static int		svc_udp_recvfrom(struct svc_rqst *);
static int		svc_udp_sendto(struct svc_rqst *);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;
/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static inline void
svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}
/*
 * Release an skbuff after use
 */
static inline void
svc_release_skb(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_skbuff;
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	if (skb) {
		rqstp->rq_skbuff = NULL;

		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
		skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
	}
	if (dr) {
		rqstp->rq_deferred = NULL;
		kfree(dr);
	}
}
/*
 * Any space to write?
 */
static inline unsigned long
svc_sock_wspace(struct svc_sock *svsk)
{
	int wspace;

	if (svsk->sk_sock->type == SOCK_STREAM)
		wspace = sk_stream_wspace(svsk->sk_sk);
	else
		wspace = sock_wspace(svsk->sk_sk);

	return wspace;
}
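/*
 * Note: svc_sock_wspace() reports the free space in the socket's send
 * buffer (roughly sk_sndbuf minus what is already queued).  svc_sock_enqueue
 * below compares it against twice the worst-case reply size before handing
 * the socket to a thread, so a thread is never woken for a request whose
 * reply could not currently be queued.
 */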
/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
	struct svc_serv	*serv = svsk->sk_server;
	struct svc_pool	*pool = &serv->sv_pools[0];
	struct svc_rqst	*rqstp;

	if (!(svsk->sk_flags &
	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
		return;
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		return;

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
			"svc_sock_enqueue: threads and sockets both waiting??\n");

	if (test_bit(SK_DEAD, &svsk->sk_flags)) {
		/* Don't enqueue dead sockets */
		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}

	/* Mark socket as busy. It will remain in this state until the
	 * server has processed all pending data and put the socket back
	 * on the idle list.  We update SK_BUSY atomically because
	 * it also guards against trying to enqueue the svc_sock twice.
	 */
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Don't enqueue socket while already enqueued */
		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}
	BUG_ON(svsk->sk_pool != NULL);
	svsk->sk_pool = pool;

	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	if (((atomic_read(&svsk->sk_reserved) + serv->sv_bufsz)*2
	     > svc_sock_wspace(svsk))
	    && !test_bit(SK_CLOSE, &svsk->sk_flags)
	    && !test_bit(SK_CONN, &svsk->sk_flags)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
			svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_bufsz,
			svc_sock_wspace(svsk));
		svsk->sk_pool = NULL;
		clear_bit(SK_BUSY, &svsk->sk_flags);
		goto out_unlock;
	}
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);

	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: socket %p served by daemon %p\n",
			svsk->sk_sk, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_sock)
			printk(KERN_ERR
				"svc_sock_enqueue: server %p, rq_sock=%p!\n",
				rqstp, rqstp->rq_sock);
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_bufsz;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
		BUG_ON(svsk->sk_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
		list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
		BUG_ON(svsk->sk_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}
/*
 * Dequeue the first socket.  Must be called with the pool->sp_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_pool *pool)
{
	struct svc_sock	*svsk;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	svsk = list_entry(pool->sp_sockets.next,
			  struct svc_sock, sk_ready);
	list_del_init(&svsk->sk_ready);

	dprintk("svc: socket %p dequeued, inuse=%d\n",
		svsk->sk_sk, atomic_read(&svsk->sk_inuse));

	return svsk;
}
/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
	svsk->sk_pool = NULL;
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
}
/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_sock *svsk = rqstp->rq_sock;
		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
		rqstp->rq_reserved = space;

		svc_sock_enqueue(svsk);
	}
}
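/*
 * Typical use (illustrative, not taken from this file): a service that
 * knows its reply is small can release most of its reservation early with
 *
 *	svc_reserve(rqstp, 512);
 *
 * letting svc_sock_enqueue() admit further requests on the socket sooner.
 */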
/*
 * Release a socket after use.
 */
static inline void
svc_sock_put(struct svc_sock *svsk)
{
	if (atomic_dec_and_test(&svsk->sk_inuse) && test_bit(SK_DEAD, &svsk->sk_flags)) {
		dprintk("svc: releasing dead socket\n");
		sock_release(svsk->sk_sock);
		kfree(svsk);
	}
}
static void
svc_sock_release(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;

	svc_release_skb(rqstp);

	svc_free_allpages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_sock = NULL;

	svc_sock_put(svsk);
}
/*
 * External function to wake up a server waiting for data
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void
svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_sock = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}
/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct socket	*sock = svsk->sk_sock;
	int		slen;
	char		buffer[CMSG_SPACE(sizeof(struct in_pktinfo))];
	struct cmsghdr *cmh = (struct cmsghdr *)buffer;
	struct in_pktinfo *pki = (struct in_pktinfo *)CMSG_DATA(cmh);
	int		len = 0;
	int		result;
	int		size;
	struct page	**ppage = xdr->pages;
	size_t		base = xdr->page_base;
	unsigned int	pglen = xdr->page_len;
	unsigned int	flags = MSG_MORE;

	slen = xdr->len;

	if (rqstp->rq_prot == IPPROTO_UDP) {
		/* set the source and destination */
		struct msghdr	msg;
		msg.msg_name    = &rqstp->rq_addr;
		msg.msg_namelen = sizeof(rqstp->rq_addr);
		msg.msg_iov     = NULL;
		msg.msg_iovlen  = 0;
		msg.msg_flags	= MSG_MORE;

		msg.msg_control = cmh;
		msg.msg_controllen = sizeof(buffer);
		cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		cmh->cmsg_level = SOL_IP;
		cmh->cmsg_type = IP_PKTINFO;
		pki->ipi_ifindex = 0;
		pki->ipi_spec_dst.s_addr = rqstp->rq_daddr;

		if (sock_sendmsg(sock, &msg, 0) < 0)
			goto out;
	}

	/* send head */
	if (slen == xdr->head[0].iov_len)
		flags = 0;
	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0, xdr->head[0].iov_len, flags);
	if (len != xdr->head[0].iov_len)
		goto out;
	slen -= xdr->head[0].iov_len;
	if (slen == 0)
		goto out;

	/* send page data */
	size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
	while (pglen > 0) {
		if (slen == size)
			flags = 0;
		result = kernel_sendpage(sock, *ppage, base, size, flags);
		if (result > 0)
			len += result;
		if (result != size)
			goto out;
		slen -= size;
		pglen -= size;
		size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
		base = 0;
		ppage++;
	}
	/* send tail */
	if (xdr->tail[0].iov_len) {
		result = kernel_sendpage(sock, rqstp->rq_respages[rqstp->rq_restailpage],
					 ((unsigned long)xdr->tail[0].iov_base)& (PAGE_SIZE-1),
					 xdr->tail[0].iov_len, 0);

		if (result > 0)
			len += result;
	}
out:
	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %x)\n",
		rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len, xdr->len, len,
		rqstp->rq_addr.sin_addr.s_addr);

	return len;
}
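/*
 * Note on the flags handling above: every chunk but the last is sent with
 * MSG_MORE so the network layer may coalesce head, page data and tail into
 * fewer segments; flags is cleared just before the final chunk so the data
 * is actually pushed to the wire.
 */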
/*
 * Report socket names for nfsdfs
 */
static int one_sock_name(char *buf, struct svc_sock *svsk)
{
	int len;

	switch(svsk->sk_sk->sk_family) {
	case AF_INET:
		len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
			      svsk->sk_sk->sk_protocol==IPPROTO_UDP?
			      "udp" : "tcp",
			      NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
			      inet_sk(svsk->sk_sk)->num);
		break;
	default:
		len = sprintf(buf, "*unknown-%d*\n",
			      svsk->sk_sk->sk_family);
	}
	return len;
}
int
svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
{
	struct svc_sock *svsk, *closesk = NULL;
	int len = 0;

	if (!serv)
		return 0;
	spin_lock(&serv->sv_lock);
	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
		int onelen = one_sock_name(buf+len, svsk);
		if (toclose && strcmp(toclose, buf+len) == 0)
			closesk = svsk;
		else
			len += onelen;
	}
	spin_unlock(&serv->sv_lock);
	if (closesk)
		svc_delete_socket(closesk);
	else if (toclose)
		return -ENOENT;
	return len;
}
EXPORT_SYMBOL(svc_sock_names);
/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
	struct socket	*sock = svsk->sk_sock;
	int		avail, err;

	err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);

	return (err >= 0)? avail : err;
}
/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
	struct msghdr	msg;
	struct socket	*sock;
	int		len, alen;

	rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
	sock = rqstp->rq_sock->sk_sock;

	msg.msg_name    = &rqstp->rq_addr;
	msg.msg_namelen = sizeof(rqstp->rq_addr);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;

	msg.msg_flags	= MSG_DONTWAIT;

	len = kernel_recvmsg(sock, &msg, iov, nr, buflen, MSG_DONTWAIT);

	/* sock_recvmsg doesn't fill in the name/namelen, so we must..
	 * possibly we should cache this in the svc_sock structure
	 * at accept time. FIXME
	 */
	alen = sizeof(rqstp->rq_addr);
	kernel_getpeername(sock, (struct sockaddr *)&rqstp->rq_addr, &alen);

	dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
		rqstp->rq_sock, iov[0].iov_base, iov[0].iov_len, len);

	return len;
}
/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
	mm_segment_t	oldfs;
	oldfs = get_fs(); set_fs(KERNEL_DS);
	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
			(char*)&snd, sizeof(snd));
	sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
			(char*)&rcv, sizeof(rcv));
	set_fs(oldfs);
#else
	/* sock_setsockopt limits use to sysctl_?mem_max,
	 * which isn't acceptable.  Until that is made conditional
	 * on not having CAP_SYS_RESOURCE or similar, we go direct...
	 * DaveM said I could!
	 */
	lock_sock(sock->sk);
	sock->sk->sk_sndbuf = snd * 2;
	sock->sk->sk_rcvbuf = rcv * 2;
	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
	release_sock(sock->sk);
#endif
}
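/*
 * The snd/rcv values are doubled above because sk_sndbuf/sk_rcvbuf account
 * for protocol and bookkeeping overhead as well as payload; doubling mirrors
 * what the regular setsockopt() path does with user-supplied values.
 */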
/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
			svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
			svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
		svc_sock_enqueue(svsk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
		dprintk("RPC svc_write_space: someone sleeping on %p\n",
			svsk);
		wake_up_interruptible(sk->sk_sleep);
	}
}
/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	struct sk_buff	*skb;
	int		err, len;

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
	    /* udp sockets need large rcvbuf as all pending
	     * requests are still in that buffer.  sndbuf must
	     * also be large enough that there is enough space
	     * for one reply per thread.  We count all threads
	     * rather than threads in a particular pool, which
	     * provides an upper bound on the number of threads
	     * which will access the socket.
	     */
	    svc_sock_setbufsize(svsk->sk_sock,
				(serv->sv_nrthreads+3) * serv->sv_bufsz,
				(serv->sv_nrthreads+3) * serv->sv_bufsz);

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	clear_bit(SK_DATA, &svsk->sk_flags);
	while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
		if (err == -EAGAIN) {
			svc_sock_received(svsk);
			return err;
		}
		/* possibly an icmp error */
		dprintk("svc: recvfrom returned error %d\n", -err);
	}
	if (skb->tstamp.off_sec == 0) {
		struct timeval tv;

		tv.tv_sec = xtime.tv_sec;
		tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
		skb_set_timestamp(skb, &tv);
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
	set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

	/*
	 * Maybe more packets - kick another thread ASAP.
	 */
	svc_sock_received(svsk);

	len  = skb->len - sizeof(struct udphdr);
	rqstp->rq_arg.len = len;

	rqstp->rq_prot = IPPROTO_UDP;

	/* Get sender address */
	rqstp->rq_addr.sin_family = AF_INET;
	rqstp->rq_addr.sin_port = skb->h.uh->source;
	rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr;
	rqstp->rq_daddr = skb->nh.iph->daddr;

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
			/* checksum error */
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb)) {
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		rqstp->rq_skbuff = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_argused += (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
	}

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	return len;
}
static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
	int		error;

	error = svc_sendto(rqstp, &rqstp->rq_res);
	if (error == -ECONNREFUSED)
		/* ICMP error on earlier request. */
		error = svc_sendto(rqstp, &rqstp->rq_res);

	return error;
}
static void
svc_udp_init(struct svc_sock *svsk)
{
	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;
	svsk->sk_recvfrom = svc_udp_recvfrom;
	svsk->sk_sendto = svc_udp_sendto;

	/* initialise setting must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
			    3 * svsk->sk_server->sv_bufsz,
			    3 * svsk->sk_server->sv_bufsz);

	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
	set_bit(SK_CHNGBUF, &svsk->sk_flags);
}
/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (listen) state change %d\n",
		sk, sk->sk_state);

	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of child sockets becomes ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(SK_CONN, &svsk->sk_flags);
			svc_sock_enqueue(svsk);
		} else
			printk("svc: socket %p: no user data\n", sk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
		sk, sk->sk_state, sk->sk_user_data);

	if (!svsk)
		printk("svc: socket %p: no user data\n", sk);
	else {
		set_bit(SK_CLOSE, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
static void
svc_tcp_data_ready(struct sock *sk, int count)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
		sk, sk->sk_user_data);
	if (svsk) {
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
/*
 * Accept a TCP connection
 */
static void
svc_tcp_accept(struct svc_sock *svsk)
{
	struct sockaddr_in sin;
	struct svc_serv	*serv = svsk->sk_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	struct svc_sock	*newsvsk;
	int		err, slen;

	dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
	if (!sock)
		return;

	clear_bit(SK_CONN, &svsk->sk_flags);
	err = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (err < 0) {
		if (err == -ENOMEM)
			printk(KERN_WARNING "%s: no more sockets!\n",
			       serv->sv_name);
		else if (err != -EAGAIN && net_ratelimit())
			printk(KERN_WARNING "%s: accept failed (err %d)!\n",
			       serv->sv_name, -err);
		return;
	}

	set_bit(SK_CONN, &svsk->sk_flags);
	svc_sock_enqueue(svsk);

	slen = sizeof(sin);
	err = kernel_getpeername(newsock, (struct sockaddr *) &sin, &slen);
	if (err < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: peername failed (err %d)!\n",
			       serv->sv_name, -err);
		goto failed;		/* aborted connection or whatever */
	}

	/* Ideally, we would want to reject connections from unauthorized
	 * hosts here, but when we get encryption, the IP of the host won't
	 * tell us anything.  For now just warn about unpriv connections.
	 */
	if (ntohs(sin.sin_port) >= 1024) {
		dprintk(KERN_WARNING
			"%s: connect from unprivileged port: %u.%u.%u.%u:%d\n",
			serv->sv_name,
			NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
	}

	dprintk("%s: connect from %u.%u.%u.%u:%04x\n", serv->sv_name,
		NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0)))
		goto failed;

	/* make sure that we don't have too many active connections.
	 * If we have, something must be dropped.
	 *
	 * There's no point in trying to do random drop here for
	 * DoS prevention. The NFS client does 1 reconnect in 15
	 * seconds. An attacker can easily beat that.
	 *
	 * The only somewhat efficient mechanism would be to drop
	 * old connections from the same IP first. But right now
	 * we don't even record the client IP in svc_sock.
	 */
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
		struct svc_sock *svsk = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open TCP "
					"sockets, consider increasing the "
					"number of nfsd threads\n",
					serv->sv_name);
				printk(KERN_NOTICE "%s: last TCP connect from "
					"%u.%u.%u.%u:%d\n",
					serv->sv_name,
					NIPQUAD(sin.sin_addr.s_addr),
					ntohs(sin.sin_port));
			}
			/*
			 * Always select the oldest socket. It's not fair,
			 * but so is life
			 */
			svsk = list_entry(serv->sv_tempsocks.prev,
					  struct svc_sock,
					  sk_list);
			set_bit(SK_CLOSE, &svsk->sk_flags);
			atomic_inc(&svsk->sk_inuse);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (svsk) {
			svc_sock_enqueue(svsk);
			svc_sock_put(svsk);
		}
	}

	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return;

failed:
	sock_release(newsock);
	return;
}
/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	int		len;
	struct kvec vec[RPCSVC_MAXPAGES];
	int pnum, vlen;

	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
		svsk, test_bit(SK_DATA, &svsk->sk_flags),
		test_bit(SK_CONN, &svsk->sk_flags),
		test_bit(SK_CLOSE, &svsk->sk_flags));

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	if (test_bit(SK_CONN, &svsk->sk_flags)) {
		svc_tcp_accept(svsk);
		svc_sock_received(svsk);
		return 0;
	}

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
		/* sndbuf needs to have room for one request
		 * per thread, otherwise we can stall even when the
		 * network isn't a bottleneck.
		 *
		 * We count all threads rather than threads in a
		 * particular pool, which provides an upper bound
		 * on the number of threads which will access the socket.
		 *
		 * rcvbuf just needs to be able to hold a few requests.
		 * Normally they will be removed from the queue
		 * as soon as a complete request arrives.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_bufsz,
				    3 * serv->sv_bufsz);

	clear_bit(SK_DATA, &svsk->sk_flags);

	/* Receive data. If we haven't got the record length yet, get
	 * the next four bytes. Otherwise try to gobble up as much as
	 * possible up to the complete record length.
	 */
	if (svsk->sk_tcplen < 4) {
		unsigned long	want = 4 - svsk->sk_tcplen;
		struct kvec	iov;

		iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
		iov.iov_len  = want;
		if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
			goto error;
		svsk->sk_tcplen += len;

		if (len < want) {
			dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
				len, want);
			svc_sock_received(svsk);
			return -EAGAIN; /* record header not complete */
		}

		svsk->sk_reclen = ntohl(svsk->sk_reclen);
		if (!(svsk->sk_reclen & 0x80000000)) {
			/* FIXME: technically, a record can be fragmented,
			 * and non-terminal fragments will not have the top
			 * bit set in the fragment length header.
			 * But apparently no known nfs clients send fragmented
			 * records. */
			printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (non-terminal)\n",
			       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
		svsk->sk_reclen &= 0x7fffffff;
		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
		if (svsk->sk_reclen > serv->sv_bufsz) {
			printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n",
			       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
	}

	/* Check whether enough data is available */
	len = svc_recv_available(svsk);
	if (len < 0)
		goto error;

	if (len < svsk->sk_reclen) {
		dprintk("svc: incomplete TCP record (%d of %d)\n",
			len, svsk->sk_reclen);
		svc_sock_received(svsk);
		return -EAGAIN;	/* record not complete */
	}
	len = svsk->sk_reclen;
	set_bit(SK_DATA, &svsk->sk_flags);

	vec[0] = rqstp->rq_arg.head[0];
	vlen = PAGE_SIZE;
	pnum = 1;
	while (vlen < len) {
		vec[pnum].iov_base = page_address(rqstp->rq_argpages[rqstp->rq_argused++]);
		vec[pnum].iov_len = PAGE_SIZE;
		pnum++;
		vlen += PAGE_SIZE;
	}

	/* Now receive data */
	len = svc_recvfrom(rqstp, vec, pnum, len);
	if (len < 0)
		goto error;

	dprintk("svc: TCP complete record (%d bytes)\n", len);
	rqstp->rq_arg.len = len;
	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
	}

	rqstp->rq_skbuff      = NULL;
	rqstp->rq_prot	      = IPPROTO_TCP;

	/* Reset TCP read info */
	svsk->sk_reclen = 0;
	svsk->sk_tcplen = 0;

	svc_sock_received(svsk);
	if (serv->sv_stats)
		serv->sv_stats->nettcpcnt++;

	return len;

err_delete:
	svc_delete_socket(svsk);
	return -EAGAIN;

error:
	if (len == -EAGAIN) {
		dprintk("RPC: TCP recvfrom got EAGAIN\n");
		svc_sock_received(svsk);
	} else {
		printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
		       svsk->sk_server->sv_name, -len);
		goto err_delete;
	}

	return len;
}
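/*
 * RPC-over-TCP record marking, for reference (RFC 1831, section 10): each
 * record is preceded by a 4-byte big-endian marker; the top bit is set on
 * the final fragment of a record and the low 31 bits give the fragment
 * length.  E.g. a marker of 0x8000001c announces a last fragment of 28
 * bytes.  The code above insists on the top bit being set because it does
 * not reassemble multi-fragment records.
 */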
/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
	struct xdr_buf	*xbufp = &rqstp->rq_res;
	int sent;
	__be32 reclen;

	/* Set up the first element of the reply kvec.
	 * Any other kvecs that may be in use have been taken
	 * care of by the server implementation itself.
	 */
	reclen = htonl(0x80000000|((xbufp->len) - 4));
	memcpy(xbufp->head[0].iov_base, &reclen, 4);

	if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
		return -ENOTCONN;

	sent = svc_sendto(rqstp, &rqstp->rq_res);
	if (sent != xbufp->len) {
		printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
		       rqstp->rq_sock->sk_server->sv_name,
		       (sent<0)?"got error":"sent only",
		       sent, xbufp->len);
		svc_delete_socket(rqstp->rq_sock);
		sent = -EAGAIN;
	}
	return sent;
}
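/*
 * Note: xbufp->len includes the four bytes at the start of head[0] that are
 * overwritten with the record marker above, which is why the marker's
 * length field is computed as xbufp->len - 4.
 */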
static void
svc_tcp_init(struct svc_sock *svsk)
{
	struct sock	*sk = svsk->sk_sk;
	struct tcp_sock *tp = tcp_sk(sk);

	svsk->sk_recvfrom = svc_tcp_recvfrom;
	svsk->sk_sendto = svc_tcp_sendto;

	if (sk->sk_state == TCP_LISTEN) {
		dprintk("setting up TCP socket for listening\n");
		sk->sk_data_ready = svc_tcp_listen_data_ready;
		set_bit(SK_CONN, &svsk->sk_flags);
	} else {
		dprintk("setting up TCP socket for reading\n");
		sk->sk_state_change = svc_tcp_state_change;
		sk->sk_data_ready = svc_tcp_data_ready;
		sk->sk_write_space = svc_write_space;

		svsk->sk_reclen = 0;
		svsk->sk_tcplen = 0;

		tp->nonagle = 1;        /* disable Nagle's algorithm */

		/* initialise setting must have enough space to
		 * receive and respond to one request.
		 * svc_tcp_recvfrom will re-adjust if necessary
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    3 * svsk->sk_server->sv_bufsz,
				    3 * svsk->sk_server->sv_bufsz);

		set_bit(SK_CHNGBUF, &svsk->sk_flags);
		set_bit(SK_DATA, &svsk->sk_flags);
		if (sk->sk_state != TCP_ESTABLISHED)
			set_bit(SK_CLOSE, &svsk->sk_flags);
	}
}
void
svc_sock_update_bufs(struct svc_serv *serv)
{
	/*
	 * The number of server threads has changed. Update
	 * rcvbuf and sndbuf accordingly on all sockets
	 */
	struct list_head *le;

	spin_lock_bh(&serv->sv_lock);
	list_for_each(le, &serv->sv_permsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	list_for_each(le, &serv->sv_tempsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);
}
/*
 * Receive the next request on any socket.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int
svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_sock		*svsk = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_pool		*pool = rqstp->rq_pool;
	int			len, pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_sock)
		printk(KERN_ERR
			"svc_recv: service %p, socket not NULL!\n",
			rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			rqstp);

	/* Initialize the buffers */
	/* first reclaim pages that were moved to response list */
	svc_pushback_allpages(rqstp);

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE;
	while (rqstp->rq_arghi < pages) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p) {
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			continue;
		}
		rqstp->rq_argpages[rqstp->rq_arghi++] = p;
	}

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_argpages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	rqstp->rq_argused = 1;
	arg->pages = rqstp->rq_argpages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	if (signalled())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_bufsz;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		schedule_timeout(timeout);

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		if (!(svsk = rqstp->rq_sock)) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			return signalled()? -EINTR : -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
		rqstp, pool->sp_id, svsk, atomic_read(&svsk->sk_inuse));
	len = svsk->sk_recvfrom(rqstp);
	dprintk("svc: got len=%d\n", len);

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_sock_release(rqstp);
		return -EAGAIN;
	}
	svsk->sk_lastrecv = get_seconds();
	clear_bit(SK_OLD, &svsk->sk_flags);

	rqstp->rq_secure  = ntohs(rqstp->rq_addr.sin_port) < 1024;
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}
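/*
 * For orientation, a server thread drives svc_recv() in a loop of roughly
 * the following shape (illustrative only; see the real nfsd loop in
 * fs/nfsd/nfssvc.c):
 *
 *	err = svc_recv(rqstp, timeout);
 *	if (err == -EAGAIN || err == -EINTR)
 *		handle timeout / signal, then retry;
 *	else if (err > 0)
 *		svc_process(rqstp);	// dispatch; reply goes via svc_send()
 */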
/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
	svc_sock_release(rqstp);
}
/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk;
	int		len;
	struct xdr_buf	*xb;

	if ((svsk = rqstp->rq_sock) == NULL) {
		printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
		       __FILE__, __LINE__);
		return -EFAULT;
	}

	/* release the receive skb before sending the reply */
	svc_release_skb(rqstp);

	/* calculate over-all length */
	xb = & rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab svsk->sk_mutex to serialize outgoing data. */
	mutex_lock(&svsk->sk_mutex);
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		len = -ENOTCONN;
	else
		len = svsk->sk_sendto(rqstp);
	mutex_unlock(&svsk->sk_mutex);
	svc_sock_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}
/*
 * Timer function to close old temporary sockets, using
 * a mark-and-sweep algorithm.
 */
static void
svc_age_temp_sockets(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_sock *svsk;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_sockets\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_sockets: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		svsk = list_entry(le, struct svc_sock, sk_list);

		if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
			continue;
		if (atomic_read(&svsk->sk_inuse) || test_bit(SK_BUSY, &svsk->sk_flags))
			continue;
		atomic_inc(&svsk->sk_inuse);
		list_move(le, &to_be_aged);
		set_bit(SK_CLOSE, &svsk->sk_flags);
		set_bit(SK_DETACHED, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
		list_del_init(le);
		svsk = list_entry(le, struct svc_sock, sk_list);

		dprintk("queuing svsk %p for closing, %lu seconds old\n",
			svsk, get_seconds() - svsk->sk_lastrecv);

		/* a thread will dequeue and close it soon */
		svc_sock_enqueue(svsk);
		svc_sock_put(svsk);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}
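/*
 * The mark-and-sweep works in two passes: the first time the timer sees a
 * socket it only sets SK_OLD (the mark); any request received in the
 * meantime clears SK_OLD again in svc_recv().  A socket whose mark is still
 * set on the next run has therefore been idle for a full period and is
 * queued for closing (the sweep).
 */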
/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *
svc_setup_socket(struct svc_serv *serv, struct socket *sock,
		 int *errp, int pmap_register)
{
	struct svc_sock	*svsk;
	struct sock	*inet;

	dprintk("svc: svc_setup_socket %p\n", sock);
	if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
		*errp = -ENOMEM;
		return NULL;
	}

	inet = sock->sk;

	/* Register socket with portmapper */
	if (*errp >= 0 && pmap_register)
		*errp = svc_register(serv, inet->sk_protocol,
				     ntohs(inet_sk(inet)->sport));

	if (*errp < 0) {
		kfree(svsk);
		return NULL;
	}

	set_bit(SK_BUSY, &svsk->sk_flags);
	inet->sk_user_data = svsk;
	svsk->sk_sock = sock;
	svsk->sk_sk = inet;
	svsk->sk_ostate = inet->sk_state_change;
	svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;
	svsk->sk_server = serv;
	atomic_set(&svsk->sk_inuse, 0);
	svsk->sk_lastrecv = get_seconds();
	spin_lock_init(&svsk->sk_defer_lock);
	INIT_LIST_HEAD(&svsk->sk_deferred);
	INIT_LIST_HEAD(&svsk->sk_ready);
	mutex_init(&svsk->sk_mutex);

	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk);
	else
		svc_tcp_init(svsk);

	spin_lock_bh(&serv->sv_lock);
	if (!pmap_register) {
		set_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_tempsocks);
		serv->sv_tmpcnt++;
		if (serv->sv_temptimer.function == NULL) {
			/* setup timer to age temp sockets */
			setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
				    (unsigned long)serv);
			mod_timer(&serv->sv_temptimer,
				  jiffies + svc_conn_age_period * HZ);
		}
	} else {
		clear_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_permsocks);
	}
	spin_unlock_bh(&serv->sv_lock);

	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
		svsk, svsk->sk_sk);

	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
	return svsk;
}
int svc_addsock(struct svc_serv *serv,
		int fd,
		char *name_return,
		int *proto)
{
	int err = 0;
	struct socket *so = sockfd_lookup(fd, &err);
	struct svc_sock *svsk = NULL;

	if (!so)
		return err;
	if (so->sk->sk_family != AF_INET)
		err =  -EAFNOSUPPORT;
	else if (so->sk->sk_protocol != IPPROTO_TCP &&
	    so->sk->sk_protocol != IPPROTO_UDP)
		err =  -EPROTONOSUPPORT;
	else if (so->state > SS_UNCONNECTED)
		err = -EISCONN;
	else {
		svsk = svc_setup_socket(serv, so, &err, 1);
		if (svsk)
			err = 0;
	}
	if (err) {
		sockfd_put(so);
		return err;
	}
	if (proto) *proto = so->sk->sk_protocol;
	return one_sock_name(name_return, svsk);
}
EXPORT_SYMBOL_GPL(svc_addsock);
/*
 * Create socket for RPC service.
 */
static int
svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
{
	struct svc_sock	*svsk;
	struct socket	*sock;
	int		error;
	int		type;

	dprintk("svc: svc_create_socket(%s, %d, %u.%u.%u.%u:%d)\n",
		serv->sv_program->pg_name, protocol,
		NIPQUAD(sin->sin_addr.s_addr),
		ntohs(sin->sin_port));

	if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
		printk(KERN_WARNING "svc: only UDP and TCP "
				"sockets supported\n");
		return -EINVAL;
	}
	type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

	if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
		return error;

	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = 1; /* allow address reuse */
	error = kernel_bind(sock, (struct sockaddr *) sin,
			    sizeof(*sin));
	if (error < 0)
		goto bummer;

	if (protocol == IPPROTO_TCP) {
		if ((error = kernel_listen(sock, 64)) < 0)
			goto bummer;
	}

	if ((svsk = svc_setup_socket(serv, sock, &error, 1)) != NULL)
		return 0;

bummer:
	dprintk("svc: svc_create_socket error = %d\n", -error);
	sock_release(sock);
	return error;
}
/*
 * Remove a dead socket
 */
void
svc_delete_socket(struct svc_sock *svsk)
{
	struct svc_serv	*serv;
	struct sock	*sk;

	dprintk("svc: svc_delete_socket(%p)\n", svsk);

	serv = svsk->sk_server;
	sk = svsk->sk_sk;

	sk->sk_state_change = svsk->sk_ostate;
	sk->sk_data_ready = svsk->sk_odata;
	sk->sk_write_space = svsk->sk_owspace;

	spin_lock_bh(&serv->sv_lock);

	if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
		list_del_init(&svsk->sk_list);
	/*
	 * We used to delete the svc_sock from whichever list
	 * its sk_ready node was on, but we don't actually
	 * need to.  This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags))
		if (test_bit(SK_TEMP, &svsk->sk_flags))
			serv->sv_tmpcnt--;

	if (!atomic_read(&svsk->sk_inuse)) {
		spin_unlock_bh(&serv->sv_lock);
		if (svsk->sk_sock->file)
			sockfd_put(svsk->sk_sock);
		else
			sock_release(svsk->sk_sock);
		kfree(svsk);
	} else {
		spin_unlock_bh(&serv->sv_lock);
		dprintk(KERN_NOTICE "svc: server socket destroy delayed\n");
		/* svsk->sk_server = NULL; */
	}
}
/*
 * Make a socket for nfsd and lockd
 */
int
svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
{
	struct sockaddr_in sin;

	dprintk("svc: creating socket proto = %d\n", protocol);
	sin.sin_family      = AF_INET;
	sin.sin_addr.s_addr = INADDR_ANY;
	sin.sin_port        = htons(port);
	return svc_create_socket(serv, protocol, &sin);
}
/*
 * Handle defer and revisit of requests
 */
static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
	struct svc_sock *svsk;

	if (too_many) {
		svc_sock_put(dr->svsk);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	svsk = dr->svsk;
	dr->svsk = NULL;
	spin_lock_bh(&svsk->sk_defer_lock);
	list_add(&dr->handle.recent, &svsk->sk_deferred);
	spin_unlock_bh(&svsk->sk_defer_lock);
	set_bit(SK_DEFERRED, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
	svc_sock_put(svsk);
}
static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		/* FIXME maybe discard if size too large */
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		dr->addr = rqstp->rq_addr;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
	}
	atomic_inc(&rqstp->rq_sock->sk_inuse);
	dr->svsk = rqstp->rq_sock;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}
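/*
 * A deferred request is revisited via svc_revisit() above: once the cache
 * item it was waiting for becomes usable, the saved bytes in dr->args are
 * replayed through svc_deferred_recv() below as if they had just arrived
 * on the socket.
 */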
/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	rqstp->rq_arg.head[0].iov_base = dr->args;
	rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
	rqstp->rq_arg.page_len = 0;
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot        = dr->prot;
	rqstp->rq_addr        = dr->addr;
	rqstp->rq_daddr       = dr->daddr;
	return dr->argslen<<2;
}
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
		return NULL;
	spin_lock_bh(&svsk->sk_defer_lock);
	clear_bit(SK_DEFERRED, &svsk->sk_flags);
	if (!list_empty(&svsk->sk_deferred)) {
		dr = list_entry(svsk->sk_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(SK_DEFERRED, &svsk->sk_flags);
	}
	spin_unlock_bh(&svsk->sk_defer_lock);
	return dr;
}