/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list.
 *	svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	SK_CONN, SK_DATA can be set or cleared at any time.
 *		After a set, svc_sock_enqueue must be called.
 *		After a clear, the socket must be read/accepted;
 *		 if this succeeds, it must be set again.
 *	SK_CLOSE can be set at any time. It is never cleared.
 *	sk_inuse contains a bias of '1' until SK_DEAD is set,
 *		 so when sk_inuse hits zero, we know the socket is dead
 *		 and no-one is using it.
 *	SK_DEAD can only be set while SK_BUSY is held, which ensures
 *		 no other thread will be using the socket or will try to
 *		 set SK_DEAD.
 */

#define RPCDBG_FACILITY	RPCDBG_SVCSOCK
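
/* Illustrative sketch of the flag rules above (not code from this file):
 * a producer never touches the queues directly, it only sets a flag and
 * calls the enqueue routine:
 *
 *	set_bit(SK_DATA, &svsk->sk_flags);
 *	svc_sock_enqueue(svsk);
 *
 * A consumer clears SK_DATA before reading and sets it again if the read
 * found anything, so a wakeup cannot be lost between the two steps.
 */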
static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int *errp, int pmap_reg);
static void		svc_delete_socket(struct svc_sock *svsk);
static void		svc_udp_data_ready(struct sock *, int);
static int		svc_udp_recvfrom(struct svc_rqst *);
static int		svc_udp_sendto(struct svc_rqst *);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

static inline void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(sk->sk_lock.owner != NULL);
	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
		    &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
		    &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
		break;

	default:
		BUG();
	}
}
#else
static inline void svc_reclassify_socket(struct socket *sock)
{
}
#endif
/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}
/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static inline void
svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}
/*
 * Release an skbuff after use
 */
static inline void
svc_release_skb(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_skbuff;
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	if (skb) {
		rqstp->rq_skbuff = NULL;

		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
		skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
	}
	if (dr) {
		rqstp->rq_deferred = NULL;
		kfree(dr);
	}
}
/*
 * Any space to write?
 */
static inline unsigned long
svc_sock_wspace(struct svc_sock *svsk)
{
	int wspace;

	if (svsk->sk_sock->type == SOCK_STREAM)
		wspace = sk_stream_wspace(svsk->sk_sk);
	else
		wspace = sock_wspace(svsk->sk_sk);

	return wspace;
}
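
/* Worked example of the throttle in svc_sock_enqueue below, with
 * illustrative numbers not taken from this file: if sv_max_mesg is 32768
 * and 16384 bytes are already reserved, the socket stays off the queue
 * until svc_sock_wspace() exceeds (16384 + 32768) * 2 = 98304 bytes, so a
 * thread is only woken once the send buffer can absorb a worst-case reply.
 */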
/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
	struct svc_serv	*serv = svsk->sk_server;
	struct svc_pool *pool;
	struct svc_rqst	*rqstp;
	int cpu;

	if (!(svsk->sk_flags &
	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
		return;
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(svsk->sk_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
			"svc_sock_enqueue: threads and sockets both waiting??\n");

	if (test_bit(SK_DEAD, &svsk->sk_flags)) {
		/* Don't enqueue dead sockets */
		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}

	/* Mark socket as busy. It will remain in this state until the
	 * server has processed all pending data and put the socket back
	 * on the idle list.  We update SK_BUSY atomically because
	 * it also guards against trying to enqueue the svc_sock twice.
	 */
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Don't enqueue socket while already enqueued */
		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}
	BUG_ON(svsk->sk_pool != NULL);
	svsk->sk_pool = pool;

	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2
	     > svc_sock_wspace(svsk))
	    && !test_bit(SK_CLOSE, &svsk->sk_flags)
	    && !test_bit(SK_CONN, &svsk->sk_flags)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
			svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg,
			svc_sock_wspace(svsk));
		svsk->sk_pool = NULL;
		clear_bit(SK_BUSY, &svsk->sk_flags);
		goto out_unlock;
	}
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);

	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: socket %p served by daemon %p\n",
			svsk->sk_sk, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_sock)
			printk(KERN_ERR
				"svc_sock_enqueue: server %p, rq_sock=%p!\n",
				rqstp, rqstp->rq_sock);
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
		BUG_ON(svsk->sk_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
		list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
		BUG_ON(svsk->sk_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}
/*
 * Dequeue the first socket.  Must be called with the pool->sp_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_pool *pool)
{
	struct svc_sock	*svsk;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	svsk = list_entry(pool->sp_sockets.next,
			  struct svc_sock, sk_ready);
	list_del_init(&svsk->sk_ready);

	dprintk("svc: socket %p dequeued, inuse=%d\n",
		svsk->sk_sk, atomic_read(&svsk->sk_inuse));

	return svsk;
}
/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
	svsk->sk_pool = NULL;
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
}
/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_sock *svsk = rqstp->rq_sock;
		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
		rqstp->rq_reserved = space;

		svc_sock_enqueue(svsk);
	}
}
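
/* Typical use of svc_reserve (a sketch based on how nfsd-style services
 * call it, not code from this file): a handler that knows its reply is
 * small can return most of its reservation early,
 *
 *	svc_reserve(rqstp, 512);	i.e. reply needs head plus ~512 bytes
 *
 * which shrinks sk_reserved and may let svc_sock_enqueue re-queue the
 * socket for other threads sooner.
 */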
/*
 * Release a socket after use.
 */
static inline void
svc_sock_put(struct svc_sock *svsk)
{
	if (atomic_dec_and_test(&svsk->sk_inuse)) {
		BUG_ON(! test_bit(SK_DEAD, &svsk->sk_flags));

		dprintk("svc: releasing dead socket\n");
		if (svsk->sk_sock->file)
			sockfd_put(svsk->sk_sock);
		else
			sock_release(svsk->sk_sock);
		if (svsk->sk_info_authunix != NULL)
			svcauth_unix_info_release(svsk->sk_info_authunix);
		kfree(svsk);
	}
}
static void
svc_sock_release(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;

	svc_release_skb(rqstp);

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_sock = NULL;

	svc_sock_put(svsk);
}
/*
 * External function to wake up a server waiting for data
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void
svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_sock = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}
/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct socket	*sock = svsk->sk_sock;
	int		slen;
	char		buffer[CMSG_SPACE(sizeof(struct in_pktinfo))];
	struct cmsghdr *cmh = (struct cmsghdr *)buffer;
	struct in_pktinfo *pki = (struct in_pktinfo *)CMSG_DATA(cmh);
	int		len = 0;
	int		result;
	int		size;
	struct page	**ppage = xdr->pages;
	size_t		base = xdr->page_base;
	unsigned int	pglen = xdr->page_len;
	unsigned int	flags = MSG_MORE;

	slen = xdr->len;

	if (rqstp->rq_prot == IPPROTO_UDP) {
		/* set the source and destination */
		struct msghdr	msg;
		msg.msg_name    = &rqstp->rq_addr;
		msg.msg_namelen = sizeof(rqstp->rq_addr);
		msg.msg_iov     = NULL;
		msg.msg_iovlen  = 0;
		msg.msg_flags	= MSG_MORE;

		msg.msg_control = cmh;
		msg.msg_controllen = sizeof(buffer);
		cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		cmh->cmsg_level = SOL_IP;
		cmh->cmsg_type = IP_PKTINFO;
		pki->ipi_ifindex = 0;
		pki->ipi_spec_dst.s_addr = rqstp->rq_daddr;

		if (sock_sendmsg(sock, &msg, 0) < 0)
			goto out;
	}

	/* send head */
	if (slen == xdr->head[0].iov_len)
		flags = 0;
	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
			      xdr->head[0].iov_len, flags);
	if (len != xdr->head[0].iov_len)
		goto out;
	slen -= xdr->head[0].iov_len;
	if (slen == 0)
		goto out;

	/* send page data */
	size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
	while (pglen > 0) {
		if (slen == size)
			flags = 0;
		result = kernel_sendpage(sock, *ppage, base, size, flags);
		if (result > 0)
			len += result;
		if (result != size)
			goto out;
		slen -= size;
		pglen -= size;
		size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
		base = 0;
		ppage++;
	}
	/* send tail */
	if (xdr->tail[0].iov_len) {
		result = kernel_sendpage(sock, rqstp->rq_respages[0],
					 ((unsigned long)xdr->tail[0].iov_base)
					    & (PAGE_SIZE-1),
					 xdr->tail[0].iov_len, 0);
		if (result > 0)
			len += result;
	}
out:
	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %x)\n",
		rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len, xdr->len, len,
		rqstp->rq_addr.sin_addr.s_addr);

	return len;
}
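
/* Note on svc_sendto above: every chunk but the last is pushed with
 * MSG_MORE set (flags is only cleared once slen shows the current chunk
 * is final), so the stack may coalesce head, page data and tail into
 * fewer segments instead of flushing after each kernel_sendpage call.
 */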
/*
 * Report socket names for nfsdfs
 */
static int one_sock_name(char *buf, struct svc_sock *svsk)
{
	int len;

	switch(svsk->sk_sk->sk_family) {
	case AF_INET:
		len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
			      svsk->sk_sk->sk_protocol==IPPROTO_UDP?
			      "udp" : "tcp",
			      NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
			      inet_sk(svsk->sk_sk)->num);
		break;
	default:
		len = sprintf(buf, "*unknown-%d*\n",
			      svsk->sk_sk->sk_family);
	}
	return len;
}
int
svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
{
	struct svc_sock *svsk, *closesk = NULL;
	int len = 0;

	if (!serv)
		return 0;
	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
		int onelen = one_sock_name(buf+len, svsk);
		if (toclose && strcmp(toclose, buf+len) == 0)
			closesk = svsk;
		else
			len += onelen;
	}
	spin_unlock_bh(&serv->sv_lock);
	if (closesk)
		/* Should unregister with portmap, but you cannot
		 * unregister just one protocol...
		 */
		svc_close_socket(closesk);
	else if (toclose)
		return -ENOENT;
	return len;
}
EXPORT_SYMBOL(svc_sock_names);
/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
	struct socket	*sock = svsk->sk_sock;
	int		avail, err;

	err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);

	return (err >= 0)? avail : err;
}
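
/* TIOCINQ reports how many bytes are queued but not yet read; for a TCP
 * socket that is a byte count on the stream, which is what lets
 * svc_tcp_recvfrom compare the result directly against sk_reclen before
 * committing a thread to read a full record.
 */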
/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
	struct msghdr	msg;
	struct socket	*sock;
	int		len, alen;

	rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
	sock = rqstp->rq_sock->sk_sock;

	msg.msg_name    = &rqstp->rq_addr;
	msg.msg_namelen = sizeof(rqstp->rq_addr);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;

	msg.msg_flags	= MSG_DONTWAIT;

	len = kernel_recvmsg(sock, &msg, iov, nr, buflen, MSG_DONTWAIT);

	/* sock_recvmsg doesn't fill in the name/namelen, so we must..
	 * possibly we should cache this in the svc_sock structure
	 * at accept time. FIXME
	 */
	alen = sizeof(rqstp->rq_addr);
	kernel_getpeername(sock, (struct sockaddr *)&rqstp->rq_addr, &alen);

	dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
		rqstp->rq_sock, iov[0].iov_base, iov[0].iov_len, len);

	return len;
}
/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
	mm_segment_t	oldfs;
	oldfs = get_fs(); set_fs(KERNEL_DS);
	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
			(char*)&snd, sizeof(snd));
	sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
			(char*)&rcv, sizeof(rcv));
	set_fs(oldfs);
#else
	/* sock_setsockopt limits use to sysctl_?mem_max,
	 * which isn't acceptable.  Until that is made conditional
	 * on not having CAP_SYS_RESOURCE or similar, we go direct...
	 * DaveM said I could!
	 */
	lock_sock(sock->sk);
	sock->sk->sk_sndbuf = snd * 2;
	sock->sk->sk_rcvbuf = rcv * 2;
	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
	release_sock(sock->sk);
#endif
}
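
/* Sizing example for the callers below (illustrative numbers): with 8
 * threads and sv_max_mesg of 32768 bytes, (8+3) * 32768 = 360448 bytes is
 * requested per direction, and the assignment above stores twice that in
 * sk_sndbuf/sk_rcvbuf, mirroring the doubling setsockopt would apply.
 */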
/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
			svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
			svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
		svc_sock_enqueue(svsk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
		dprintk("RPC svc_write_space: someone sleeping on %p\n",
			svsk);
		wake_up_interruptible(sk->sk_sleep);
	}
}
/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	struct sk_buff	*skb;
	int		err, len;

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
	    /* udp sockets need large rcvbuf as all pending
	     * requests are still in that buffer.  sndbuf must
	     * also be large enough that there is enough space
	     * for one reply per thread.  We count all threads
	     * rather than threads in a particular pool, which
	     * provides an upper bound on the number of threads
	     * which will access the socket.
	     */
	    svc_sock_setbufsize(svsk->sk_sock,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg);

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	clear_bit(SK_DATA, &svsk->sk_flags);
	while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
		if (err == -EAGAIN) {
			svc_sock_received(svsk);
			return err;
		}
		/* possibly an icmp error */
		dprintk("svc: recvfrom returned error %d\n", -err);
	}
	if (skb->tstamp.off_sec == 0) {
		struct timeval tv;

		tv.tv_sec = xtime.tv_sec;
		tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
		skb_set_timestamp(skb, &tv);
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
	set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

	/*
	 * Maybe more packets - kick another thread ASAP.
	 */
	svc_sock_received(svsk);

	len  = skb->len - sizeof(struct udphdr);
	rqstp->rq_arg.len = len;

	rqstp->rq_prot = IPPROTO_UDP;

	/* Get sender address */
	rqstp->rq_addr.sin_family = AF_INET;
	rqstp->rq_addr.sin_port = skb->h.uh->source;
	rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr;
	rqstp->rq_daddr = skb->nh.iph->daddr;

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
			local_bh_enable();
			/* checksum error */
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		local_bh_enable();
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb)) {
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		rqstp->rq_skbuff = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
		rqstp->rq_respages = rqstp->rq_pages+1;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_respages = rqstp->rq_pages + 1 +
			(rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
	}

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;
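
	return len;
}

/* Layout note for svc_udp_recvfrom above: a datagram that fits in
 * head[0] leaves page_len at 0 and rq_respages right after the first
 * page; a larger one spills into rq_pages, and rq_respages is advanced
 * past the spilled pages so the reply cannot overwrite request data.
 */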
static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
	int		error;

	error = svc_sendto(rqstp, &rqstp->rq_res);
	if (error == -ECONNREFUSED)
		/* ICMP error on earlier request. */
		error = svc_sendto(rqstp, &rqstp->rq_res);

	return error;
}
static void
svc_udp_init(struct svc_sock *svsk)
{
	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;
	svsk->sk_recvfrom = svc_udp_recvfrom;
	svsk->sk_sendto = svc_udp_sendto;

	/* initialise setting must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
			    3 * svsk->sk_server->sv_max_mesg,
			    3 * svsk->sk_server->sv_max_mesg);

	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
	set_bit(SK_CHNGBUF, &svsk->sk_flags);
}
/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (listen) state change %d\n",
		sk, sk->sk_state);

	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of child sockets become ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(SK_CONN, &svsk->sk_flags);
			svc_sock_enqueue(svsk);
		} else
			printk("svc: socket %p: no user data\n", sk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
		sk, sk->sk_state, sk->sk_user_data);

	if (!svsk)
		printk("svc: socket %p: no user data\n", sk);
	else {
		set_bit(SK_CLOSE, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
static void
svc_tcp_data_ready(struct sock *sk, int count)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
		sk, sk->sk_user_data);
	if (svsk) {
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
/*
 * Accept a TCP connection
 */
static void
svc_tcp_accept(struct svc_sock *svsk)
{
	struct sockaddr_in sin;
	struct svc_serv	*serv = svsk->sk_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	struct svc_sock	*newsvsk;
	int		err, slen;

	dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
	if (!sock)
		return;

	clear_bit(SK_CONN, &svsk->sk_flags);
	err = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (err < 0) {
		if (err == -ENOMEM)
			printk(KERN_WARNING "%s: no more sockets!\n",
			       serv->sv_name);
		else if (err != -EAGAIN && net_ratelimit())
			printk(KERN_WARNING "%s: accept failed (err %d)!\n",
			       serv->sv_name, -err);
		return;
	}

	set_bit(SK_CONN, &svsk->sk_flags);
	svc_sock_enqueue(svsk);

	slen = sizeof(sin);
	err = kernel_getpeername(newsock, (struct sockaddr *) &sin, &slen);
	if (err < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: peername failed (err %d)!\n",
			       serv->sv_name, -err);
		goto failed;		/* aborted connection or whatever */
	}

	/* Ideally, we would want to reject connections from unauthorized
	 * hosts here, but when we get encryption, the IP of the host won't
	 * tell us anything.  For now just warn about unpriv connections.
	 */
	if (ntohs(sin.sin_port) >= 1024) {
		dprintk(KERN_WARNING
			"%s: connect from unprivileged port: %u.%u.%u.%u:%d\n",
			serv->sv_name,
			NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
	}

	dprintk("%s: connect from %u.%u.%u.%u:%04x\n", serv->sv_name,
		NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0)))
		goto failed;

	/* make sure that we don't have too many active connections.
	 * If we have, something must be dropped.
	 *
	 * There's no point in trying to do random drop here for
	 * DoS prevention. The NFS client does 1 reconnect in 15
	 * seconds. An attacker can easily beat that.
	 *
	 * The only somewhat efficient mechanism would be to drop
	 * old connections from the same IP first. But right now
	 * we don't even record the client IP in svc_sock.
	 */
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
		struct svc_sock *svsk = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open TCP "
					"sockets, consider increasing the "
					"number of nfsd threads\n",
					serv->sv_name);
				printk(KERN_NOTICE "%s: last TCP connect from "
					"%u.%u.%u.%u:%d\n",
					serv->sv_name,
					NIPQUAD(sin.sin_addr.s_addr),
					ntohs(sin.sin_port));
			}
			/*
			 * Always select the oldest socket. It's not fair,
			 * but so is life
			 */
			svsk = list_entry(serv->sv_tempsocks.prev,
					  struct svc_sock,
					  sk_list);
			set_bit(SK_CLOSE, &svsk->sk_flags);
			atomic_inc(&svsk->sk_inuse);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (svsk) {
			svc_sock_enqueue(svsk);
			svc_sock_put(svsk);
		}
	}

	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return;

failed:
	sock_release(newsock);
	return;
}
/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	int		len;
	struct kvec	*vec;
	int		pnum, vlen;

	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
		svsk, test_bit(SK_DATA, &svsk->sk_flags),
		test_bit(SK_CONN, &svsk->sk_flags),
		test_bit(SK_CLOSE, &svsk->sk_flags));

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	if (svsk->sk_sk->sk_state == TCP_LISTEN) {
		svc_tcp_accept(svsk);
		svc_sock_received(svsk);
		return 0;
	}

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
		/* sndbuf needs to have room for one request
		 * per thread, otherwise we can stall even when the
		 * network isn't a bottleneck.
		 *
		 * We count all threads rather than threads in a
		 * particular pool, which provides an upper bound
		 * on the number of threads which will access the socket.
		 *
		 * rcvbuf just needs to be able to hold a few requests.
		 * Normally they will be removed from the queue
		 * as soon as a complete request arrives.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    3 * serv->sv_max_mesg);

	clear_bit(SK_DATA, &svsk->sk_flags);

	/* Receive data. If we haven't got the record length yet, get
	 * the next four bytes. Otherwise try to gobble up as much as
	 * possible up to the complete record length.
	 */
	if (svsk->sk_tcplen < 4) {
		unsigned long	want = 4 - svsk->sk_tcplen;
		struct kvec	iov;

		iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
		iov.iov_len  = want;
		if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
			goto error;
		svsk->sk_tcplen += len;

		if (len < want) {
			dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
				len, want);
			svc_sock_received(svsk);
			return -EAGAIN; /* record header not complete */
		}

		svsk->sk_reclen = ntohl(svsk->sk_reclen);
		if (!(svsk->sk_reclen & 0x80000000)) {
			/* FIXME: technically, a record can be fragmented,
			 *  and non-terminal fragments will not have the top
			 *  bit set in the fragment length header.
			 *  But apparently no known nfs clients send fragmented
			 *  records. */
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (non-terminal)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
		svsk->sk_reclen &= 0x7fffffff;
		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
		if (svsk->sk_reclen > serv->sv_max_mesg) {
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (large)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
	}

	/* Check whether enough data is available */
	len = svc_recv_available(svsk);
	if (len < 0)
		goto error;

	if (len < svsk->sk_reclen) {
		dprintk("svc: incomplete TCP record (%d of %d)\n",
			len, svsk->sk_reclen);
		svc_sock_received(svsk);
		return -EAGAIN;	/* record not complete */
	}
	len = svsk->sk_reclen;
	set_bit(SK_DATA, &svsk->sk_flags);

	vec = rqstp->rq_vec;
	vec[0] = rqstp->rq_arg.head[0];
	vlen = PAGE_SIZE;
	pnum = 1;
	while (vlen < len) {
		vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
		vec[pnum].iov_len = PAGE_SIZE;
		pnum++;
		vlen += PAGE_SIZE;
	}
	rqstp->rq_respages = &rqstp->rq_pages[pnum];

	/* Now receive data */
	len = svc_recvfrom(rqstp, vec, pnum, len);
	if (len < 0)
		goto error;

	dprintk("svc: TCP complete record (%d bytes)\n", len);
	rqstp->rq_arg.len = len;
	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
	}

	rqstp->rq_skbuff      = NULL;
	rqstp->rq_prot	      = IPPROTO_TCP;

	/* Reset TCP read info */
	svsk->sk_reclen = 0;
	svsk->sk_tcplen = 0;

	svc_sock_received(svsk);
	if (serv->sv_stats)
		serv->sv_stats->nettcpcnt++;

	return len;

 err_delete:
	svc_delete_socket(svsk);
	return -EAGAIN;

 error:
	if (len == -EAGAIN) {
		dprintk("RPC: TCP recvfrom got EAGAIN\n");
		svc_sock_received(svsk);
	} else {
		printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
		       svsk->sk_server->sv_name, -len);
		goto err_delete;
	}

	return len;
}
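
/* RPC record marking over TCP (RFC 1831): each record is preceded by a
 * four-byte marker whose top bit flags the last fragment and whose low
 * 31 bits give the fragment length. That is why svc_tcp_recvfrom above
 * insists on 0x80000000 being set and masks it off, and why
 * svc_tcp_sendto below ORs it back in when building the reply header.
 */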
/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
	struct xdr_buf	*xbufp = &rqstp->rq_res;
	int sent;
	__be32 reclen;

	/* Set up the first element of the reply kvec.
	 * Any other kvecs that may be in use have been taken
	 * care of by the server implementation itself.
	 */
	reclen = htonl(0x80000000|((xbufp->len) - 4));
	memcpy(xbufp->head[0].iov_base, &reclen, 4);

	if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
		return -ENOTCONN;

	sent = svc_sendto(rqstp, &rqstp->rq_res);
	if (sent != xbufp->len) {
		printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
		       rqstp->rq_sock->sk_server->sv_name,
		       (sent<0)?"got error":"sent only",
		       sent, xbufp->len);
		set_bit(SK_CLOSE, &rqstp->rq_sock->sk_flags);
		svc_sock_enqueue(rqstp->rq_sock);
		sent = -EAGAIN;
	}
	return sent;
}
static void
svc_tcp_init(struct svc_sock *svsk)
{
	struct sock	*sk = svsk->sk_sk;
	struct tcp_sock *tp = tcp_sk(sk);

	svsk->sk_recvfrom = svc_tcp_recvfrom;
	svsk->sk_sendto = svc_tcp_sendto;

	if (sk->sk_state == TCP_LISTEN) {
		dprintk("setting up TCP socket for listening\n");
		sk->sk_data_ready = svc_tcp_listen_data_ready;
		set_bit(SK_CONN, &svsk->sk_flags);
	} else {
		dprintk("setting up TCP socket for reading\n");
		sk->sk_state_change = svc_tcp_state_change;
		sk->sk_data_ready = svc_tcp_data_ready;
		sk->sk_write_space = svc_write_space;

		svsk->sk_reclen = 0;
		svsk->sk_tcplen = 0;

		tp->nonagle = 1;        /* disable Nagle's algorithm */

		/* initialise setting must have enough space to
		 * receive and respond to one request.
		 * svc_tcp_recvfrom will re-adjust if necessary
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    3 * svsk->sk_server->sv_max_mesg,
				    3 * svsk->sk_server->sv_max_mesg);

		set_bit(SK_CHNGBUF, &svsk->sk_flags);
		set_bit(SK_DATA, &svsk->sk_flags);
		if (sk->sk_state != TCP_ESTABLISHED)
			set_bit(SK_CLOSE, &svsk->sk_flags);
	}
}
void
svc_sock_update_bufs(struct svc_serv *serv)
{
	/*
	 * The number of server threads has changed. Update
	 * rcvbuf and sndbuf accordingly on all sockets
	 */
	struct list_head *le;

	spin_lock_bh(&serv->sv_lock);
	list_for_each(le, &serv->sv_permsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	list_for_each(le, &serv->sv_tempsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);
}
/*
 * Receive the next request on any socket.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int
svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_sock		*svsk = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_pool		*pool = rqstp->rq_pool;
	int			len, i;
	int			pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_sock)
		printk(KERN_ERR
			"svc_recv: service %p, socket not NULL!\n",
			rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			rqstp);

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	for (i = 0; i < pages; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p)
				schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
	BUG_ON(pages >= RPCSVC_MAXPAGES);

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		if (!(svsk = rqstp->rq_sock)) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			return signalled()? -EINTR : -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
		rqstp, pool->sp_id, svsk, atomic_read(&svsk->sk_inuse));
	len = svsk->sk_recvfrom(rqstp);
	dprintk("svc: got len=%d\n", len);

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_sock_release(rqstp);
		return -EAGAIN;
	}
	svsk->sk_lastrecv = get_seconds();
	clear_bit(SK_OLD, &svsk->sk_flags);

	rqstp->rq_secure = ntohs(rqstp->rq_addr.sin_port) < 1024;
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
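
	return len;
}

/* Sketch of the thread loop that drives svc_recv (illustrative, modelled
 * on how nfsd and lockd use this interface, not code from this file):
 *
 *	while (!shutting_down) {
 *		err = svc_recv(rqstp, 30*HZ);
 *		if (err == -EAGAIN)
 *			continue;
 *		if (err == -EINTR)
 *			break;
 *		svc_process(rqstp);	dispatches and sends the reply
 *	}
 */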
/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
	svc_sock_release(rqstp);
}
/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk;
	int		len;
	struct xdr_buf	*xb;

	if ((svsk = rqstp->rq_sock) == NULL) {
		printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
		       __FILE__, __LINE__);
		return -EFAULT;
	}

	/* release the receive skb before sending the reply */
	svc_release_skb(rqstp);

	/* calculate over-all length */
	xb = & rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab svsk->sk_mutex to serialize outgoing data. */
	mutex_lock(&svsk->sk_mutex);
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		len = -ENOTCONN;
	else
		len = svsk->sk_sendto(rqstp);
	mutex_unlock(&svsk->sk_mutex);
	svc_sock_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}
/*
 * Timer function to close old temporary sockets, using
 * a mark-and-sweep algorithm.
 */
static void
svc_age_temp_sockets(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_sock *svsk;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_sockets\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_sockets: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		svsk = list_entry(le, struct svc_sock, sk_list);

		if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
			continue;
		if (atomic_read(&svsk->sk_inuse) || test_bit(SK_BUSY, &svsk->sk_flags))
			continue;
		atomic_inc(&svsk->sk_inuse);
		list_move(le, &to_be_aged);
		set_bit(SK_CLOSE, &svsk->sk_flags);
		set_bit(SK_DETACHED, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
		list_del_init(le);
		svsk = list_entry(le, struct svc_sock, sk_list);

		dprintk("queuing svsk %p for closing, %lu seconds old\n",
			svsk, get_seconds() - svsk->sk_lastrecv);

		/* a thread will dequeue and close it soon */
		svc_sock_enqueue(svsk);
		svc_sock_put(svsk);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}
/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *
svc_setup_socket(struct svc_serv *serv, struct socket *sock,
		 int *errp, int pmap_register)
{
	struct svc_sock	*svsk;
	struct sock	*inet;

	dprintk("svc: svc_setup_socket %p\n", sock);
	if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
		*errp = -ENOMEM;
		return NULL;
	}

	inet = sock->sk;

	/* Register socket with portmapper */
	if (*errp >= 0 && pmap_register)
		*errp = svc_register(serv, inet->sk_protocol,
				     ntohs(inet_sk(inet)->sport));

	if (*errp < 0) {
		kfree(svsk);
		return NULL;
	}

	set_bit(SK_BUSY, &svsk->sk_flags);
	inet->sk_user_data = svsk;
	svsk->sk_sock = sock;
	svsk->sk_sk = inet;
	svsk->sk_ostate = inet->sk_state_change;
	svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;
	svsk->sk_server = serv;
	atomic_set(&svsk->sk_inuse, 1);
	svsk->sk_lastrecv = get_seconds();
	spin_lock_init(&svsk->sk_defer_lock);
	INIT_LIST_HEAD(&svsk->sk_deferred);
	INIT_LIST_HEAD(&svsk->sk_ready);
	mutex_init(&svsk->sk_mutex);

	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk);
	else
		svc_tcp_init(svsk);

	spin_lock_bh(&serv->sv_lock);
	if (!pmap_register) {
		set_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_tempsocks);
		serv->sv_tmpcnt++;
		if (serv->sv_temptimer.function == NULL) {
			/* setup timer to age temp sockets */
			setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
				    (unsigned long)serv);
			mod_timer(&serv->sv_temptimer,
				  jiffies + svc_conn_age_period * HZ);
		}
	} else {
		clear_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_permsocks);
	}
	spin_unlock_bh(&serv->sv_lock);

	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
		svsk, svsk->sk_sk);

	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
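
	return svsk;
}

/* Note on pmap_register above: permanent listeners (pmap_register != 0)
 * go on sv_permsocks and are registered with the portmapper, while
 * accepted connections go on sv_tempsocks, are counted in sv_tmpcnt and
 * are aged out by the sv_temptimer mark-and-sweep timer.
 */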
int svc_addsock(struct svc_serv *serv,
		int fd,
		char *name_return,
		int *proto)
{
	int err = 0;
	struct socket *so = sockfd_lookup(fd, &err);
	struct svc_sock *svsk = NULL;

	if (!so)
		return err;
	if (so->sk->sk_family != AF_INET)
		err = -EAFNOSUPPORT;
	else if (so->sk->sk_protocol != IPPROTO_TCP &&
		 so->sk->sk_protocol != IPPROTO_UDP)
		err = -EPROTONOSUPPORT;
	else if (so->state > SS_UNCONNECTED)
		err = -EISCONN;
	else {
		svsk = svc_setup_socket(serv, so, &err, 1);
		if (svsk)
			err = 0;
	}
	if (err) {
		sockfd_put(so);
		return err;
	}
	if (proto) *proto = so->sk->sk_protocol;
	return one_sock_name(name_return, svsk);
}
EXPORT_SYMBOL_GPL(svc_addsock);
/*
 * Create socket for RPC service.
 */
static int
svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
{
	struct svc_sock	*svsk;
	struct socket	*sock;
	int		error;
	int		type;

	dprintk("svc: svc_create_socket(%s, %d, %u.%u.%u.%u:%d)\n",
		serv->sv_program->pg_name, protocol,
		NIPQUAD(sin->sin_addr.s_addr),
		ntohs(sin->sin_port));

	if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
		printk(KERN_WARNING "svc: only UDP and TCP "
		       "sockets supported\n");
		return -EINVAL;
	}
	type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

	if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
		return error;

	svc_reclassify_socket(sock);

	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = 1; /* allow address reuse */
	error = kernel_bind(sock, (struct sockaddr *) sin,
			    sizeof(*sin));
	if (error < 0)
		goto bummer;

	if (protocol == IPPROTO_TCP) {
		if ((error = kernel_listen(sock, 64)) < 0)
			goto bummer;
	}

	if ((svsk = svc_setup_socket(serv, sock, &error, 1)) != NULL)
		return 0;

bummer:
	dprintk("svc: svc_create_socket error = %d\n", -error);
	sock_release(sock);
	return error;
}
/*
 * Remove a dead socket
 */
static void
svc_delete_socket(struct svc_sock *svsk)
{
	struct svc_serv	*serv;
	struct sock	*sk;

	dprintk("svc: svc_delete_socket(%p)\n", svsk);

	serv = svsk->sk_server;
	sk = svsk->sk_sk;

	sk->sk_state_change = svsk->sk_ostate;
	sk->sk_data_ready = svsk->sk_odata;
	sk->sk_write_space = svsk->sk_owspace;

	spin_lock_bh(&serv->sv_lock);

	if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
		list_del_init(&svsk->sk_list);
	/*
	 * We used to delete the svc_sock from whichever list
	 * its sk_ready node was on, but we don't actually
	 * need to.  This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) {
		BUG_ON(atomic_read(&svsk->sk_inuse)<2);
		atomic_dec(&svsk->sk_inuse);
		if (test_bit(SK_TEMP, &svsk->sk_flags))
			serv->sv_tmpcnt--;
	}

	spin_unlock_bh(&serv->sv_lock);
}
void svc_close_socket(struct svc_sock *svsk)
{
	set_bit(SK_CLOSE, &svsk->sk_flags);
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
		/* someone else will have to effect the close */
		return;

	atomic_inc(&svsk->sk_inuse);
	svc_delete_socket(svsk);
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_put(svsk);
}
/*
 * Make a socket for nfsd and lockd
 */
int
svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
{
	struct sockaddr_in sin;

	dprintk("svc: creating socket proto = %d\n", protocol);
	sin.sin_family      = AF_INET;
	sin.sin_addr.s_addr = INADDR_ANY;
	sin.sin_port        = htons(port);
	return svc_create_socket(serv, protocol, &sin);
}
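
/* Usage sketch (illustrative): a single-threaded service such as lockd
 * can bring up its listeners with
 *
 *	err = svc_makesock(serv, IPPROTO_UDP, port);
 *	if (!err)
 *		err = svc_makesock(serv, IPPROTO_TCP, port);
 *
 * binding both protocols on INADDR_ANY at the given port.
 */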
/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
	struct svc_sock *svsk;

	if (too_many) {
		svc_sock_put(dr->svsk);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	svsk = dr->svsk;
	dr->svsk = NULL;
	spin_lock_bh(&svsk->sk_defer_lock);
	list_add(&dr->handle.recent, &svsk->sk_deferred);
	spin_unlock_bh(&svsk->sk_defer_lock);
	set_bit(SK_DEFERRED, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
	svc_sock_put(svsk);
}
static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		/* FIXME maybe discard if size too large */
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		dr->addr = rqstp->rq_addr;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
	}
	atomic_inc(&rqstp->rq_sock->sk_inuse);
	dr->svsk = rqstp->rq_sock;

	dr->handle.revisit = svc_revisit;
/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	rqstp->rq_arg.head[0].iov_base = dr->args;
	rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
	rqstp->rq_arg.page_len = 0;
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot        = dr->prot;
	rqstp->rq_addr        = dr->addr;
	rqstp->rq_daddr       = dr->daddr;
	rqstp->rq_respages    = rqstp->rq_pages;
	return dr->argslen<<2;
}
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
		return NULL;
	spin_lock_bh(&svsk->sk_defer_lock);
	clear_bit(SK_DEFERRED, &svsk->sk_flags);
	if (!list_empty(&svsk->sk_deferred)) {
		dr = list_entry(svsk->sk_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(SK_DEFERRED, &svsk->sk_flags);
	}
	spin_unlock_bh(&svsk->sk_defer_lock);
	return dr;
}