/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
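/*
 * Illustrative sketch (an addition, not compiled into this file): the
 * 4-byte record marker discussed above carries a "last fragment" flag
 * in the top bit and the fragment length in the low 31 bits, which is
 * why svc_tcp_recvfrom() checks 0x80000000 and masks with 0x7fffffff.
 */
#if 0
static u32 example_record_marker(u32 fraglen, int last_frag)
{
	/* host-order marker; sent on the wire as htonl(marker) */
	return (last_frag ? 0x80000000u : 0) | (fraglen & 0x7fffffffu);
}
#endif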
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list
 *	svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	SK_CONN, SK_DATA, can be set or cleared at any time.
 *		after a set, svc_sock_enqueue must be called.
 *		after a clear, the socket must be read/accepted
 *		 if this succeeds, it must be set again.
 *	SK_CLOSE can be set at any time. It is never cleared.
 *	sk_inuse contains a bias of '1' until SK_DEAD is set.
 *		 so when sk_inuse hits zero, we know the socket is dead
 *		 and no-one is using it.
 *	SK_DEAD can only be set while SK_BUSY is held which ensures
 *		 no other thread will be using the socket or will try to
 *		 set SK_DEAD.
 */

#define RPCDBG_FACILITY	RPCDBG_SVCSOCK
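/*
 * Illustrative sketch (an addition, not compiled): the flag rules
 * above boil down to a two-step pattern that every notification path
 * in this file follows -- record the event, then (re)enqueue:
 */
#if 0
static void example_notify(struct svc_sock *svsk)
{
	set_bit(SK_DATA, &svsk->sk_flags);	/* 1: set the flag     */
	svc_sock_enqueue(svsk);			/* 2: wake up or queue */
}
#endif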
static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int *errp, int flags);
static void		svc_delete_socket(struct svc_sock *svsk);
static void		svc_udp_data_ready(struct sock *, int);
static int		svc_udp_recvfrom(struct svc_rqst *);
static int		svc_udp_sendto(struct svc_rqst *);
static void		svc_close_socket(struct svc_sock *svsk);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

static inline void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(sk->sk_lock.owner != NULL);
	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
		    &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
		    &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
		break;

	default:
		BUG();
	}
}
#else
static inline void svc_reclassify_socket(struct socket *sock)
{
}
#endif
static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len)
{
	switch (addr->sa_family) {
	case AF_INET:
		snprintf(buf, len, "%u.%u.%u.%u, port=%u",
			NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
			htons(((struct sockaddr_in *) addr)->sin_port));
		break;

	case AF_INET6:
		snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
			NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
			htons(((struct sockaddr_in6 *) addr)->sin6_port));
		break;

	default:
		snprintf(buf, len, "unknown address type: %d", addr->sa_family);
		break;
	}
	return buf;
}
/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);
/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}
/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static inline void
svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}
/*
 * Release an skbuff after use
 */
static inline void
svc_release_skb(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_skbuff;
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	if (skb) {
		rqstp->rq_skbuff = NULL;

		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
		skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
	}
	if (dr) {
		rqstp->rq_deferred = NULL;
		kfree(dr);
	}
}
/*
 * Any space to write?
 */
static inline unsigned long
svc_sock_wspace(struct svc_sock *svsk)
{
	int wspace;

	if (svsk->sk_sock->type == SOCK_STREAM)
		wspace = sk_stream_wspace(svsk->sk_sk);
	else
		wspace = sock_wspace(svsk->sk_sk);

	return wspace;
}
/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
	struct svc_serv	*serv = svsk->sk_server;
	struct svc_pool *pool;
	struct svc_rqst	*rqstp;
	int cpu;

	if (!(svsk->sk_flags &
	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
		return;
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(svsk->sk_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
			"svc_sock_enqueue: threads and sockets both waiting??\n");

	if (test_bit(SK_DEAD, &svsk->sk_flags)) {
		/* Don't enqueue dead sockets */
		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}

	/* Mark socket as busy. It will remain in this state until the
	 * server has processed all pending data and put the socket back
	 * on the idle list.  We update SK_BUSY atomically because
	 * it also guards against trying to enqueue the svc_sock twice.
	 */
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Don't enqueue socket while already enqueued */
		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}
	BUG_ON(svsk->sk_pool != NULL);
	svsk->sk_pool = pool;

	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2
	     > svc_sock_wspace(svsk))
	    && !test_bit(SK_CLOSE, &svsk->sk_flags)
	    && !test_bit(SK_CONN, &svsk->sk_flags)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
			svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg,
			svc_sock_wspace(svsk));
		svsk->sk_pool = NULL;
		clear_bit(SK_BUSY, &svsk->sk_flags);
		goto out_unlock;
	}
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);

	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: socket %p served by daemon %p\n",
			svsk->sk_sk, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_sock)
			printk(KERN_ERR
				"svc_sock_enqueue: server %p, rq_sock=%p!\n",
				rqstp, rqstp->rq_sock);
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
		BUG_ON(svsk->sk_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
		list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
		BUG_ON(svsk->sk_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}
/*
 * Dequeue the first socket.  Must be called with the pool->sp_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_pool *pool)
{
	struct svc_sock	*svsk;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	svsk = list_entry(pool->sp_sockets.next,
			  struct svc_sock, sk_ready);
	list_del_init(&svsk->sk_ready);

	dprintk("svc: socket %p dequeued, inuse=%d\n",
		svsk->sk_sk, atomic_read(&svsk->sk_inuse));

	return svsk;
}
/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
	svsk->sk_pool = NULL;
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
}
/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_sock *svsk = rqstp->rq_sock;
		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
		rqstp->rq_reserved = space;

		svc_sock_enqueue(svsk);
	}
}
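/*
 * Illustrative call (an addition, not compiled): a service that knows
 * its reply will be small can shrink its reservation early, freeing
 * write space on the socket; the 512-byte bound here is made up.
 */
#if 0
static void example_small_reply(struct svc_rqst *rqstp)
{
	svc_reserve(rqstp, 512);	/* reply fits in head + 512 bytes */
}
#endif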
/*
 * Release a socket after use.
 */
static inline void
svc_sock_put(struct svc_sock *svsk)
{
	if (atomic_dec_and_test(&svsk->sk_inuse)) {
		BUG_ON(! test_bit(SK_DEAD, &svsk->sk_flags));

		dprintk("svc: releasing dead socket\n");
		if (svsk->sk_sock->file)
			sockfd_put(svsk->sk_sock);
		else
			sock_release(svsk->sk_sock);
		if (svsk->sk_info_authunix != NULL)
			svcauth_unix_info_release(svsk->sk_info_authunix);
		kfree(svsk);
	}
}
static void
svc_sock_release(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;

	svc_release_skb(rqstp);

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_sock = NULL;

	svc_sock_put(svsk);
}
/*
 * External function to wake up a server waiting for data
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void
svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_sock = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}
union svc_pktinfo_u {
	struct in_pktinfo pkti;
	struct in6_pktinfo pkti6;
};

static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
			struct in_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IP;
			cmh->cmsg_type = IP_PKTINFO;
			pki->ipi_ifindex = 0;
			pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;

	case AF_INET6: {
			struct in6_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IPV6;
			cmh->cmsg_type = IPV6_PKTINFO;
			pki->ipi6_ifindex = 0;
			ipv6_addr_copy(&pki->ipi6_addr,
					&rqstp->rq_daddr.addr6);
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;
	}
	return;
}
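/*
 * Illustrative usage (an addition, not compiled): callers such as
 * svc_sendto() below provide a buffer sized for either pktinfo
 * variant and let svc_set_cmsg_data() fill in the family-specific
 * level, type, length and payload.
 */
#if 0
static void example_cmsg(struct svc_rqst *rqstp)
{
	char buffer[CMSG_SPACE(sizeof(union svc_pktinfo_u))];
	struct cmsghdr *cmh = (struct cmsghdr *)buffer;

	svc_set_cmsg_data(rqstp, cmh);
}
#endif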
/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct socket	*sock = svsk->sk_sock;
	int		slen;
	char		buffer[CMSG_SPACE(sizeof(union svc_pktinfo_u))];
	struct cmsghdr *cmh = (struct cmsghdr *)buffer;
	int		len = 0;
	int		result;
	int		size;
	struct page	**ppage = xdr->pages;
	size_t		base = xdr->page_base;
	unsigned int	pglen = xdr->page_len;
	unsigned int	flags = MSG_MORE;
	char		buf[RPC_MAX_ADDRBUFLEN];

	slen = xdr->len;

	if (rqstp->rq_prot == IPPROTO_UDP) {
		struct msghdr msg = {
			.msg_name	= &rqstp->rq_addr,
			.msg_namelen	= rqstp->rq_addrlen,
			.msg_control	= cmh,
			.msg_controllen	= sizeof(buffer),
			.msg_flags	= MSG_MORE,
		};

		svc_set_cmsg_data(rqstp, cmh);

		if (sock_sendmsg(sock, &msg, 0) < 0)
			goto out;
	}

	/* send head */
	if (slen == xdr->head[0].iov_len)
		flags = 0;
	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
				  xdr->head[0].iov_len, flags);
	if (len != xdr->head[0].iov_len)
		goto out;
	slen -= xdr->head[0].iov_len;
	if (slen == 0)
		goto out;

	/* send page data */
	size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
	while (pglen > 0) {
		if (slen == size)
			flags = 0;
		result = kernel_sendpage(sock, *ppage, base, size, flags);
		if (result > 0)
			len += result;
		if (result != size)
			goto out;
		slen -= size;
		pglen -= size;
		size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
		base = 0;
		ppage++;
	}
	/* send tail */
	if (xdr->tail[0].iov_len) {
		result = kernel_sendpage(sock, rqstp->rq_respages[0],
					     ((unsigned long)xdr->tail[0].iov_base)
						& (PAGE_SIZE-1),
					     xdr->tail[0].iov_len, 0);

		if (result > 0)
			len += result;
	}
out:
	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n",
		rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len,
		xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf)));

	return len;
}
/*
 * Report socket names for nfsdfs
 */
static int one_sock_name(char *buf, struct svc_sock *svsk)
{
	int len;

	switch(svsk->sk_sk->sk_family) {
	case AF_INET:
		len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
			      svsk->sk_sk->sk_protocol==IPPROTO_UDP?
			      "udp" : "tcp",
			      NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
			      inet_sk(svsk->sk_sk)->num);
		break;
	default:
		len = sprintf(buf, "*unknown-%d*\n",
			       svsk->sk_sk->sk_family);
	}
	return len;
}
int
svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
{
	struct svc_sock *svsk, *closesk = NULL;
	int len = 0;

	if (!serv)
		return 0;
	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
		int onelen = one_sock_name(buf+len, svsk);
		if (toclose && strcmp(toclose, buf+len) == 0)
			closesk = svsk;
		else
			len += onelen;
	}
	spin_unlock_bh(&serv->sv_lock);
	if (closesk)
		/* Should unregister with portmap, but you cannot
		 * unregister just one protocol...
		 */
		svc_close_socket(closesk);
	else if (toclose)
		return -ENOENT;
	return len;
}
EXPORT_SYMBOL(svc_sock_names);
/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
	struct socket	*sock = svsk->sk_sock;
	int		avail, err;

	err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);

	return (err >= 0)? avail : err;
}
/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
	struct svc_sock *svsk = rqstp->rq_sock;
	struct msghdr msg = {
		.msg_flags	= MSG_DONTWAIT,
	};
	int len;

	len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen,
				msg.msg_flags);

	/* sock_recvmsg doesn't fill in the name/namelen, so we must..
	 */
	memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen);
	rqstp->rq_addrlen = svsk->sk_remotelen;

	dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
		svsk, iov[0].iov_base, iov[0].iov_len, len);

	return len;
}
/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
	mm_segment_t	oldfs;
	oldfs = get_fs(); set_fs(KERNEL_DS);
	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
			(char*)&snd, sizeof(snd));
	sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
			(char*)&rcv, sizeof(rcv));
	set_fs(oldfs);
#else
	/* sock_setsockopt limits use to sysctl_?mem_max,
	 * which isn't acceptable.  Until that is made conditional
	 * on not having CAP_SYS_RESOURCE or similar, we go direct...
	 * DaveM said I could!
	 */
	lock_sock(sock->sk);
	sock->sk->sk_sndbuf = snd * 2;
	sock->sk->sk_rcvbuf = rcv * 2;
	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
	release_sock(sock->sk);
#endif
}
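/*
 * Illustrative arithmetic (an addition, not compiled): since the
 * function stores snd*2 and rcv*2, a caller requesting
 * (sv_nrthreads+3) * sv_max_mesg with, say, 8 threads and 32K
 * messages ends up with sk_sndbuf == (8+3) * 32768 * 2 bytes.
 */
#if 0
static void example_bufsize(struct svc_serv *serv, struct socket *sock)
{
	unsigned int want = (serv->sv_nrthreads + 3) * serv->sv_max_mesg;

	svc_sock_setbufsize(sock, want, want);
}
#endif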
/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
			svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
			svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
		svc_sock_enqueue(svsk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
		dprintk("RPC svc_write_space: someone sleeping on %p\n",
		       svsk);
		wake_up_interruptible(sk->sk_sleep);
	}
}
static inline void svc_udp_get_dest_address(struct svc_rqst *rqstp,
					    struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
		struct in_pktinfo *pki = CMSG_DATA(cmh);
		rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
		break;
		}
	case AF_INET6: {
		struct in6_pktinfo *pki = CMSG_DATA(cmh);
		ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
		break;
		}
	}
}
/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	struct sk_buff	*skb;
	char		buffer[CMSG_SPACE(sizeof(union svc_pktinfo_u))];
	struct cmsghdr *cmh = (struct cmsghdr *)buffer;
	int		err, len;
	struct msghdr msg = {
		.msg_name = svc_addr(rqstp),
		.msg_control = cmh,
		.msg_controllen = sizeof(buffer),
		.msg_flags = MSG_DONTWAIT,
	};

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
	    /* udp sockets need large rcvbuf as all pending
	     * requests are still in that buffer.  sndbuf must
	     * also be large enough that there is enough space
	     * for one reply per thread.  We count all threads
	     * rather than threads in a particular pool, which
	     * provides an upper bound on the number of threads
	     * which will access the socket.
	     */
	    svc_sock_setbufsize(svsk->sk_sock,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg);

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	clear_bit(SK_DATA, &svsk->sk_flags);
	while ((err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
				     0, 0, MSG_PEEK | MSG_DONTWAIT)) < 0 ||
	       (skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
		if (err == -EAGAIN) {
			svc_sock_received(svsk);
			return err;
		}
		/* possibly an icmp error */
		dprintk("svc: recvfrom returned error %d\n", -err);
	}
	rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
	if (skb->tstamp.off_sec == 0) {
		struct timeval tv;

		tv.tv_sec = xtime.tv_sec;
		tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
		skb_set_timestamp(skb, &tv);
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
	set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

	/*
	 * Maybe more packets - kick another thread ASAP.
	 */
	svc_sock_received(svsk);

	len = skb->len - sizeof(struct udphdr);
	rqstp->rq_arg.len = len;

	rqstp->rq_prot = IPPROTO_UDP;

	if (cmh->cmsg_level != IPPROTO_IP ||
	    cmh->cmsg_type != IP_PKTINFO) {
		if (net_ratelimit())
			printk("rpcsvc: received unknown control message:"
			       "%d/%d\n",
			       cmh->cmsg_level, cmh->cmsg_type);
		skb_free_datagram(svsk->sk_sk, skb);
		return 0;
	}
	svc_udp_get_dest_address(rqstp, cmh);

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
			local_bh_enable();
			/* checksum error */
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		local_bh_enable();
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb)) {
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		rqstp->rq_skbuff = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
		rqstp->rq_respages = rqstp->rq_pages+1;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_respages = rqstp->rq_pages + 1 +
			(rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
	}

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	return len;
}
static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
	int		error;

	error = svc_sendto(rqstp, &rqstp->rq_res);
	if (error == -ECONNREFUSED)
		/* ICMP error on earlier request. */
		error = svc_sendto(rqstp, &rqstp->rq_res);

	return error;
}
static void
svc_udp_init(struct svc_sock *svsk)
{
	int one = 1;
	mm_segment_t oldfs;

	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;
	svsk->sk_recvfrom = svc_udp_recvfrom;
	svsk->sk_sendto = svc_udp_sendto;

	/* initialise setting must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
			    3 * svsk->sk_server->sv_max_mesg,
			    3 * svsk->sk_server->sv_max_mesg);

	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
	set_bit(SK_CHNGBUF, &svsk->sk_flags);

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	/* make sure we get destination address info */
	svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
				       (char __user *)&one, sizeof(one));
	set_fs(oldfs);
}
/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (listen) state change %d\n",
		sk, sk->sk_state);

	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of the child sockets becomes ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(SK_CONN, &svsk->sk_flags);
			svc_sock_enqueue(svsk);
		} else
			printk("svc: socket %p: no user data\n", sk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
		sk, sk->sk_state, sk->sk_user_data);

	if (!svsk)
		printk("svc: socket %p: no user data\n", sk);
	else {
		set_bit(SK_CLOSE, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
static void
svc_tcp_data_ready(struct sock *sk, int count)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
		sk, sk->sk_user_data);
	if (svsk) {
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
static inline int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}
/*
 * Accept a TCP connection
 */
static void
svc_tcp_accept(struct svc_sock *svsk)
{
	struct sockaddr_storage addr;
	struct sockaddr	*sin = (struct sockaddr *) &addr;
	struct svc_serv	*serv = svsk->sk_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	struct svc_sock	*newsvsk;
	int		err, slen;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
	if (!sock)
		return;

	clear_bit(SK_CONN, &svsk->sk_flags);
	err = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (err < 0) {
		if (err == -ENOMEM)
			printk(KERN_WARNING "%s: no more sockets!\n",
			       serv->sv_name);
		else if (err != -EAGAIN && net_ratelimit())
			printk(KERN_WARNING "%s: accept failed (err %d)!\n",
				   serv->sv_name, -err);
		return;
	}

	set_bit(SK_CONN, &svsk->sk_flags);
	svc_sock_enqueue(svsk);

	err = kernel_getpeername(newsock, sin, &slen);
	if (err < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: peername failed (err %d)!\n",
				   serv->sv_name, -err);
		goto failed;		/* aborted connection or whatever */
	}

	/* Ideally, we would want to reject connections from unauthorized
	 * hosts here, but when we get encryption, the IP of the host won't
	 * tell us anything.  For now just warn about unpriv connections.
	 */
	if (!svc_port_is_privileged(sin)) {
		dprintk(KERN_WARNING
			"%s: connect from unprivileged port: %s\n",
			serv->sv_name,
			__svc_print_addr(sin, buf, sizeof(buf)));
	}
	dprintk("%s: connect from %s\n", serv->sv_name,
		__svc_print_addr(sin, buf, sizeof(buf)));

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	if (!(newsvsk = svc_setup_socket(serv, newsock, &err,
				 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY))))
		goto failed;
	memcpy(&newsvsk->sk_remote, sin, slen);
	newsvsk->sk_remotelen = slen;

	svc_sock_received(newsvsk);

	/* make sure that we don't have too many active connections.
	 * If we have, something must be dropped.
	 *
	 * There's no point in trying to do random drop here for
	 * DoS prevention. The NFS clients do 1 reconnect in 15
	 * seconds. An attacker can easily beat that.
	 *
	 * The only somewhat efficient mechanism would be to drop
	 * old connections from the same IP first. But right now
	 * we don't even record the client IP in svc_sock.
	 */
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
		struct svc_sock *svsk = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open TCP "
					"sockets, consider increasing the "
					"number of nfsd threads\n",
						   serv->sv_name);
				printk(KERN_NOTICE
				       "%s: last TCP connect from %s\n",
				       serv->sv_name, buf);
			}
			/*
			 * Always select the oldest socket. It's not fair,
			 * but so is life
			 */
			svsk = list_entry(serv->sv_tempsocks.prev,
					  struct svc_sock,
					  sk_list);
			set_bit(SK_CLOSE, &svsk->sk_flags);
			atomic_inc(&svsk->sk_inuse);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (svsk) {
			svc_sock_enqueue(svsk);
			svc_sock_put(svsk);
		}
	}

	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return;

failed:
	sock_release(newsock);
	return;
}
/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	int		len;
	struct kvec *vec;
	int pnum, vlen;

	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
		svsk, test_bit(SK_DATA, &svsk->sk_flags),
		test_bit(SK_CONN, &svsk->sk_flags),
		test_bit(SK_CLOSE, &svsk->sk_flags));

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	if (svsk->sk_sk->sk_state == TCP_LISTEN) {
		svc_tcp_accept(svsk);
		svc_sock_received(svsk);
		return 0;
	}

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
		/* sndbuf needs to have room for one request
		 * per thread, otherwise we can stall even when the
		 * network isn't a bottleneck.
		 *
		 * We count all threads rather than threads in a
		 * particular pool, which provides an upper bound
		 * on the number of threads which will access the socket.
		 *
		 * rcvbuf just needs to be able to hold a few requests.
		 * Normally they will be removed from the queue
		 * as soon as a complete request arrives.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    3 * serv->sv_max_mesg);

	clear_bit(SK_DATA, &svsk->sk_flags);

	/* Receive data. If we haven't got the record length yet, get
	 * the next four bytes. Otherwise try to gobble up as much as
	 * possible up to the complete record length.
	 */
	if (svsk->sk_tcplen < 4) {
		unsigned long	want = 4 - svsk->sk_tcplen;
		struct kvec	iov;

		iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
		iov.iov_len  = want;
		if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
			goto error;
		svsk->sk_tcplen += len;

		if (len < want) {
			dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
				len, want);
			svc_sock_received(svsk);
			return -EAGAIN; /* record header not complete */
		}

		svsk->sk_reclen = ntohl(svsk->sk_reclen);
		if (!(svsk->sk_reclen & 0x80000000)) {
			/* FIXME: technically, a record can be fragmented,
			 *  and non-terminal fragments will not have the top
			 *  bit set in the fragment length header.
			 *  But apparently no known nfs clients send fragmented
			 *  records. */
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (non-terminal)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
		svsk->sk_reclen &= 0x7fffffff;
		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
		if (svsk->sk_reclen > serv->sv_max_mesg) {
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (large)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
	}

	/* Check whether enough data is available */
	len = svc_recv_available(svsk);
	if (len < 0)
		goto error;

	if (len < svsk->sk_reclen) {
		dprintk("svc: incomplete TCP record (%d of %d)\n",
			len, svsk->sk_reclen);
		svc_sock_received(svsk);
		return -EAGAIN;	/* record not complete */
	}
	len = svsk->sk_reclen;
	set_bit(SK_DATA, &svsk->sk_flags);

	vec = rqstp->rq_vec;
	vec[0] = rqstp->rq_arg.head[0];
	vlen = PAGE_SIZE;
	pnum = 1;
	while (vlen < len) {
		vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
		vec[pnum].iov_len = PAGE_SIZE;
		pnum++;
		vlen += PAGE_SIZE;
	}
	rqstp->rq_respages = &rqstp->rq_pages[pnum];

	/* Now receive data */
	len = svc_recvfrom(rqstp, vec, pnum, len);
	if (len < 0)
		goto error;

	dprintk("svc: TCP complete record (%d bytes)\n", len);
	rqstp->rq_arg.len = len;
	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
	}

	rqstp->rq_skbuff      = NULL;
	rqstp->rq_prot	      = IPPROTO_TCP;

	/* Reset TCP read info */
	svsk->sk_reclen = 0;
	svsk->sk_tcplen = 0;

	svc_sock_received(svsk);
	if (serv->sv_stats)
		serv->sv_stats->nettcpcnt++;

	return len;

 err_delete:
	svc_delete_socket(svsk);
	return -EAGAIN;

 error:
	if (len == -EAGAIN) {
		dprintk("RPC: TCP recvfrom got EAGAIN\n");
		svc_sock_received(svsk);
	} else {
		printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
		       svsk->sk_server->sv_name, -len);
		goto err_delete;
	}

	return len;
}
/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
	struct xdr_buf	*xbufp = &rqstp->rq_res;
	int sent;
	u32 reclen;

	/* Set up the first element of the reply kvec.
	 * Any other kvecs that may be in use have been taken
	 * care of by the server implementation itself.
	 */
	reclen = htonl(0x80000000|((xbufp->len ) - 4));
	memcpy(xbufp->head[0].iov_base, &reclen, 4);

	if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
		return -ENOTCONN;

	sent = svc_sendto(rqstp, &rqstp->rq_res);
	if (sent != xbufp->len) {
		printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
		       rqstp->rq_sock->sk_server->sv_name,
		       (sent<0)?"got error":"sent only",
		       sent, xbufp->len);
		set_bit(SK_CLOSE, &rqstp->rq_sock->sk_flags);
		svc_sock_enqueue(rqstp->rq_sock);
		sent = -EAGAIN;
	}
	return sent;
}
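/*
 * Illustrative arithmetic (an addition, not compiled): for a 132-byte
 * reply the marker written above is htonl(0x80000000 | 128) -- the
 * "last fragment" bit plus a fragment length that excludes the four
 * marker bytes themselves.
 */
#if 0
static u32 example_reply_marker(struct xdr_buf *xbufp)
{
	return htonl(0x80000000 | (xbufp->len - 4));
}
#endif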
static void
svc_tcp_init(struct svc_sock *svsk)
{
	struct sock	*sk = svsk->sk_sk;
	struct tcp_sock *tp = tcp_sk(sk);

	svsk->sk_recvfrom = svc_tcp_recvfrom;
	svsk->sk_sendto = svc_tcp_sendto;

	if (sk->sk_state == TCP_LISTEN) {
		dprintk("setting up TCP socket for listening\n");
		sk->sk_data_ready = svc_tcp_listen_data_ready;
		set_bit(SK_CONN, &svsk->sk_flags);
	} else {
		dprintk("setting up TCP socket for reading\n");
		sk->sk_state_change = svc_tcp_state_change;
		sk->sk_data_ready = svc_tcp_data_ready;
		sk->sk_write_space = svc_write_space;

		svsk->sk_reclen = 0;
		svsk->sk_tcplen = 0;

		tp->nonagle = 1;        /* disable Nagle's algorithm */

		/* initialise setting must have enough space to
		 * receive and respond to one request.
		 * svc_tcp_recvfrom will re-adjust if necessary
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    3 * svsk->sk_server->sv_max_mesg,
				    3 * svsk->sk_server->sv_max_mesg);

		set_bit(SK_CHNGBUF, &svsk->sk_flags);
		set_bit(SK_DATA, &svsk->sk_flags);
		if (sk->sk_state != TCP_ESTABLISHED)
			set_bit(SK_CLOSE, &svsk->sk_flags);
	}
}
void
svc_sock_update_bufs(struct svc_serv *serv)
{
	/*
	 * The number of server threads has changed. Update
	 * rcvbuf and sndbuf accordingly on all sockets
	 */
	struct list_head *le;

	spin_lock_bh(&serv->sv_lock);
	list_for_each(le, &serv->sv_permsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	list_for_each(le, &serv->sv_tempsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);
}
/*
 * Receive the next request on any socket.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int
svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_sock		*svsk = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_pool		*pool = rqstp->rq_pool;
	int			len, i;
	int			pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_sock)
		printk(KERN_ERR
			"svc_recv: service %p, socket not NULL!\n",
			 rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			 rqstp);

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	for (i = 0; i < pages; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p)
				schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
	BUG_ON(pages >= RPCSVC_MAXPAGES);

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		if (!(svsk = rqstp->rq_sock)) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			return signalled()? -EINTR : -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
		 rqstp, pool->sp_id, svsk, atomic_read(&svsk->sk_inuse));
	len = svsk->sk_recvfrom(rqstp);
	dprintk("svc: got len=%d\n", len);

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_sock_release(rqstp);
		return -EAGAIN;
	}
	svsk->sk_lastrecv = get_seconds();
	clear_bit(SK_OLD, &svsk->sk_flags);

	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}
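/*
 * Illustrative sketch (an addition, not compiled): how a service
 * thread such as nfsd or lockd typically drives this function;
 * svc_process() lives in svc.c and the 30-second timeout is arbitrary.
 */
#if 0
static void example_thread_loop(struct svc_rqst *rqstp)
{
	int err;

	while (!signalled()) {
		err = svc_recv(rqstp, 30*HZ);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0)
			break;
		svc_process(rqstp);	/* dispatch; reply goes via svc_send */
	}
}
#endif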
/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
	svc_sock_release(rqstp);
}
/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk;
	int		len;
	struct xdr_buf	*xb;

	if ((svsk = rqstp->rq_sock) == NULL) {
		printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
				__FILE__, __LINE__);
		return -EFAULT;
	}

	/* release the receive skb before sending the reply */
	svc_release_skb(rqstp);

	/* calculate over-all length */
	xb = & rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab svsk->sk_mutex to serialize outgoing data. */
	mutex_lock(&svsk->sk_mutex);
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		len = -ENOTCONN;
	else
		len = svsk->sk_sendto(rqstp);
	mutex_unlock(&svsk->sk_mutex);
	svc_sock_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}
/*
 * Timer function to close old temporary sockets, using
 * a mark-and-sweep algorithm.
 */
static void
svc_age_temp_sockets(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_sock *svsk;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_sockets\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_sockets: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		svsk = list_entry(le, struct svc_sock, sk_list);

		if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
			continue;
		if (atomic_read(&svsk->sk_inuse) || test_bit(SK_BUSY, &svsk->sk_flags))
			continue;
		atomic_inc(&svsk->sk_inuse);
		list_move(le, &to_be_aged);
		set_bit(SK_CLOSE, &svsk->sk_flags);
		set_bit(SK_DETACHED, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
		list_del_init(le);
		svsk = list_entry(le, struct svc_sock, sk_list);

		dprintk("queuing svsk %p for closing, %lu seconds old\n",
			svsk, get_seconds() - svsk->sk_lastrecv);

		/* a thread will dequeue and close it soon */
		svc_sock_enqueue(svsk);
		svc_sock_put(svsk);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}
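/*
 * Illustrative arithmetic (an addition, not compiled): a temp socket
 * is marked SK_OLD on one timer pass and swept on the next, so an
 * idle connection survives between one and two svc_conn_age_period
 * intervals (roughly 6..12 minutes with the default above).
 */
#if 0
static unsigned long example_max_idle_seconds(void)
{
	return 2 * svc_conn_age_period;
}
#endif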
/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
						struct socket *sock,
						int *errp, int flags)
{
	struct svc_sock	*svsk;
	struct sock	*inet;
	int		pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
	int		is_temporary = flags & SVC_SOCK_TEMPORARY;

	dprintk("svc: svc_setup_socket %p\n", sock);
	if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
		*errp = -ENOMEM;
		return NULL;
	}

	inet = sock->sk;

	/* Register socket with portmapper */
	if (*errp >= 0 && pmap_register)
		*errp = svc_register(serv, inet->sk_protocol,
				     ntohs(inet_sk(inet)->sport));

	if (*errp < 0) {
		kfree(svsk);
		return NULL;
	}

	set_bit(SK_BUSY, &svsk->sk_flags);
	inet->sk_user_data = svsk;
	svsk->sk_sock = sock;
	svsk->sk_sk = inet;
	svsk->sk_ostate = inet->sk_state_change;
	svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;
	svsk->sk_server = serv;
	atomic_set(&svsk->sk_inuse, 1);
	svsk->sk_lastrecv = get_seconds();
	spin_lock_init(&svsk->sk_defer_lock);
	INIT_LIST_HEAD(&svsk->sk_deferred);
	INIT_LIST_HEAD(&svsk->sk_ready);
	mutex_init(&svsk->sk_mutex);

	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk);
	else
		svc_tcp_init(svsk);

	spin_lock_bh(&serv->sv_lock);
	if (is_temporary) {
		set_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_tempsocks);
		serv->sv_tmpcnt++;
		if (serv->sv_temptimer.function == NULL) {
			/* setup timer to age temp sockets */
			setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
					(unsigned long)serv);
			mod_timer(&serv->sv_temptimer,
					jiffies + svc_conn_age_period * HZ);
		}
	} else {
		clear_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_permsocks);
	}
	spin_unlock_bh(&serv->sv_lock);

	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
				svsk, svsk->sk_sk);

	return svsk;
}
int svc_addsock(struct svc_serv *serv,
		int fd,
		char *name_return,
		int *proto)
{
	int err = 0;
	struct socket *so = sockfd_lookup(fd, &err);
	struct svc_sock *svsk = NULL;

	if (!so)
		return err;
	if (so->sk->sk_family != AF_INET)
		err = -EAFNOSUPPORT;
	else if (so->sk->sk_protocol != IPPROTO_TCP &&
	    so->sk->sk_protocol != IPPROTO_UDP)
		err = -EPROTONOSUPPORT;
	else if (so->state > SS_UNCONNECTED)
		err = -EISCONN;
	else {
		svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS);
		if (svsk) {
			svc_sock_received(svsk);
			err = 0;
		}
	}
	if (err) {
		sockfd_put(so);
		return err;
	}
	if (proto) *proto = so->sk->sk_protocol;
	return one_sock_name(name_return, svsk);
}
EXPORT_SYMBOL_GPL(svc_addsock);
/*
 * Create socket for RPC service.
 */
static int svc_create_socket(struct svc_serv *serv, int protocol,
				struct sockaddr *sin, int len, int flags)
{
	struct svc_sock	*svsk;
	struct socket	*sock;
	int		error;
	int		type;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: svc_create_socket(%s, %d, %s)\n",
			serv->sv_program->pg_name, protocol,
			__svc_print_addr(sin, buf, sizeof(buf)));

	if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
		printk(KERN_WARNING "svc: only UDP and TCP "
				"sockets supported\n");
		return -EINVAL;
	}
	type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

	error = sock_create_kern(sin->sa_family, type, protocol, &sock);
	if (error < 0)
		return error;

	svc_reclassify_socket(sock);

	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = 1;		/* allow address reuse */
	error = kernel_bind(sock, sin, len);
	if (error < 0)
		goto bummer;

	if (protocol == IPPROTO_TCP) {
		if ((error = kernel_listen(sock, 64)) < 0)
			goto bummer;
	}

	if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) {
		svc_sock_received(svsk);
		return ntohs(inet_sk(svsk->sk_sk)->sport);
	}

bummer:
	dprintk("svc: svc_create_socket error = %d\n", -error);
	sock_release(sock);
	return error;
}
/*
 * Remove a dead socket
 */
static void
svc_delete_socket(struct svc_sock *svsk)
{
	struct svc_serv	*serv;
	struct sock	*sk;

	dprintk("svc: svc_delete_socket(%p)\n", svsk);

	serv = svsk->sk_server;
	sk = svsk->sk_sk;

	sk->sk_state_change = svsk->sk_ostate;
	sk->sk_data_ready = svsk->sk_odata;
	sk->sk_write_space = svsk->sk_owspace;

	spin_lock_bh(&serv->sv_lock);

	if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
		list_del_init(&svsk->sk_list);
	/*
	 * We used to delete the svc_sock from whichever list
	 * its sk_ready node was on, but we don't actually
	 * need to.  This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) {
		BUG_ON(atomic_read(&svsk->sk_inuse)<2);
		atomic_dec(&svsk->sk_inuse);
		if (test_bit(SK_TEMP, &svsk->sk_flags))
			serv->sv_tmpcnt--;
	}

	spin_unlock_bh(&serv->sv_lock);
}
static void svc_close_socket(struct svc_sock *svsk)
{
	set_bit(SK_CLOSE, &svsk->sk_flags);
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
		/* someone else will have to effect the close */
		return;

	atomic_inc(&svsk->sk_inuse);
	svc_delete_socket(svsk);
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_put(svsk);
}
void svc_force_close_socket(struct svc_sock *svsk)
{
	set_bit(SK_CLOSE, &svsk->sk_flags);
	if (test_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Waiting to be processed, but no threads left,
		 * So just remove it from the waiting list
		 */
		list_del_init(&svsk->sk_ready);
		clear_bit(SK_BUSY, &svsk->sk_flags);
	}
	svc_close_socket(svsk);
}
/**
 * svc_makesock - Make a socket for nfsd and lockd
 * @serv: RPC server structure
 * @protocol: transport protocol to use
 * @port: port to use
 * @flags: requested socket characteristics
 *
 */
int svc_makesock(struct svc_serv *serv, int protocol, unsigned short port,
			int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= INADDR_ANY,
		.sin_port		= htons(port),
	};

	dprintk("svc: creating socket proto = %d\n", protocol);
	return svc_create_socket(serv, protocol, (struct sockaddr *) &sin,
							sizeof(sin), flags);
}
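/*
 * Illustrative calls (an addition, not compiled): a service creates
 * its listeners roughly like this; 2049 is the standard NFS port.
 */
#if 0
static int example_make_sockets(struct svc_serv *serv)
{
	int err = svc_makesock(serv, IPPROTO_UDP, 2049, SVC_SOCK_DEFAULTS);

	if (err < 0)
		return err;
	return svc_makesock(serv, IPPROTO_TCP, 2049, SVC_SOCK_DEFAULTS);
}
#endif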
/*
 * Handle defer and revisit of requests
 */
static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
	struct svc_sock *svsk;

	if (too_many) {
		svc_sock_put(dr->svsk);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	svsk = dr->svsk;
	dr->svsk = NULL;
	spin_lock_bh(&svsk->sk_defer_lock);
	list_add(&dr->handle.recent, &svsk->sk_deferred);
	spin_unlock_bh(&svsk->sk_defer_lock);
	set_bit(SK_DEFERRED, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
	svc_sock_put(svsk);
}
static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		/* FIXME maybe discard if size too large */
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
	}
	atomic_inc(&rqstp->rq_sock->sk_inuse);
	dr->svsk = rqstp->rq_sock;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}
/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	rqstp->rq_arg.head[0].iov_base = dr->args;
	rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
	rqstp->rq_arg.page_len = 0;
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot        = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen     = dr->addrlen;
	rqstp->rq_daddr       = dr->daddr;
	rqstp->rq_respages    = rqstp->rq_pages;
	return dr->argslen<<2;
}
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
		return NULL;
	spin_lock_bh(&svsk->sk_defer_lock);
	clear_bit(SK_DEFERRED, &svsk->sk_flags);
	if (!list_empty(&svsk->sk_deferred)) {
		dr = list_entry(svsk->sk_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(SK_DEFERRED, &svsk->sk_flags);
	}
	spin_unlock_bh(&svsk->sk_defer_lock);
	return dr;
}
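/*
 * Illustrative sketch (an addition, not compiled): a cache lookup
 * that cannot answer immediately defers the request through the
 * handle installed in svc_recv() (rq_chandle.defer == svc_defer);
 * svc_revisit() later requeues it, and the recvfrom paths above
 * replay it via svc_deferred_recv().
 */
#if 0
static int example_lookup_defers(struct svc_rqst *rqstp)
{
	struct cache_deferred_req *dreq;

	dreq = rqstp->rq_chandle.defer(&rqstp->rq_chandle);
	if (dreq == NULL)
		return -ETIMEDOUT;	/* could not defer; caller drops */
	return -EAGAIN;			/* deferred; thread moves on */
}
#endif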