/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list.
 *	svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	SK_CONN, SK_DATA can be set or cleared at any time.
 *		After a set, svc_sock_enqueue must be called.
 *		After a clear, the socket must be read/accepted;
 *		if this succeeds, it must be set again.
 *	SK_CLOSE can be set at any time. It is never cleared.
 */
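
/*
 * Illustrative sketch of the SK_DATA rule above (not part of the
 * original code): a consumer clears the bit *before* reading and
 * re-sets it when the read made progress, so a data_ready event
 * racing with the read is never lost.
 *
 *	clear_bit(SK_DATA, &svsk->sk_flags);
 *	len = read_from_socket(svsk);	// hypothetical read step
 *	if (len > 0)
 *		set_bit(SK_DATA, &svsk->sk_flags);
 *	svc_sock_enqueue(svsk);
 */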
#define RPCDBG_FACILITY	RPCDBG_SVCSOCK
static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int *errp, int pmap_reg);
static void		svc_udp_data_ready(struct sock *, int);
static int		svc_udp_recvfrom(struct svc_rqst *);
static int		svc_udp_sendto(struct svc_rqst *);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

static inline void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(sk->sk_lock.owner != NULL);
	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
		    &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
		    &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
		break;

	default:
		BUG();
	}
}
#else
static inline void svc_reclassify_socket(struct socket *sock)
{
}
#endif
/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}
/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static inline void
svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}
/*
 * Release an skbuff after use
 */
static void
svc_release_skb(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_skbuff;
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	if (skb) {
		rqstp->rq_skbuff = NULL;

		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
		skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
	}
	if (dr) {
		rqstp->rq_deferred = NULL;
		kfree(dr);
	}
}
/*
 * Any space to write?
 */
static inline unsigned long
svc_sock_wspace(struct svc_sock *svsk)
{
	int wspace;

	if (svsk->sk_sock->type == SOCK_STREAM)
		wspace = sk_stream_wspace(svsk->sk_sk);
	else
		wspace = sock_wspace(svsk->sk_sk);

	return wspace;
}
/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
	struct svc_serv	*serv = svsk->sk_server;
	struct svc_pool *pool;
	struct svc_rqst	*rqstp;
	int cpu;

	if (!(svsk->sk_flags &
	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
		return;
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(svsk->sk_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
		       "svc_sock_enqueue: threads and sockets both waiting??\n");

	if (test_bit(SK_DEAD, &svsk->sk_flags)) {
		/* Don't enqueue dead sockets */
		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}

	/* Mark socket as busy. It will remain in this state until the
	 * server has processed all pending data and put the socket back
	 * on the idle list.  We update SK_BUSY atomically because
	 * it also guards against trying to enqueue the svc_sock twice.
	 */
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Don't enqueue socket while already enqueued */
		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}
	BUG_ON(svsk->sk_pool != NULL);
	svsk->sk_pool = pool;

	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2
	     > svc_sock_wspace(svsk))
	    && !test_bit(SK_CLOSE, &svsk->sk_flags)
	    && !test_bit(SK_CONN, &svsk->sk_flags)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
			svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg,
			svc_sock_wspace(svsk));
		svsk->sk_pool = NULL;
		clear_bit(SK_BUSY, &svsk->sk_flags);
		goto out_unlock;
	}
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);

	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: socket %p served by daemon %p\n",
			svsk->sk_sk, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_sock)
			printk(KERN_ERR
			       "svc_sock_enqueue: server %p, rq_sock=%p!\n",
			       rqstp, rqstp->rq_sock);
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
		BUG_ON(svsk->sk_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
		list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
		BUG_ON(svsk->sk_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}
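
/*
 * Worked example of the write-space test in svc_sock_enqueue() above
 * (illustrative numbers, not from the original source): with
 * sv_max_mesg = 32768 and sk_reserved = 0, the socket is only handed
 * to a thread if svc_sock_wspace() exceeds (0 + 32768)*2 = 65536
 * bytes, i.e. there is room for the reply we are about to allow as
 * well as for everything already reserved.
 */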
/*
 * Dequeue the first socket.  Must be called with the pool->sp_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_pool *pool)
{
	struct svc_sock	*svsk;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	svsk = list_entry(pool->sp_sockets.next,
			  struct svc_sock, sk_ready);
	list_del_init(&svsk->sk_ready);

	dprintk("svc: socket %p dequeued, inuse=%d\n",
		svsk->sk_sk, atomic_read(&svsk->sk_inuse));

	return svsk;
}
/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
	svsk->sk_pool = NULL;
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
}
/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_sock *svsk = rqstp->rq_sock;
		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
		rqstp->rq_reserved = space;

		svc_sock_enqueue(svsk);
	}
}
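
/*
 * Worked example (illustrative): if a request reserved sv_max_mesg =
 * 32768 bytes and its reply so far uses head[0].iov_len = 100 bytes,
 * svc_reserve(rqstp, 0) shrinks rq_reserved to 100 and subtracts the
 * remaining 32668 bytes from sk_reserved, which may let the socket be
 * enqueued again.
 */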
/*
 * Release a socket after use.
 */
static inline void
svc_sock_put(struct svc_sock *svsk)
{
	if (atomic_dec_and_test(&svsk->sk_inuse) &&
	    test_bit(SK_DEAD, &svsk->sk_flags)) {
		dprintk("svc: releasing dead socket\n");
		if (svsk->sk_sock->file)
			sockfd_put(svsk->sk_sock);
		else
			sock_release(svsk->sk_sock);
		if (svsk->sk_info_authunix != NULL)
			svcauth_unix_info_release(svsk->sk_info_authunix);
		kfree(svsk);
	}
}
static void
svc_sock_release(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;

	svc_release_skb(rqstp);

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_sock = NULL;

	svc_sock_put(svsk);
}
/*
 * External function to wake up a server waiting for data
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void
svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_sock = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}
/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct socket	*sock = svsk->sk_sock;
	int		slen;
	char		buffer[CMSG_SPACE(sizeof(struct in_pktinfo))];
	struct cmsghdr *cmh = (struct cmsghdr *)buffer;
	struct in_pktinfo *pki = (struct in_pktinfo *)CMSG_DATA(cmh);
	int		len = 0;
	int		result;
	int		size;
	struct page	**ppage = xdr->pages;
	size_t		base = xdr->page_base;
	unsigned int	pglen = xdr->page_len;
	unsigned int	flags = MSG_MORE;

	slen = xdr->len;

	if (rqstp->rq_prot == IPPROTO_UDP) {
		/* set the source and destination */
		struct msghdr	msg;
		msg.msg_name    = &rqstp->rq_addr;
		msg.msg_namelen = sizeof(rqstp->rq_addr);
		msg.msg_iov     = NULL;
		msg.msg_iovlen  = 0;
		msg.msg_flags	= MSG_MORE;

		msg.msg_control = cmh;
		msg.msg_controllen = sizeof(buffer);
		cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		cmh->cmsg_level = SOL_IP;
		cmh->cmsg_type = IP_PKTINFO;
		pki->ipi_ifindex = 0;
		pki->ipi_spec_dst.s_addr = rqstp->rq_daddr;

		if (sock_sendmsg(sock, &msg, 0) < 0)
			goto out;
	}

	/* send head */
	if (slen == xdr->head[0].iov_len)
		flags = 0;
	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
			      xdr->head[0].iov_len, flags);
	if (len != xdr->head[0].iov_len)
		goto out;
	slen -= xdr->head[0].iov_len;
	if (slen == 0)
		goto out;

	/* send page data */
	size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
	while (pglen > 0) {
		if (slen == size)
			flags = 0;
		result = kernel_sendpage(sock, *ppage, base, size, flags);
		if (result > 0)
			len += result;
		if (result != size)
			goto out;
		slen -= size;
		pglen -= size;
		size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
		base = 0;
		ppage++;
	}

	/* send tail */
	if (xdr->tail[0].iov_len) {
		result = kernel_sendpage(sock, rqstp->rq_respages[0],
					 ((unsigned long)xdr->tail[0].iov_base)
					    & (PAGE_SIZE-1),
					 xdr->tail[0].iov_len, 0);
		if (result > 0)
			len += result;
	}
out:
	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %x)\n",
		rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len, xdr->len, len,
		rqstp->rq_addr.sin_addr.s_addr);

	return len;
}
/*
 * Report socket names for nfsdfs
 */
static int one_sock_name(char *buf, struct svc_sock *svsk)
{
	int len;

	switch(svsk->sk_sk->sk_family) {
	case AF_INET:
		len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
			      svsk->sk_sk->sk_protocol==IPPROTO_UDP?
			      "udp" : "tcp",
			      NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
			      inet_sk(svsk->sk_sk)->num);
		break;
	default:
		len = sprintf(buf, "*unknown-%d*\n",
			      svsk->sk_sk->sk_family);
	}
	return len;
}
int
svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
{
	struct svc_sock *svsk, *closesk = NULL;
	int len = 0;

	if (!serv)
		return 0;
	spin_lock(&serv->sv_lock);
	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
		int onelen = one_sock_name(buf+len, svsk);
		if (toclose && strcmp(toclose, buf+len) == 0)
			closesk = svsk;
		else
			len += onelen;
	}
	spin_unlock(&serv->sv_lock);
	if (closesk)
		/* Should unregister with portmap, but you cannot
		 * unregister just one protocol...
		 */
		svc_delete_socket(closesk);
	else if (toclose)
		return -ENOENT;
	return len;
}
EXPORT_SYMBOL(svc_sock_names);
/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
	struct socket	*sock = svsk->sk_sock;
	int		avail, err;

	err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);

	return (err >= 0)? avail : err;
}
/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
	struct msghdr	msg;
	struct socket	*sock;
	int		len, alen;

	rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
	sock = rqstp->rq_sock->sk_sock;

	msg.msg_name    = &rqstp->rq_addr;
	msg.msg_namelen = sizeof(rqstp->rq_addr);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;

	msg.msg_flags	= MSG_DONTWAIT;

	len = kernel_recvmsg(sock, &msg, iov, nr, buflen, MSG_DONTWAIT);

	/* sock_recvmsg doesn't fill in the name/namelen, so we must..
	 * possibly we should cache this in the svc_sock structure
	 * at accept time. FIXME
	 */
	alen = sizeof(rqstp->rq_addr);
	kernel_getpeername(sock, (struct sockaddr *)&rqstp->rq_addr, &alen);

	dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
		rqstp->rq_sock, iov[0].iov_base, iov[0].iov_len, len);

	return len;
}
/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
	mm_segment_t	oldfs;
	oldfs = get_fs(); set_fs(KERNEL_DS);
	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
			(char*)&snd, sizeof(snd));
	sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
			(char*)&rcv, sizeof(rcv));
	set_fs(oldfs);
#else
	/* sock_setsockopt limits use to sysctl_?mem_max,
	 * which isn't acceptable.  Until that is made conditional
	 * on not having CAP_SYS_RESOURCE or similar, we go direct...
	 * DaveM said I could!
	 */
	lock_sock(sock->sk);
	sock->sk->sk_sndbuf = snd * 2;
	sock->sk->sk_rcvbuf = rcv * 2;
	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
	release_sock(sock->sk);
#endif
}
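
/*
 * Sizing example (illustrative): the UDP receive path calls this with
 * snd = rcv = (sv_nrthreads+3) * sv_max_mesg, so 8 threads and a 32K
 * maximum message give 11 * 32768 = 360448 bytes; the doubling above
 * leaves headroom for kernel skb overhead.
 */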
/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
			svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
			svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
		svc_sock_enqueue(svsk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
		dprintk("RPC svc_write_space: someone sleeping on %p\n",
			svsk);
		wake_up_interruptible(sk->sk_sleep);
	}
}
/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	struct sk_buff	*skb;
	int		err, len;

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
		/* udp sockets need large rcvbuf as all pending
		 * requests are still in that buffer.  sndbuf must
		 * also be large enough that there is enough space
		 * for one reply per thread.  We count all threads
		 * rather than threads in a particular pool, which
		 * provides an upper bound on the number of threads
		 * which will access the socket.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg);

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	clear_bit(SK_DATA, &svsk->sk_flags);
	while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
		if (err == -EAGAIN) {
			svc_sock_received(svsk);
			return err;
		}
		/* possibly an icmp error */
		dprintk("svc: recvfrom returned error %d\n", -err);
	}
	if (skb->tstamp.off_sec == 0) {
		struct timeval tv;

		tv.tv_sec = xtime.tv_sec;
		tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
		skb_set_timestamp(skb, &tv);
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
	set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

	/*
	 * Maybe more packets - kick another thread ASAP.
	 */
	svc_sock_received(svsk);

	len = skb->len - sizeof(struct udphdr);
	rqstp->rq_arg.len = len;

	rqstp->rq_prot = IPPROTO_UDP;

	/* Get sender address */
	rqstp->rq_addr.sin_family = AF_INET;
	rqstp->rq_addr.sin_port = skb->h.uh->source;
	rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr;
	rqstp->rq_daddr = skb->nh.iph->daddr;

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
			local_bh_enable();
			/* checksum error */
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		local_bh_enable();
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb)) {
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		rqstp->rq_skbuff = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
		rqstp->rq_respages = rqstp->rq_pages+1;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_respages = rqstp->rq_pages + 1 +
			(rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
	}

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	return len;
}
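
/*
 * Buffer-split example for svc_udp_recvfrom() above (illustrative):
 * a 5000-byte datagram with PAGE_SIZE = 4096 keeps the full 4096
 * bytes in head[0], sets page_len = 5000 - 4096 = 904, and places
 * rq_respages at rq_pages + 2 (one head page plus one data page),
 * leaving the remaining pages for the reply.
 */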
static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
	int		error;

	error = svc_sendto(rqstp, &rqstp->rq_res);
	if (error == -ECONNREFUSED)
		/* ICMP error on earlier request. */
		error = svc_sendto(rqstp, &rqstp->rq_res);

	return error;
}
static void
svc_udp_init(struct svc_sock *svsk)
{
	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;
	svsk->sk_recvfrom = svc_udp_recvfrom;
	svsk->sk_sendto = svc_udp_sendto;

	/* initialise setting must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
			    3 * svsk->sk_server->sv_max_mesg,
			    3 * svsk->sk_server->sv_max_mesg);

	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
	set_bit(SK_CHNGBUF, &svsk->sk_flags);
}
/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (listen) state change %d\n",
		sk, sk->sk_state);

	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of child sockets becomes ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(SK_CONN, &svsk->sk_flags);
			svc_sock_enqueue(svsk);
		} else
			printk("svc: socket %p: no user data\n", sk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
		sk, sk->sk_state, sk->sk_user_data);

	if (!svsk)
		printk("svc: socket %p: no user data\n", sk);
	else {
		set_bit(SK_CLOSE, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
static void
svc_tcp_data_ready(struct sock *sk, int count)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
		sk, sk->sk_user_data);
	if (svsk) {
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
/*
 * Accept a TCP connection
 */
static void
svc_tcp_accept(struct svc_sock *svsk)
{
	struct sockaddr_in sin;
	struct svc_serv	*serv = svsk->sk_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	struct svc_sock	*newsvsk;
	int		err, slen;

	dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
	if (!sock)
		return;

	clear_bit(SK_CONN, &svsk->sk_flags);
	err = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (err < 0) {
		if (err == -ENOMEM)
			printk(KERN_WARNING "%s: no more sockets!\n",
			       serv->sv_name);
		else if (err != -EAGAIN && net_ratelimit())
			printk(KERN_WARNING "%s: accept failed (err %d)!\n",
			       serv->sv_name, -err);
		return;
	}

	set_bit(SK_CONN, &svsk->sk_flags);
	svc_sock_enqueue(svsk);

	slen = sizeof(sin);
	err = kernel_getpeername(newsock, (struct sockaddr *) &sin, &slen);
	if (err < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: peername failed (err %d)!\n",
			       serv->sv_name, -err);
		goto failed;		/* aborted connection or whatever */
	}

	/* Ideally, we would want to reject connections from unauthorized
	 * hosts here, but when we get encryption, the IP of the host won't
	 * tell us anything.  For now just warn about unpriv connections.
	 */
	if (ntohs(sin.sin_port) >= 1024) {
		dprintk(KERN_WARNING
			"%s: connect from unprivileged port: %u.%u.%u.%u:%d\n",
			serv->sv_name,
			NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
	}

	dprintk("%s: connect from %u.%u.%u.%u:%04x\n", serv->sv_name,
		NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0)))
		goto failed;

	/* make sure that we don't have too many active connections.
	 * If we have, something must be dropped.
	 *
	 * There's no point in trying to do random drop here for
	 * DoS prevention. NFS clients do 1 reconnect in 15
	 * seconds. An attacker can easily beat that.
	 *
	 * The only somewhat efficient mechanism would be to drop
	 * old connections from the same IP first. But right now
	 * we don't even record the client IP in svc_sock.
	 */
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
		struct svc_sock *svsk = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open TCP "
				       "sockets, consider increasing the "
				       "number of nfsd threads\n",
				       serv->sv_name);
				printk(KERN_NOTICE "%s: last TCP connect from "
				       "%u.%u.%u.%u:%d\n",
				       serv->sv_name,
				       NIPQUAD(sin.sin_addr.s_addr),
				       ntohs(sin.sin_port));
			}
			/*
			 * Always select the oldest socket. It's not fair,
			 * but so is life
			 */
			svsk = list_entry(serv->sv_tempsocks.prev,
					  struct svc_sock,
					  sk_list);
			set_bit(SK_CLOSE, &svsk->sk_flags);
			atomic_inc(&svsk->sk_inuse);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (svsk) {
			svc_sock_enqueue(svsk);
			svc_sock_put(svsk);
		}
	}

	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return;

failed:
	sock_release(newsock);
	return;
}
/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	int		len;
	struct kvec	*vec;
	int		pnum, vlen;

	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
		svsk, test_bit(SK_DATA, &svsk->sk_flags),
		test_bit(SK_CONN, &svsk->sk_flags),
		test_bit(SK_CLOSE, &svsk->sk_flags));

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	if (svsk->sk_sk->sk_state == TCP_LISTEN) {
		svc_tcp_accept(svsk);
		svc_sock_received(svsk);
		return 0;
	}

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
		/* sndbuf needs to have room for one request
		 * per thread, otherwise we can stall even when the
		 * network isn't a bottleneck.
		 *
		 * We count all threads rather than threads in a
		 * particular pool, which provides an upper bound
		 * on the number of threads which will access the socket.
		 *
		 * rcvbuf just needs to be able to hold a few requests.
		 * Normally they will be removed from the queue
		 * as soon as a complete request arrives.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    3 * serv->sv_max_mesg);

	clear_bit(SK_DATA, &svsk->sk_flags);

	/* Receive data. If we haven't got the record length yet, get
	 * the next four bytes. Otherwise try to gobble up as much as
	 * possible up to the complete record length.
	 */
	if (svsk->sk_tcplen < 4) {
		unsigned long	want = 4 - svsk->sk_tcplen;
		struct kvec	iov;

		iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
		iov.iov_len  = want;
		if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
			goto error;
		svsk->sk_tcplen += len;

		if (len < want) {
			dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
				len, want);
			svc_sock_received(svsk);
			return -EAGAIN; /* record header not complete */
		}

		svsk->sk_reclen = ntohl(svsk->sk_reclen);
		if (!(svsk->sk_reclen & 0x80000000)) {
			/* FIXME: technically, a record can be fragmented,
			 *  and non-terminal fragments will not have the top
			 *  bit set in the fragment length header.
			 *  But apparently no known nfs clients send fragmented
			 *  records. */
			printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (non-terminal)\n",
			       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
		svsk->sk_reclen &= 0x7fffffff;
		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
		if (svsk->sk_reclen > serv->sv_max_mesg) {
			printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n",
			       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
	}

	/* Check whether enough data is available */
	len = svc_recv_available(svsk);
	if (len < 0)
		goto error;

	if (len < svsk->sk_reclen) {
		dprintk("svc: incomplete TCP record (%d of %d)\n",
			len, svsk->sk_reclen);
		svc_sock_received(svsk);
		return -EAGAIN;	/* record not complete */
	}
	len = svsk->sk_reclen;
	set_bit(SK_DATA, &svsk->sk_flags);

	vec = rqstp->rq_vec;
	vec[0] = rqstp->rq_arg.head[0];
	vlen = PAGE_SIZE;
	pnum = 1;
	while (vlen < len) {
		vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
		vec[pnum].iov_len = PAGE_SIZE;
		pnum++;
		vlen += PAGE_SIZE;
	}
	rqstp->rq_respages = &rqstp->rq_pages[pnum];

	/* Now receive data */
	len = svc_recvfrom(rqstp, vec, pnum, len);
	if (len < 0)
		goto error;

	dprintk("svc: TCP complete record (%d bytes)\n", len);
	rqstp->rq_arg.len = len;
	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
	}

	rqstp->rq_skbuff      = NULL;
	rqstp->rq_prot	      = IPPROTO_TCP;

	/* Reset TCP read info */
	svsk->sk_reclen = 0;
	svsk->sk_tcplen = 0;

	svc_sock_received(svsk);
	if (serv->sv_stats)
		serv->sv_stats->nettcpcnt++;

	return len;

err_delete:
	svc_delete_socket(svsk);
	return -EAGAIN;

error:
	if (len == -EAGAIN) {
		dprintk("RPC: TCP recvfrom got EAGAIN\n");
		svc_sock_received(svsk);
	} else {
		printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
		       svsk->sk_server->sv_name, -len);
		goto err_delete;
	}

	return len;
}
/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
	struct xdr_buf	*xbufp = &rqstp->rq_res;
	int sent;
	__be32 reclen;

	/* Set up the first element of the reply kvec.
	 * Any other kvecs that may be in use have been taken
	 * care of by the server implementation itself.
	 */
	reclen = htonl(0x80000000|((xbufp->len) - 4));
	memcpy(xbufp->head[0].iov_base, &reclen, 4);

	if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
		return -ENOTCONN;

	sent = svc_sendto(rqstp, &rqstp->rq_res);
	if (sent != xbufp->len) {
		printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
		       rqstp->rq_sock->sk_server->sv_name,
		       (sent<0)?"got error":"sent only",
		       sent, xbufp->len);
		svc_delete_socket(rqstp->rq_sock);
		sent = -EAGAIN;
	}
	return sent;
}
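
/*
 * Record-marker example for svc_tcp_sendto() above (illustrative):
 * a reply with xbufp->len = 200 sends reclen = htonl(0x80000000 | 196),
 * because the marker's length field counts the 196 bytes that follow
 * the 4-byte marker itself, and the top bit flags the final (here:
 * only) fragment of the record.
 */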
static void
svc_tcp_init(struct svc_sock *svsk)
{
	struct sock	*sk = svsk->sk_sk;
	struct tcp_sock *tp = tcp_sk(sk);

	svsk->sk_recvfrom = svc_tcp_recvfrom;
	svsk->sk_sendto = svc_tcp_sendto;

	if (sk->sk_state == TCP_LISTEN) {
		dprintk("setting up TCP socket for listening\n");
		sk->sk_data_ready = svc_tcp_listen_data_ready;
		set_bit(SK_CONN, &svsk->sk_flags);
	} else {
		dprintk("setting up TCP socket for reading\n");
		sk->sk_state_change = svc_tcp_state_change;
		sk->sk_data_ready = svc_tcp_data_ready;
		sk->sk_write_space = svc_write_space;

		svsk->sk_reclen = 0;
		svsk->sk_tcplen = 0;

		tp->nonagle = 1;	/* disable Nagle's algorithm */

		/* initialise setting must have enough space to
		 * receive and respond to one request.
		 * svc_tcp_recvfrom will re-adjust if necessary
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    3 * svsk->sk_server->sv_max_mesg,
				    3 * svsk->sk_server->sv_max_mesg);

		set_bit(SK_CHNGBUF, &svsk->sk_flags);
		set_bit(SK_DATA, &svsk->sk_flags);
		if (sk->sk_state != TCP_ESTABLISHED)
			set_bit(SK_CLOSE, &svsk->sk_flags);
	}
}
void
svc_sock_update_bufs(struct svc_serv *serv)
{
	/*
	 * The number of server threads has changed. Update
	 * rcvbuf and sndbuf accordingly on all sockets
	 */
	struct list_head *le;

	spin_lock_bh(&serv->sv_lock);
	list_for_each(le, &serv->sv_permsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	list_for_each(le, &serv->sv_tempsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);
}
/*
 * Receive the next request on any socket.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int
svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_sock		*svsk = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_pool		*pool = rqstp->rq_pool;
	int			len, i;
	int			pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_sock)
		printk(KERN_ERR
		       "svc_recv: service %p, socket not NULL!\n",
		       rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
		       "svc_recv: service %p, wait queue active!\n",
		       rqstp);

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	for (i=0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p)
				schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			rqstp->rq_pages[i] = p;
		}

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		if (!(svsk = rqstp->rq_sock)) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			return signalled()? -EINTR : -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
		rqstp, pool->sp_id, svsk, atomic_read(&svsk->sk_inuse));
	len = svsk->sk_recvfrom(rqstp);
	dprintk("svc: got len=%d\n", len);

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_sock_release(rqstp);
		return -EAGAIN;
	}
	svsk->sk_lastrecv = get_seconds();
	clear_bit(SK_OLD, &svsk->sk_flags);

	rqstp->rq_secure = ntohs(rqstp->rq_addr.sin_port) < 1024;
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}
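
/*
 * Page-count example for svc_recv() above (illustrative): with
 * sv_max_mesg = 32768 and PAGE_SIZE = 4096, pages = (32768 + 4096) /
 * 4096 = 9; the argument buffer then spans one head page plus
 * (9-2) = 7 data pages, keeping at least one page free for the
 * response.
 */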
/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
	svc_sock_release(rqstp);
}
/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk;
	int		len;
	struct xdr_buf	*xb;

	if ((svsk = rqstp->rq_sock) == NULL) {
		printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
		       __FILE__, __LINE__);
		return -EFAULT;
	}

	/* release the receive skb before sending the reply */
	svc_release_skb(rqstp);

	/* calculate over-all length */
	xb = & rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab svsk->sk_mutex to serialize outgoing data. */
	mutex_lock(&svsk->sk_mutex);
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		len = -ENOTCONN;
	else
		len = svsk->sk_sendto(rqstp);
	mutex_unlock(&svsk->sk_mutex);
	svc_sock_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}
/*
 * Timer function to close old temporary sockets, using
 * a mark-and-sweep algorithm.
 */
static void
svc_age_temp_sockets(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_sock *svsk;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_sockets\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_sockets: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		svsk = list_entry(le, struct svc_sock, sk_list);

		if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
			continue;
		if (atomic_read(&svsk->sk_inuse) || test_bit(SK_BUSY, &svsk->sk_flags))
			continue;
		atomic_inc(&svsk->sk_inuse);
		list_move(le, &to_be_aged);
		set_bit(SK_CLOSE, &svsk->sk_flags);
		set_bit(SK_DETACHED, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
		list_del_init(le);
		svsk = list_entry(le, struct svc_sock, sk_list);

		dprintk("queuing svsk %p for closing, %lu seconds old\n",
			svsk, get_seconds() - svsk->sk_lastrecv);

		/* a thread will dequeue and close it soon */
		svc_sock_enqueue(svsk);
		svc_sock_put(svsk);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}
/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *
svc_setup_socket(struct svc_serv *serv, struct socket *sock,
		 int *errp, int pmap_register)
{
	struct svc_sock	*svsk;
	struct sock	*inet;

	dprintk("svc: svc_setup_socket %p\n", sock);
	if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
		*errp = -ENOMEM;
		return NULL;
	}

	inet = sock->sk;

	/* Register socket with portmapper */
	if (*errp >= 0 && pmap_register)
		*errp = svc_register(serv, inet->sk_protocol,
				     ntohs(inet_sk(inet)->sport));

	if (*errp < 0) {
		kfree(svsk);
		return NULL;
	}

	set_bit(SK_BUSY, &svsk->sk_flags);
	inet->sk_user_data = svsk;
	svsk->sk_sock = sock;
	svsk->sk_sk = inet;
	svsk->sk_ostate = inet->sk_state_change;
	svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;
	svsk->sk_server = serv;
	atomic_set(&svsk->sk_inuse, 0);
	svsk->sk_lastrecv = get_seconds();
	spin_lock_init(&svsk->sk_defer_lock);
	INIT_LIST_HEAD(&svsk->sk_deferred);
	INIT_LIST_HEAD(&svsk->sk_ready);
	mutex_init(&svsk->sk_mutex);

	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk);
	else
		svc_tcp_init(svsk);

	spin_lock_bh(&serv->sv_lock);
	if (!pmap_register) {
		set_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_tempsocks);
		serv->sv_tmpcnt++;
		if (serv->sv_temptimer.function == NULL) {
			/* setup timer to age temp sockets */
			setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
				    (unsigned long)serv);
			mod_timer(&serv->sv_temptimer,
				  jiffies + svc_conn_age_period * HZ);
		}
	} else {
		clear_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_permsocks);
	}
	spin_unlock_bh(&serv->sv_lock);

	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
		svsk, svsk->sk_sk);

	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
	return svsk;
}
int svc_addsock(struct svc_serv *serv,
		int fd,
		char *name_return,
		int *proto)
{
	int err = 0;
	struct socket *so = sockfd_lookup(fd, &err);
	struct svc_sock *svsk = NULL;

	if (!so)
		return err;
	if (so->sk->sk_family != AF_INET)
		err = -EAFNOSUPPORT;
	else if (so->sk->sk_protocol != IPPROTO_TCP &&
	    so->sk->sk_protocol != IPPROTO_UDP)
		err = -EPROTONOSUPPORT;
	else if (so->state > SS_UNCONNECTED)
		err = -EISCONN;
	else {
		svsk = svc_setup_socket(serv, so, &err, 1);
		if (svsk)
			err = 0;
	}
	if (err) {
		sockfd_put(so);
		return err;
	}
	if (proto) *proto = so->sk->sk_protocol;
	return one_sock_name(name_return, svsk);
}
EXPORT_SYMBOL_GPL(svc_addsock);
/*
 * Create socket for RPC service.
 */
static int
svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
{
	struct svc_sock	*svsk;
	struct socket	*sock;
	int		error;
	int		type;

	dprintk("svc: svc_create_socket(%s, %d, %u.%u.%u.%u:%d)\n",
		serv->sv_program->pg_name, protocol,
		NIPQUAD(sin->sin_addr.s_addr),
		ntohs(sin->sin_port));

	if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
		printk(KERN_WARNING "svc: only UDP and TCP "
		       "sockets supported\n");
		return -EINVAL;
	}
	type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

	if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
		return error;

	svc_reclassify_socket(sock);

	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = 1; /* allow address reuse */
	error = kernel_bind(sock, (struct sockaddr *) sin,
			    sizeof(*sin));
	if (error < 0)
		goto bummer;

	if (protocol == IPPROTO_TCP) {
		if ((error = kernel_listen(sock, 64)) < 0)
			goto bummer;
	}

	if ((svsk = svc_setup_socket(serv, sock, &error, 1)) != NULL)
		return 0;

bummer:
	dprintk("svc: svc_create_socket error = %d\n", -error);
	sock_release(sock);
	return error;
}
/*
 * Remove a dead socket
 */
void
svc_delete_socket(struct svc_sock *svsk)
{
	struct svc_serv	*serv;
	struct sock	*sk;

	dprintk("svc: svc_delete_socket(%p)\n", svsk);

	serv = svsk->sk_server;
	sk = svsk->sk_sk;

	sk->sk_state_change = svsk->sk_ostate;
	sk->sk_data_ready = svsk->sk_odata;
	sk->sk_write_space = svsk->sk_owspace;

	spin_lock_bh(&serv->sv_lock);

	if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
		list_del_init(&svsk->sk_list);
	/*
	 * We used to delete the svc_sock from whichever list
	 * its sk_ready node was on, but we don't actually
	 * need to.  This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags))
		if (test_bit(SK_TEMP, &svsk->sk_flags))
			serv->sv_tmpcnt--;

	/* This atomic_inc should be needed - svc_delete_socket
	 * should have the semantic of dropping a reference.
	 * But it doesn't yet....
	 */
	atomic_inc(&svsk->sk_inuse);
	spin_unlock_bh(&serv->sv_lock);
	svc_sock_put(svsk);
}
/*
 * Make a socket for nfsd and lockd
 */
int
svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
{
	struct sockaddr_in sin;

	dprintk("svc: creating socket proto = %d\n", protocol);
	sin.sin_family      = AF_INET;
	sin.sin_addr.s_addr = INADDR_ANY;
	sin.sin_port        = htons(port);
	return svc_create_socket(serv, protocol, &sin);
}
/*
 * Handle defer and revisit of requests
 */
static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
	struct svc_sock *svsk;

	if (too_many) {
		svc_sock_put(dr->svsk);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	svsk = dr->svsk;
	dr->svsk = NULL;
	spin_lock_bh(&svsk->sk_defer_lock);
	list_add(&dr->handle.recent, &svsk->sk_deferred);
	spin_unlock_bh(&svsk->sk_defer_lock);
	set_bit(SK_DEFERRED, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
	svc_sock_put(svsk);
}
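
/*
 * Flow note (illustrative summary, not in the original): svc_defer()
 * below snapshots a request that cannot be processed yet, taking a
 * reference on its svc_sock; when the cache layer revisits it,
 * svc_revisit() above queues the snapshot on sk_deferred and sets
 * SK_DEFERRED, so a later svc_recv() replays it through
 * svc_deferred_recv() instead of reading from the wire.
 */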
static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		/* FIXME maybe discard if size too large */
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		dr->addr = rqstp->rq_addr;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
	}
	atomic_inc(&rqstp->rq_sock->sk_inuse);
	dr->svsk = rqstp->rq_sock;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}
/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	rqstp->rq_arg.head[0].iov_base = dr->args;
	rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
	rqstp->rq_arg.page_len = 0;
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot        = dr->prot;
	rqstp->rq_addr        = dr->addr;
	rqstp->rq_daddr       = dr->daddr;
	rqstp->rq_respages    = rqstp->rq_pages;
	return dr->argslen<<2;
}
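
/*
 * Unit note (illustrative): argslen is stored in 32-bit XDR words, so
 * the dr->argslen<<2 conversions above turn words back into bytes;
 * a deferred 120-byte argument, for example, is saved with
 * argslen = 30.
 */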
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
		return NULL;
	spin_lock_bh(&svsk->sk_defer_lock);
	clear_bit(SK_DEFERRED, &svsk->sk_flags);
	if (!list_empty(&svsk->sk_deferred)) {
		dr = list_entry(svsk->sk_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(SK_DEFERRED, &svsk->sk_flags);
	}
	spin_unlock_bh(&svsk->sk_defer_lock);
	return dr;
}