/*
 * [davej-history.git] net/core/sock.c
 * blob d15bd82f4892f85cefd886ef8be4c917983e7bde
 * (gitweb scrape residue; stray commit-message fragment preserved here:
 *  "Ok. I didn't make 2.4.0 in 2000. Tough. I tried, but we had some")
 */
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Generic socket support routines. Memory allocators, socket lock/release
7 * handler for protocols to use and generic option handler.
10 * Version: $Id: sock.c,v 1.102 2000/12/11 23:00:24 davem Exp $
12 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
13 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Alan Cox, <A.Cox@swansea.ac.uk>
17 * Fixes:
18 * Alan Cox : Numerous verify_area() problems
19 * Alan Cox : Connecting on a connecting socket
20 * now returns an error for tcp.
21 * Alan Cox : sock->protocol is set correctly.
22 * and is not sometimes left as 0.
23 * Alan Cox : connect handles icmp errors on a
24 * connect properly. Unfortunately there
25 * is a restart syscall nasty there. I
26 * can't match BSD without hacking the C
27 * library. Ideas urgently sought!
28 * Alan Cox : Disallow bind() to addresses that are
29 * not ours - especially broadcast ones!!
30 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
31 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
32 * instead they leave that for the DESTROY timer.
33 * Alan Cox : Clean up error flag in accept
34 * Alan Cox : TCP ack handling is buggy, the DESTROY timer
35 * was buggy. Put a remove_sock() in the handler
36 * for memory when we hit 0. Also altered the timer
37 * code. The ACK stuff can wait and needs major
38 * TCP layer surgery.
39 * Alan Cox : Fixed TCP ack bug, removed remove sock
40 * and fixed timer/inet_bh race.
41 * Alan Cox : Added zapped flag for TCP
42 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
43 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
44 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
45 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
46 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
47 * Rick Sladkey : Relaxed UDP rules for matching packets.
48 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
49 * Pauline Middelink : identd support
50 * Alan Cox : Fixed connect() taking signals I think.
51 * Alan Cox : SO_LINGER supported
52 * Alan Cox : Error reporting fixes
53 * Anonymous : inet_create tidied up (sk->reuse setting)
54 * Alan Cox : inet sockets don't set sk->type!
55 * Alan Cox : Split socket option code
56 * Alan Cox : Callbacks
57 * Alan Cox : Nagle flag for Charles & Johannes stuff
58 * Alex : Removed restriction on inet fioctl
59 * Alan Cox : Splitting INET from NET core
60 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
61 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
62 * Alan Cox : Split IP from generic code
63 * Alan Cox : New kfree_skbmem()
64 * Alan Cox : Make SO_DEBUG superuser only.
65 * Alan Cox : Allow anyone to clear SO_DEBUG
66 * (compatibility fix)
67 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
68 * Alan Cox : Allocator for a socket is settable.
69 * Alan Cox : SO_ERROR includes soft errors.
70 * Alan Cox : Allow NULL arguments on some SO_ opts
71 * Alan Cox : Generic socket allocation to make hooks
72 * easier (suggested by Craig Metz).
73 * Michael Pall : SO_ERROR returns positive errno again
74 * Steve Whitehouse: Added default destructor to free
75 * protocol private data.
76 * Steve Whitehouse: Added various other default routines
77 * common to several socket families.
78 * Chris Evans : Call suser() check last on F_SETOWN
79 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
80 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
81 * Andi Kleen : Fix write_space callback
83 * To Fix:
86 * This program is free software; you can redistribute it and/or
87 * modify it under the terms of the GNU General Public License
88 * as published by the Free Software Foundation; either version
89 * 2 of the License, or (at your option) any later version.
92 #include <linux/config.h>
93 #include <linux/errno.h>
94 #include <linux/types.h>
95 #include <linux/socket.h>
96 #include <linux/in.h>
97 #include <linux/kernel.h>
98 #include <linux/major.h>
99 #include <linux/sched.h>
100 #include <linux/timer.h>
101 #include <linux/string.h>
102 #include <linux/sockios.h>
103 #include <linux/net.h>
104 #include <linux/fcntl.h>
105 #include <linux/mm.h>
106 #include <linux/slab.h>
107 #include <linux/interrupt.h>
108 #include <linux/poll.h>
109 #include <linux/init.h>
111 #include <asm/uaccess.h>
112 #include <asm/system.h>
114 #include <linux/inet.h>
115 #include <linux/netdevice.h>
116 #include <net/ip.h>
117 #include <net/protocol.h>
118 #include <net/arp.h>
119 #include <net/route.h>
120 #include <net/tcp.h>
121 #include <net/udp.h>
122 #include <linux/skbuff.h>
123 #include <net/sock.h>
124 #include <net/raw.h>
125 #include <net/icmp.h>
126 #include <linux/ipsec.h>
128 #ifdef CONFIG_FILTER
129 #include <linux/filter.h>
130 #endif
/*
 * NOTE: classic unsafe macro -- each argument is evaluated twice, so
 * min(a++, b) would increment 'a' twice.  Callers in this file only
 * pass side-effect-free expressions.
 */
#define min(a,b) ((a)<(b)?(a):(b))
134 /* Run time adjustable parameters. */
135 __u32 sysctl_wmem_max = SK_WMEM_MAX;
136 __u32 sysctl_rmem_max = SK_RMEM_MAX;
137 __u32 sysctl_wmem_default = SK_WMEM_MAX;
138 __u32 sysctl_rmem_default = SK_RMEM_MAX;
140 /* Maximal space eaten by iovec or ancilliary data plus some space */
141 int sysctl_optmem_max = sizeof(unsigned long)*(2*UIO_MAXIOV + 512);
143 static int sock_set_timeout(long *timeo_p, char *optval, int optlen)
145 struct timeval tv;
147 if (optlen < sizeof(tv))
148 return -EINVAL;
149 if (copy_from_user(&tv, optval, sizeof(tv)))
150 return -EFAULT;
152 *timeo_p = MAX_SCHEDULE_TIMEOUT;
153 if (tv.tv_sec == 0 && tv.tv_usec == 0)
154 return 0;
155 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
156 *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
157 return 0;
161 * This is meant for all protocols to use and covers goings on
162 * at the socket level. Everything here is generic.
165 int sock_setsockopt(struct socket *sock, int level, int optname,
166 char *optval, int optlen)
168 struct sock *sk=sock->sk;
169 #ifdef CONFIG_FILTER
170 struct sk_filter *filter;
171 #endif
172 int val;
173 int valbool;
174 int err;
175 struct linger ling;
176 int ret = 0;
179 * Options without arguments
182 #ifdef SO_DONTLINGER /* Compatibility item... */
183 switch(optname)
185 case SO_DONTLINGER:
186 sk->linger=0;
187 return 0;
189 #endif
191 if(optlen<sizeof(int))
192 return(-EINVAL);
194 err = get_user(val, (int *)optval);
195 if (err)
196 return err;
198 valbool = val?1:0;
200 lock_sock(sk);
202 switch(optname)
204 case SO_DEBUG:
205 if(val && !capable(CAP_NET_ADMIN))
207 ret = -EACCES;
209 else
210 sk->debug=valbool;
211 break;
212 case SO_REUSEADDR:
213 sk->reuse = valbool;
214 break;
215 case SO_TYPE:
216 case SO_ERROR:
217 ret = -ENOPROTOOPT;
218 break;
219 case SO_DONTROUTE:
220 sk->localroute=valbool;
221 break;
222 case SO_BROADCAST:
223 sk->broadcast=valbool;
224 break;
225 case SO_SNDBUF:
226 /* Don't error on this BSD doesn't and if you think
227 about it this is right. Otherwise apps have to
228 play 'guess the biggest size' games. RCVBUF/SNDBUF
229 are treated in BSD as hints */
231 if (val > sysctl_wmem_max)
232 val = sysctl_wmem_max;
234 sk->userlocks |= SOCK_SNDBUF_LOCK;
235 sk->sndbuf = max(val*2,SOCK_MIN_SNDBUF);
238 * Wake up sending tasks if we
239 * upped the value.
241 sk->write_space(sk);
242 break;
244 case SO_RCVBUF:
245 /* Don't error on this BSD doesn't and if you think
246 about it this is right. Otherwise apps have to
247 play 'guess the biggest size' games. RCVBUF/SNDBUF
248 are treated in BSD as hints */
250 if (val > sysctl_rmem_max)
251 val = sysctl_rmem_max;
253 sk->userlocks |= SOCK_RCVBUF_LOCK;
254 /* FIXME: is this lower bound the right one? */
255 sk->rcvbuf = max(val*2,SOCK_MIN_RCVBUF);
256 break;
258 case SO_KEEPALIVE:
259 #ifdef CONFIG_INET
260 if (sk->protocol == IPPROTO_TCP)
262 tcp_set_keepalive(sk, valbool);
264 #endif
265 sk->keepopen = valbool;
266 break;
268 case SO_OOBINLINE:
269 sk->urginline = valbool;
270 break;
272 case SO_NO_CHECK:
273 sk->no_check = valbool;
274 break;
276 case SO_PRIORITY:
277 if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
278 sk->priority = val;
279 else
280 ret = -EPERM;
281 break;
283 case SO_LINGER:
284 if(optlen<sizeof(ling)) {
285 ret = -EINVAL; /* 1003.1g */
286 break;
288 if (copy_from_user(&ling,optval,sizeof(ling))) {
289 ret = -EFAULT;
290 break;
292 if(ling.l_onoff==0) {
293 sk->linger=0;
294 } else {
295 #if (BITS_PER_LONG == 32)
296 if (ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
297 sk->lingertime=MAX_SCHEDULE_TIMEOUT;
298 else
299 #endif
300 sk->lingertime=ling.l_linger*HZ;
301 sk->linger=1;
303 break;
305 case SO_BSDCOMPAT:
306 sk->bsdism = valbool;
307 break;
309 case SO_PASSCRED:
310 sock->passcred = valbool;
311 break;
313 case SO_TIMESTAMP:
314 sk->rcvtstamp = valbool;
315 break;
317 case SO_RCVLOWAT:
318 if (val < 0)
319 val = INT_MAX;
320 sk->rcvlowat = val ? : 1;
321 break;
323 case SO_RCVTIMEO:
324 ret = sock_set_timeout(&sk->rcvtimeo, optval, optlen);
325 break;
327 case SO_SNDTIMEO:
328 ret = sock_set_timeout(&sk->sndtimeo, optval, optlen);
329 break;
331 #ifdef CONFIG_NETDEVICES
332 case SO_BINDTODEVICE:
334 char devname[IFNAMSIZ];
336 /* Sorry... */
337 if (!capable(CAP_NET_RAW)) {
338 ret = -EPERM;
339 break;
342 /* Bind this socket to a particular device like "eth0",
343 * as specified in the passed interface name. If the
344 * name is "" or the option length is zero the socket
345 * is not bound.
348 if (!valbool) {
349 sk->bound_dev_if = 0;
350 } else {
351 if (optlen > IFNAMSIZ)
352 optlen = IFNAMSIZ;
353 if (copy_from_user(devname, optval, optlen)) {
354 ret = -EFAULT;
355 break;
358 /* Remove any cached route for this socket. */
359 sk_dst_reset(sk);
361 if (devname[0] == '\0') {
362 sk->bound_dev_if = 0;
363 } else {
364 struct net_device *dev = dev_get_by_name(devname);
365 if (!dev) {
366 ret = -ENODEV;
367 break;
369 sk->bound_dev_if = dev->ifindex;
370 dev_put(dev);
373 break;
375 #endif
378 #ifdef CONFIG_FILTER
379 case SO_ATTACH_FILTER:
380 ret = -EINVAL;
381 if (optlen == sizeof(struct sock_fprog)) {
382 struct sock_fprog fprog;
384 ret = -EFAULT;
385 if (copy_from_user(&fprog, optval, sizeof(fprog)))
386 break;
388 ret = sk_attach_filter(&fprog, sk);
390 break;
392 case SO_DETACH_FILTER:
393 spin_lock_bh(&sk->lock.slock);
394 filter = sk->filter;
395 if (filter) {
396 sk->filter = NULL;
397 spin_unlock_bh(&sk->lock.slock);
398 sk_filter_release(sk, filter);
399 break;
401 spin_unlock_bh(&sk->lock.slock);
402 ret = -ENONET;
403 break;
404 #endif
405 /* We implement the SO_SNDLOWAT etc to
406 not be settable (1003.1g 5.3) */
407 default:
408 ret = -ENOPROTOOPT;
409 break;
411 release_sock(sk);
412 return ret;
416 int sock_getsockopt(struct socket *sock, int level, int optname,
417 char *optval, int *optlen)
419 struct sock *sk = sock->sk;
421 union
423 int val;
424 struct linger ling;
425 struct timeval tm;
426 } v;
428 int lv=sizeof(int),len;
430 if(get_user(len,optlen))
431 return -EFAULT;
433 switch(optname)
435 case SO_DEBUG:
436 v.val = sk->debug;
437 break;
439 case SO_DONTROUTE:
440 v.val = sk->localroute;
441 break;
443 case SO_BROADCAST:
444 v.val= sk->broadcast;
445 break;
447 case SO_SNDBUF:
448 v.val=sk->sndbuf;
449 break;
451 case SO_RCVBUF:
452 v.val =sk->rcvbuf;
453 break;
455 case SO_REUSEADDR:
456 v.val = sk->reuse;
457 break;
459 case SO_KEEPALIVE:
460 v.val = sk->keepopen;
461 break;
463 case SO_TYPE:
464 v.val = sk->type;
465 break;
467 case SO_ERROR:
468 v.val = -sock_error(sk);
469 if(v.val==0)
470 v.val=xchg(&sk->err_soft,0);
471 break;
473 case SO_OOBINLINE:
474 v.val = sk->urginline;
475 break;
477 case SO_NO_CHECK:
478 v.val = sk->no_check;
479 break;
481 case SO_PRIORITY:
482 v.val = sk->priority;
483 break;
485 case SO_LINGER:
486 lv=sizeof(v.ling);
487 v.ling.l_onoff=sk->linger;
488 v.ling.l_linger=sk->lingertime/HZ;
489 break;
491 case SO_BSDCOMPAT:
492 v.val = sk->bsdism;
493 break;
495 case SO_TIMESTAMP:
496 v.val = sk->rcvtstamp;
497 break;
499 case SO_RCVTIMEO:
500 lv=sizeof(struct timeval);
501 if (sk->rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
502 v.tm.tv_sec = 0;
503 v.tm.tv_usec = 0;
504 } else {
505 v.tm.tv_sec = sk->rcvtimeo/HZ;
506 v.tm.tv_usec = ((sk->rcvtimeo%HZ)*1000)/HZ;
508 break;
510 case SO_SNDTIMEO:
511 lv=sizeof(struct timeval);
512 if (sk->sndtimeo == MAX_SCHEDULE_TIMEOUT) {
513 v.tm.tv_sec = 0;
514 v.tm.tv_usec = 0;
515 } else {
516 v.tm.tv_sec = sk->sndtimeo/HZ;
517 v.tm.tv_usec = ((sk->sndtimeo%HZ)*1000)/HZ;
519 break;
521 case SO_RCVLOWAT:
522 v.val = sk->rcvlowat;
523 break;
525 case SO_SNDLOWAT:
526 v.val=1;
527 break;
529 case SO_PASSCRED:
530 v.val = sock->passcred;
531 break;
533 case SO_PEERCRED:
534 lv=sizeof(sk->peercred);
535 len=min(len, lv);
536 if(copy_to_user((void*)optval, &sk->peercred, len))
537 return -EFAULT;
538 goto lenout;
540 case SO_PEERNAME:
542 char address[128];
544 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
545 return -ENOTCONN;
546 if (lv < len)
547 return -EINVAL;
548 if(copy_to_user((void*)optval, address, len))
549 return -EFAULT;
550 goto lenout;
553 default:
554 return(-ENOPROTOOPT);
556 len=min(len,lv);
557 if(copy_to_user(optval,&v,len))
558 return -EFAULT;
559 lenout:
560 if(put_user(len, optlen))
561 return -EFAULT;
562 return 0;
565 static kmem_cache_t *sk_cachep;
568 * All socket objects are allocated here. This is for future
569 * usage.
572 struct sock *sk_alloc(int family, int priority, int zero_it)
574 struct sock *sk = kmem_cache_alloc(sk_cachep, priority);
576 if(sk && zero_it) {
577 memset(sk, 0, sizeof(struct sock));
578 sk->family = family;
579 sock_lock_init(sk);
582 return sk;
585 void sk_free(struct sock *sk)
587 #ifdef CONFIG_FILTER
588 struct sk_filter *filter;
589 #endif
591 if (sk->destruct)
592 sk->destruct(sk);
594 #ifdef CONFIG_FILTER
595 filter = sk->filter;
596 if (filter) {
597 sk_filter_release(sk, filter);
598 sk->filter = NULL;
600 #endif
602 if (atomic_read(&sk->omem_alloc))
603 printk(KERN_DEBUG "sk_free: optmem leakage (%d bytes) detected.\n", atomic_read(&sk->omem_alloc));
605 kmem_cache_free(sk_cachep, sk);
608 void __init sk_init(void)
610 sk_cachep = kmem_cache_create("sock", sizeof(struct sock), 0,
611 SLAB_HWCACHE_ALIGN, 0, 0);
612 if (!sk_cachep)
613 printk(KERN_CRIT "sk_init: Cannot create sock SLAB cache!");
615 if (num_physpages <= 4096) {
616 sysctl_wmem_max = 32767;
617 sysctl_rmem_max = 32767;
618 sysctl_wmem_default = 32767;
619 sysctl_wmem_default = 32767;
620 } else if (num_physpages >= 131072) {
621 sysctl_wmem_max = 131071;
622 sysctl_rmem_max = 131071;
627 * Simple resource managers for sockets.
632 * Write buffer destructor automatically called from kfree_skb.
634 void sock_wfree(struct sk_buff *skb)
636 struct sock *sk = skb->sk;
638 /* In case it might be waiting for more memory. */
639 atomic_sub(skb->truesize, &sk->wmem_alloc);
640 sk->write_space(sk);
641 sock_put(sk);
645 * Read buffer destructor automatically called from kfree_skb.
647 void sock_rfree(struct sk_buff *skb)
649 struct sock *sk = skb->sk;
651 atomic_sub(skb->truesize, &sk->rmem_alloc);
655 * Allocate a skb from the socket's send buffer.
657 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority)
659 if (force || atomic_read(&sk->wmem_alloc) < sk->sndbuf) {
660 struct sk_buff * skb = alloc_skb(size, priority);
661 if (skb) {
662 skb_set_owner_w(skb, sk);
663 return skb;
666 return NULL;
670 * Allocate a skb from the socket's receive buffer.
672 struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority)
674 if (force || atomic_read(&sk->rmem_alloc) < sk->rcvbuf) {
675 struct sk_buff *skb = alloc_skb(size, priority);
676 if (skb) {
677 skb_set_owner_r(skb, sk);
678 return skb;
681 return NULL;
685 * Allocate a memory block from the socket's option memory buffer.
687 void *sock_kmalloc(struct sock *sk, int size, int priority)
689 if ((unsigned)size <= sysctl_optmem_max &&
690 atomic_read(&sk->omem_alloc)+size < sysctl_optmem_max) {
691 void *mem;
692 /* First do the add, to avoid the race if kmalloc
693 * might sleep.
695 atomic_add(size, &sk->omem_alloc);
696 mem = kmalloc(size, priority);
697 if (mem)
698 return mem;
699 atomic_sub(size, &sk->omem_alloc);
701 return NULL;
705 * Free an option memory block.
707 void sock_kfree_s(struct sock *sk, void *mem, int size)
709 kfree(mem);
710 atomic_sub(size, &sk->omem_alloc);
713 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
714 I think, these locks should be removed for datagram sockets.
716 static long sock_wait_for_wmem(struct sock * sk, long timeo)
718 DECLARE_WAITQUEUE(wait, current);
720 clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
721 add_wait_queue(sk->sleep, &wait);
722 for (;;) {
723 if (signal_pending(current))
724 break;
725 set_bit(SOCK_NOSPACE, &sk->socket->flags);
726 set_current_state(TASK_INTERRUPTIBLE);
727 if (atomic_read(&sk->wmem_alloc) < sk->sndbuf)
728 break;
729 if (sk->shutdown & SEND_SHUTDOWN)
730 break;
731 if (sk->err)
732 break;
733 timeo = schedule_timeout(timeo);
735 __set_current_state(TASK_RUNNING);
736 remove_wait_queue(sk->sleep, &wait);
737 return timeo;
742 * Generic send/receive buffer handlers
745 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
746 unsigned long fallback, int noblock, int *errcode)
748 int err;
749 struct sk_buff *skb;
750 long timeo;
752 timeo = sock_sndtimeo(sk, noblock);
754 while (1) {
755 unsigned long try_size = size;
757 err = sock_error(sk);
758 if (err != 0)
759 goto failure;
762 * We should send SIGPIPE in these cases according to
763 * 1003.1g draft 6.4. If we (the user) did a shutdown()
764 * call however we should not.
766 * Note: This routine isnt just used for datagrams and
767 * anyway some datagram protocols have a notion of
768 * close down.
771 err = -EPIPE;
772 if (sk->shutdown&SEND_SHUTDOWN)
773 goto failure;
775 if (atomic_read(&sk->wmem_alloc) < sk->sndbuf) {
776 if (fallback) {
777 /* The buffer get won't block, or use the atomic queue.
778 * It does produce annoying no free page messages still.
780 skb = alloc_skb(size, GFP_BUFFER);
781 if (skb)
782 break;
783 try_size = fallback;
785 skb = alloc_skb(try_size, sk->allocation);
786 if (skb)
787 break;
788 err = -ENOBUFS;
789 goto failure;
793 * This means we have too many buffers for this socket already.
796 set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
797 set_bit(SOCK_NOSPACE, &sk->socket->flags);
798 err = -EAGAIN;
799 if (!timeo)
800 goto failure;
801 if (signal_pending(current))
802 goto interrupted;
803 timeo = sock_wait_for_wmem(sk, timeo);
806 skb_set_owner_w(skb, sk);
807 return skb;
809 interrupted:
810 err = sock_intr_errno(timeo);
811 failure:
812 *errcode = err;
813 return NULL;
816 void __lock_sock(struct sock *sk)
818 DECLARE_WAITQUEUE(wait, current);
820 add_wait_queue_exclusive(&sk->lock.wq, &wait);
821 for(;;) {
822 current->state = TASK_UNINTERRUPTIBLE;
823 spin_unlock_bh(&sk->lock.slock);
824 schedule();
825 spin_lock_bh(&sk->lock.slock);
826 if(!sk->lock.users)
827 break;
829 current->state = TASK_RUNNING;
830 remove_wait_queue(&sk->lock.wq, &wait);
833 void __release_sock(struct sock *sk)
835 struct sk_buff *skb = sk->backlog.head;
837 do {
838 sk->backlog.head = sk->backlog.tail = NULL;
839 bh_unlock_sock(sk);
841 do {
842 struct sk_buff *next = skb->next;
844 skb->next = NULL;
845 sk->backlog_rcv(sk, skb);
846 skb = next;
847 } while (skb != NULL);
849 bh_lock_sock(sk);
850 } while((skb = sk->backlog.head) != NULL);
854 * Generic socket manager library. Most simpler socket families
855 * use this to manage their socket lists. At some point we should
856 * hash these. By making this generic we get the lot hashed for free.
858 * It is broken by design. All the protocols using it must be fixed. --ANK
861 rwlock_t net_big_sklist_lock = RW_LOCK_UNLOCKED;
863 void sklist_remove_socket(struct sock **list, struct sock *sk)
865 struct sock *s;
867 write_lock_bh(&net_big_sklist_lock);
869 while ((s = *list) != NULL) {
870 if (s == sk) {
871 *list = s->next;
872 break;
874 list = &s->next;
877 write_unlock_bh(&net_big_sklist_lock);
878 if (s)
879 sock_put(s);
882 void sklist_insert_socket(struct sock **list, struct sock *sk)
884 write_lock_bh(&net_big_sklist_lock);
885 sk->next= *list;
886 *list=sk;
887 sock_hold(sk);
888 write_unlock_bh(&net_big_sklist_lock);
892 * This is only called from user mode. Thus it protects itself against
893 * interrupt users but doesn't worry about being called during work.
894 * Once it is removed from the queue no interrupt or bottom half will
895 * touch it and we are (fairly 8-) ) safe.
898 void sklist_destroy_socket(struct sock **list, struct sock *sk);
901 * Handler for deferred kills.
904 static void sklist_destroy_timer(unsigned long data)
906 struct sock *sk=(struct sock *)data;
907 sklist_destroy_socket(NULL,sk);
911 * Destroy a socket. We pass NULL for a list if we know the
912 * socket is not on a list.
915 void sklist_destroy_socket(struct sock **list,struct sock *sk)
917 struct sk_buff *skb;
918 if(list)
919 sklist_remove_socket(list, sk);
921 while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
923 kfree_skb(skb);
926 if(atomic_read(&sk->wmem_alloc) == 0 &&
927 atomic_read(&sk->rmem_alloc) == 0 &&
928 sk->dead)
930 sock_put(sk);
932 else
935 * Someone is using our buffers still.. defer
937 init_timer(&sk->timer);
938 sk->timer.expires=jiffies+SOCK_DESTROY_TIME;
939 sk->timer.function=sklist_destroy_timer;
940 sk->timer.data = (unsigned long)sk;
941 add_timer(&sk->timer);
946 * Set of default routines for initialising struct proto_ops when
947 * the protocol does not support a particular function. In certain
948 * cases where it makes no sense for a protocol to have a "do nothing"
949 * function, some default processing is provided.
952 int sock_no_release(struct socket *sock)
954 return 0;
957 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
959 return -EOPNOTSUPP;
962 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
963 int len, int flags)
965 return -EOPNOTSUPP;
968 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
970 return -EOPNOTSUPP;
973 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
975 return -EOPNOTSUPP;
978 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
979 int *len, int peer)
981 return -EOPNOTSUPP;
984 unsigned int sock_no_poll(struct file * file, struct socket *sock, poll_table *pt)
986 return 0;
989 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
991 return -EOPNOTSUPP;
994 int sock_no_listen(struct socket *sock, int backlog)
996 return -EOPNOTSUPP;
999 int sock_no_shutdown(struct socket *sock, int how)
1001 return -EOPNOTSUPP;
1004 int sock_no_setsockopt(struct socket *sock, int level, int optname,
1005 char *optval, int optlen)
1007 return -EOPNOTSUPP;
1010 int sock_no_getsockopt(struct socket *sock, int level, int optname,
1011 char *optval, int *optlen)
1013 return -EOPNOTSUPP;
1017 * Note: if you add something that sleeps here then change sock_fcntl()
1018 * to do proper fd locking.
1020 int sock_no_fcntl(struct socket *sock, unsigned int cmd, unsigned long arg)
1022 struct sock *sk = sock->sk;
1024 switch(cmd)
1026 case F_SETOWN:
1028 * This is a little restrictive, but it's the only
1029 * way to make sure that you can't send a sigurg to
1030 * another process.
1032 if (current->pgrp != -arg &&
1033 current->pid != arg &&
1034 !capable(CAP_KILL)) return(-EPERM);
1035 sk->proc = arg;
1036 return(0);
1037 case F_GETOWN:
1038 return(sk->proc);
1039 default:
1040 return(-EINVAL);
1044 int sock_no_sendmsg(struct socket *sock, struct msghdr *m, int flags,
1045 struct scm_cookie *scm)
1047 return -EOPNOTSUPP;
1050 int sock_no_recvmsg(struct socket *sock, struct msghdr *m, int len, int flags,
1051 struct scm_cookie *scm)
1053 return -EOPNOTSUPP;
1056 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1058 /* Mirror missing mmap method error code */
1059 return -ENODEV;
1063 * Default Socket Callbacks
1066 void sock_def_wakeup(struct sock *sk)
1068 read_lock(&sk->callback_lock);
1069 if (sk->sleep && waitqueue_active(sk->sleep))
1070 wake_up_interruptible_all(sk->sleep);
1071 read_unlock(&sk->callback_lock);
1074 void sock_def_error_report(struct sock *sk)
1076 read_lock(&sk->callback_lock);
1077 if (sk->sleep && waitqueue_active(sk->sleep))
1078 wake_up_interruptible(sk->sleep);
1079 sk_wake_async(sk,0,POLL_ERR);
1080 read_unlock(&sk->callback_lock);
1083 void sock_def_readable(struct sock *sk, int len)
1085 read_lock(&sk->callback_lock);
1086 if (sk->sleep && waitqueue_active(sk->sleep))
1087 wake_up_interruptible(sk->sleep);
1088 sk_wake_async(sk,1,POLL_IN);
1089 read_unlock(&sk->callback_lock);
1092 void sock_def_write_space(struct sock *sk)
1094 read_lock(&sk->callback_lock);
1096 /* Do not wake up a writer until he can make "significant"
1097 * progress. --DaveM
1099 if((atomic_read(&sk->wmem_alloc) << 1) <= sk->sndbuf) {
1100 if (sk->sleep && waitqueue_active(sk->sleep))
1101 wake_up_interruptible(sk->sleep);
1103 /* Should agree with poll, otherwise some programs break */
1104 if (sock_writeable(sk))
1105 sk_wake_async(sk, 2, POLL_OUT);
1108 read_unlock(&sk->callback_lock);
1111 void sock_def_destruct(struct sock *sk)
1113 if (sk->protinfo.destruct_hook)
1114 kfree(sk->protinfo.destruct_hook);
1117 void sock_init_data(struct socket *sock, struct sock *sk)
1119 skb_queue_head_init(&sk->receive_queue);
1120 skb_queue_head_init(&sk->write_queue);
1121 skb_queue_head_init(&sk->error_queue);
1123 init_timer(&sk->timer);
1125 sk->allocation = GFP_KERNEL;
1126 sk->rcvbuf = sysctl_rmem_default;
1127 sk->sndbuf = sysctl_wmem_default;
1128 sk->state = TCP_CLOSE;
1129 sk->zapped = 1;
1130 sk->socket = sock;
1132 if(sock)
1134 sk->type = sock->type;
1135 sk->sleep = &sock->wait;
1136 sock->sk = sk;
1137 } else
1138 sk->sleep = NULL;
1140 sk->dst_lock = RW_LOCK_UNLOCKED;
1141 sk->callback_lock = RW_LOCK_UNLOCKED;
1143 sk->state_change = sock_def_wakeup;
1144 sk->data_ready = sock_def_readable;
1145 sk->write_space = sock_def_write_space;
1146 sk->error_report = sock_def_error_report;
1147 sk->destruct = sock_def_destruct;
1149 sk->peercred.pid = 0;
1150 sk->peercred.uid = -1;
1151 sk->peercred.gid = -1;
1152 sk->rcvlowat = 1;
1153 sk->rcvtimeo = MAX_SCHEDULE_TIMEOUT;
1154 sk->sndtimeo = MAX_SCHEDULE_TIMEOUT;
1156 atomic_set(&sk->refcnt, 1);