2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Generic socket support routines. Memory allocators, socket lock/release
7 * handler for protocols to use and generic option handler.
10 * Version: $Id: sock.c,v 1.102 2000/12/11 23:00:24 davem Exp $
12 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
13 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Alan Cox, <A.Cox@swansea.ac.uk>
18 * Alan Cox : Numerous verify_area() problems
19 * Alan Cox : Connecting on a connecting socket
20 * now returns an error for tcp.
21 * Alan Cox : sock->protocol is set correctly.
22 * and is not sometimes left as 0.
23 * Alan Cox : connect handles icmp errors on a
24 * connect properly. Unfortunately there
25 * is a restart syscall nasty there. I
26 * can't match BSD without hacking the C
27 * library. Ideas urgently sought!
28 * Alan Cox : Disallow bind() to addresses that are
29 * not ours - especially broadcast ones!!
30 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
31 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
32 * instead they leave that for the DESTROY timer.
33 * Alan Cox : Clean up error flag in accept
34 * Alan Cox : TCP ack handling is buggy, the DESTROY timer
35 * was buggy. Put a remove_sock() in the handler
36 * for memory when we hit 0. Also altered the timer
37 * code. The ACK stuff can wait and needs major
39 * Alan Cox : Fixed TCP ack bug, removed remove sock
40 * and fixed timer/inet_bh race.
41 * Alan Cox : Added zapped flag for TCP
42 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
43 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
44 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
45 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
46 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
47 * Rick Sladkey : Relaxed UDP rules for matching packets.
48 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
49 * Pauline Middelink : identd support
50 * Alan Cox : Fixed connect() taking signals I think.
51 * Alan Cox : SO_LINGER supported
52 * Alan Cox : Error reporting fixes
53 * Anonymous : inet_create tidied up (sk->reuse setting)
54 * Alan Cox : inet sockets don't set sk->type!
55 * Alan Cox : Split socket option code
56 * Alan Cox : Callbacks
57 * Alan Cox : Nagle flag for Charles & Johannes stuff
58 * Alex : Removed restriction on inet fioctl
59 * Alan Cox : Splitting INET from NET core
60 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
61 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
62 * Alan Cox : Split IP from generic code
63 * Alan Cox : New kfree_skbmem()
64 * Alan Cox : Make SO_DEBUG superuser only.
65 * Alan Cox : Allow anyone to clear SO_DEBUG
67 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
68 * Alan Cox : Allocator for a socket is settable.
69 * Alan Cox : SO_ERROR includes soft errors.
70 * Alan Cox : Allow NULL arguments on some SO_ opts
71 * Alan Cox : Generic socket allocation to make hooks
72 * easier (suggested by Craig Metz).
73 * Michael Pall : SO_ERROR returns positive errno again
74 * Steve Whitehouse: Added default destructor to free
75 * protocol private data.
76 * Steve Whitehouse: Added various other default routines
77 * common to several socket families.
78 * Chris Evans : Call suser() check last on F_SETOWN
79 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
80 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
81 * Andi Kleen : Fix write_space callback
86 * This program is free software; you can redistribute it and/or
87 * modify it under the terms of the GNU General Public License
88 * as published by the Free Software Foundation; either version
89 * 2 of the License, or (at your option) any later version.
92 #include <linux/config.h>
93 #include <linux/errno.h>
94 #include <linux/types.h>
95 #include <linux/socket.h>
97 #include <linux/kernel.h>
98 #include <linux/major.h>
99 #include <linux/sched.h>
100 #include <linux/timer.h>
101 #include <linux/string.h>
102 #include <linux/sockios.h>
103 #include <linux/net.h>
104 #include <linux/fcntl.h>
105 #include <linux/mm.h>
106 #include <linux/slab.h>
107 #include <linux/interrupt.h>
108 #include <linux/poll.h>
109 #include <linux/init.h>
111 #include <asm/uaccess.h>
112 #include <asm/system.h>
114 #include <linux/inet.h>
115 #include <linux/netdevice.h>
117 #include <net/protocol.h>
119 #include <net/route.h>
122 #include <linux/skbuff.h>
123 #include <net/sock.h>
125 #include <net/icmp.h>
126 #include <linux/ipsec.h>
129 #include <linux/filter.h>
132 #define min(a,b) ((a)<(b)?(a):(b))
134 /* Run time adjustable parameters. */
135 __u32 sysctl_wmem_max
= SK_WMEM_MAX
;
136 __u32 sysctl_rmem_max
= SK_RMEM_MAX
;
137 __u32 sysctl_wmem_default
= SK_WMEM_MAX
;
138 __u32 sysctl_rmem_default
= SK_RMEM_MAX
;
140 /* Maximal space eaten by iovec or ancilliary data plus some space */
141 int sysctl_optmem_max
= sizeof(unsigned long)*(2*UIO_MAXIOV
+ 512);
143 static int sock_set_timeout(long *timeo_p
, char *optval
, int optlen
)
147 if (optlen
< sizeof(tv
))
149 if (copy_from_user(&tv
, optval
, sizeof(tv
)))
152 *timeo_p
= MAX_SCHEDULE_TIMEOUT
;
153 if (tv
.tv_sec
== 0 && tv
.tv_usec
== 0)
155 if (tv
.tv_sec
< (MAX_SCHEDULE_TIMEOUT
/HZ
- 1))
156 *timeo_p
= tv
.tv_sec
*HZ
+ (tv
.tv_usec
+(1000000/HZ
-1))/(1000000/HZ
);
/*
 * NOTE(review): garbled excerpt of sock_setsockopt() — each original
 * statement is split across several lines, the source file's own line
 * numbers are fused into the text, and the switch skeleton (case labels,
 * braces, lock_sock/release_sock, returns) is missing entirely.  Kept
 * byte-identical below; restore from the upstream file before compiling.
 * Visible logic: per-option handling for SO_DEBUG/SNDBUF/RCVBUF/KEEPALIVE/
 * LINGER/BINDTODEVICE/ATTACH_FILTER/DETACH_FILTER and friends.
 */
161 * This is meant for all protocols to use and covers goings on
162 * at the socket level. Everything here is generic.
165 int sock_setsockopt(struct socket
*sock
, int level
, int optname
,
166 char *optval
, int optlen
)
168 struct sock
*sk
=sock
->sk
;
170 struct sk_filter
*filter
;
179 * Options without arguments
182 #ifdef SO_DONTLINGER /* Compatibility item... */
191 if(optlen
<sizeof(int))
194 err
= get_user(val
, (int *)optval
);
/* SO_DEBUG/priority: privileged values require CAP_NET_ADMIN. */
205 if(val
&& !capable(CAP_NET_ADMIN
))
220 sk
->localroute
=valbool
;
223 sk
->broadcast
=valbool
;
/* SO_SNDBUF handling follows. */
226 /* Don't error on this BSD doesn't and if you think
227 about it this is right. Otherwise apps have to
228 play 'guess the biggest size' games. RCVBUF/SNDBUF
229 are treated in BSD as hints */
231 if (val
> sysctl_wmem_max
)
232 val
= sysctl_wmem_max
;
234 sk
->userlocks
|= SOCK_SNDBUF_LOCK
;
235 sk
->sndbuf
= max(val
*2,SOCK_MIN_SNDBUF
);
238 * Wake up sending tasks if we
/* SO_RCVBUF handling follows. */
245 /* Don't error on this BSD doesn't and if you think
246 about it this is right. Otherwise apps have to
247 play 'guess the biggest size' games. RCVBUF/SNDBUF
248 are treated in BSD as hints */
250 if (val
> sysctl_rmem_max
)
251 val
= sysctl_rmem_max
;
253 sk
->userlocks
|= SOCK_RCVBUF_LOCK
;
254 /* FIXME: is this lower bound the right one? */
255 sk
->rcvbuf
= max(val
*2,SOCK_MIN_RCVBUF
);
/* SO_KEEPALIVE: TCP gets the protocol-specific hook. */
260 if (sk
->protocol
== IPPROTO_TCP
)
262 tcp_set_keepalive(sk
, valbool
);
265 sk
->keepopen
= valbool
;
269 sk
->urginline
= valbool
;
273 sk
->no_check
= valbool
;
277 if ((val
>= 0 && val
<= 6) || capable(CAP_NET_ADMIN
))
/* SO_LINGER: copy and validate a struct linger from user space. */
284 if(optlen
<sizeof(ling
)) {
285 ret
= -EINVAL
; /* 1003.1g */
288 if (copy_from_user(&ling
,optval
,sizeof(ling
))) {
292 if(ling
.l_onoff
==0) {
295 #if (BITS_PER_LONG == 32)
296 if (ling
.l_linger
>= MAX_SCHEDULE_TIMEOUT
/HZ
)
297 sk
->lingertime
=MAX_SCHEDULE_TIMEOUT
;
300 sk
->lingertime
=ling
.l_linger
*HZ
;
306 sk
->bsdism
= valbool
;
310 sock
->passcred
= valbool
;
314 sk
->rcvtstamp
= valbool
;
/* SO_RCVLOWAT: 0 is coerced to 1 (GNU elvis operator). */
320 sk
->rcvlowat
= val
? : 1;
324 ret
= sock_set_timeout(&sk
->rcvtimeo
, optval
, optlen
);
328 ret
= sock_set_timeout(&sk
->sndtimeo
, optval
, optlen
);
331 #ifdef CONFIG_NETDEVICES
332 case SO_BINDTODEVICE
:
334 char devname
[IFNAMSIZ
];
337 if (!capable(CAP_NET_RAW
)) {
342 /* Bind this socket to a particular device like "eth0",
343 * as specified in the passed interface name. If the
344 * name is "" or the option length is zero the socket
349 sk
->bound_dev_if
= 0;
351 if (optlen
> IFNAMSIZ
)
353 if (copy_from_user(devname
, optval
, optlen
)) {
358 /* Remove any cached route for this socket. */
361 if (devname
[0] == '\0') {
362 sk
->bound_dev_if
= 0;
364 struct net_device
*dev
= dev_get_by_name(devname
);
369 sk
->bound_dev_if
= dev
->ifindex
;
379 case SO_ATTACH_FILTER
:
381 if (optlen
== sizeof(struct sock_fprog
)) {
382 struct sock_fprog fprog
;
385 if (copy_from_user(&fprog
, optval
, sizeof(fprog
)))
388 ret
= sk_attach_filter(&fprog
, sk
);
392 case SO_DETACH_FILTER
:
393 spin_lock_bh(&sk
->lock
.slock
);
397 spin_unlock_bh(&sk
->lock
.slock
);
398 sk_filter_release(sk
, filter
);
401 spin_unlock_bh(&sk
->lock
.slock
);
405 /* We implement the SO_SNDLOWAT etc to
406 not be settable (1003.1g 5.3) */
/*
 * NOTE(review): garbled excerpt of sock_getsockopt() — statements split
 * across lines, original line numbers fused into the text, and the switch
 * skeleton (case labels, the union declaration for `v`, returns) missing.
 * Kept byte-identical; restore from the upstream file before compiling.
 * Visible logic: read per-option values into a union and copy to user.
 */
416 int sock_getsockopt(struct socket
*sock
, int level
, int optname
,
417 char *optval
, int *optlen
)
419 struct sock
*sk
= sock
->sk
;
428 int lv
=sizeof(int),len
;
430 if(get_user(len
,optlen
))
440 v
.val
= sk
->localroute
;
444 v
.val
= sk
->broadcast
;
460 v
.val
= sk
->keepopen
;
/* SO_ERROR: returns a positive errno (negated soft error). */
468 v
.val
= -sock_error(sk
);
470 v
.val
=xchg(&sk
->err_soft
,0);
474 v
.val
= sk
->urginline
;
478 v
.val
= sk
->no_check
;
482 v
.val
= sk
->priority
;
487 v
.ling
.l_onoff
=sk
->linger
;
488 v
.ling
.l_linger
=sk
->lingertime
/HZ
;
496 v
.val
= sk
->rcvtstamp
;
/* SO_RCVTIMEO: jiffies -> struct timeval conversion. */
500 lv
=sizeof(struct timeval
);
501 if (sk
->rcvtimeo
== MAX_SCHEDULE_TIMEOUT
) {
505 v
.tm
.tv_sec
= sk
->rcvtimeo
/HZ
;
506 v
.tm
.tv_usec
= ((sk
->rcvtimeo
%HZ
)*1000)/HZ
;
/* SO_SNDTIMEO: same conversion for the send timeout. */
511 lv
=sizeof(struct timeval
);
512 if (sk
->sndtimeo
== MAX_SCHEDULE_TIMEOUT
) {
516 v
.tm
.tv_sec
= sk
->sndtimeo
/HZ
;
517 v
.tm
.tv_usec
= ((sk
->sndtimeo
%HZ
)*1000)/HZ
;
522 v
.val
= sk
->rcvlowat
;
530 v
.val
= sock
->passcred
;
534 lv
=sizeof(sk
->peercred
);
536 if(copy_to_user((void*)optval
, &sk
->peercred
, len
))
544 if (sock
->ops
->getname(sock
, (struct sockaddr
*)address
, &lv
, 2))
548 if(copy_to_user((void*)optval
, address
, len
))
554 return(-ENOPROTOOPT
);
557 if(copy_to_user(optval
,&v
,len
))
560 if(put_user(len
, optlen
))
/*
 * NOTE(review): garbled excerpt of the sock SLAB cache pointer and
 * sk_alloc().  The allocation-failure check, the zero_it branch braces,
 * sk->family assignment and the return are missing from this excerpt —
 * restore from upstream before compiling.  Visible logic: allocate a
 * struct sock from sk_cachep and (presumably when zero_it is set —
 * confirm upstream) zero it.
 */
565 static kmem_cache_t
*sk_cachep
;
568 * All socket objects are allocated here. This is for future
572 struct sock
*sk_alloc(int family
, int priority
, int zero_it
)
574 struct sock
*sk
= kmem_cache_alloc(sk_cachep
, priority
);
577 memset(sk
, 0, sizeof(struct sock
));
/*
 * NOTE(review): garbled excerpt of sk_free().  The destructor callback,
 * the socket-filter release branch and surrounding braces are missing —
 * restore from upstream before compiling.  Visible logic: warn about
 * leaked option memory, then return the sock to the SLAB cache.
 */
585 void sk_free(struct sock
*sk
)
588 struct sk_filter
*filter
;
597 sk_filter_release(sk
, filter
);
602 if (atomic_read(&sk
->omem_alloc
))
603 printk(KERN_DEBUG
"sk_free: optmem leakage (%d bytes) detected.\n", atomic_read(&sk
->omem_alloc
));
605 kmem_cache_free(sk_cachep
, sk
);
608 void __init
sk_init(void)
610 sk_cachep
= kmem_cache_create("sock", sizeof(struct sock
), 0,
611 SLAB_HWCACHE_ALIGN
, 0, 0);
613 printk(KERN_CRIT
"sk_init: Cannot create sock SLAB cache!");
615 if (num_physpages
<= 4096) {
616 sysctl_wmem_max
= 32767;
617 sysctl_rmem_max
= 32767;
618 sysctl_wmem_default
= 32767;
619 sysctl_wmem_default
= 32767;
620 } else if (num_physpages
>= 131072) {
621 sysctl_wmem_max
= 131071;
622 sysctl_rmem_max
= 131071;
627 * Simple resource managers for sockets.
632 * Write buffer destructor automatically called from kfree_skb.
634 void sock_wfree(struct sk_buff
*skb
)
636 struct sock
*sk
= skb
->sk
;
638 /* In case it might be waiting for more memory. */
639 atomic_sub(skb
->truesize
, &sk
->wmem_alloc
);
645 * Read buffer destructor automatically called from kfree_skb.
647 void sock_rfree(struct sk_buff
*skb
)
649 struct sock
*sk
= skb
->sk
;
651 atomic_sub(skb
->truesize
, &sk
->rmem_alloc
);
655 * Allocate a skb from the socket's send buffer.
657 struct sk_buff
*sock_wmalloc(struct sock
*sk
, unsigned long size
, int force
, int priority
)
659 if (force
|| atomic_read(&sk
->wmem_alloc
) < sk
->sndbuf
) {
660 struct sk_buff
* skb
= alloc_skb(size
, priority
);
662 skb_set_owner_w(skb
, sk
);
670 * Allocate a skb from the socket's receive buffer.
672 struct sk_buff
*sock_rmalloc(struct sock
*sk
, unsigned long size
, int force
, int priority
)
674 if (force
|| atomic_read(&sk
->rmem_alloc
) < sk
->rcvbuf
) {
675 struct sk_buff
*skb
= alloc_skb(size
, priority
);
677 skb_set_owner_r(skb
, sk
);
685 * Allocate a memory block from the socket's option memory buffer.
687 void *sock_kmalloc(struct sock
*sk
, int size
, int priority
)
689 if ((unsigned)size
<= sysctl_optmem_max
&&
690 atomic_read(&sk
->omem_alloc
)+size
< sysctl_optmem_max
) {
692 /* First do the add, to avoid the race if kmalloc
695 atomic_add(size
, &sk
->omem_alloc
);
696 mem
= kmalloc(size
, priority
);
699 atomic_sub(size
, &sk
->omem_alloc
);
705 * Free an option memory block.
707 void sock_kfree_s(struct sock
*sk
, void *mem
, int size
)
710 atomic_sub(size
, &sk
->omem_alloc
);
713 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
714 I think, these locks should be removed for datagram sockets.
716 static long sock_wait_for_wmem(struct sock
* sk
, long timeo
)
718 DECLARE_WAITQUEUE(wait
, current
);
720 clear_bit(SOCK_ASYNC_NOSPACE
, &sk
->socket
->flags
);
721 add_wait_queue(sk
->sleep
, &wait
);
723 if (signal_pending(current
))
725 set_bit(SOCK_NOSPACE
, &sk
->socket
->flags
);
726 set_current_state(TASK_INTERRUPTIBLE
);
727 if (atomic_read(&sk
->wmem_alloc
) < sk
->sndbuf
)
729 if (sk
->shutdown
& SEND_SHUTDOWN
)
733 timeo
= schedule_timeout(timeo
);
735 __set_current_state(TASK_RUNNING
);
736 remove_wait_queue(sk
->sleep
, &wait
);
/*
 * NOTE(review): garbled excerpt of sock_alloc_send_skb() — statements
 * split across lines, original line numbers fused into the text, and the
 * while(1) skeleton, fallback branch, goto failure/interrupted labels and
 * returns are missing.  Kept byte-identical; restore from the upstream
 * file before compiling.  Visible logic: loop allocating an skb within
 * the send allowance, otherwise block via sock_wait_for_wmem().
 */
742 * Generic send/receive buffer handlers
745 struct sk_buff
*sock_alloc_send_skb(struct sock
*sk
, unsigned long size
,
746 unsigned long fallback
, int noblock
, int *errcode
)
752 timeo
= sock_sndtimeo(sk
, noblock
);
755 unsigned long try_size
= size
;
757 err
= sock_error(sk
);
762 * We should send SIGPIPE in these cases according to
763 * 1003.1g draft 6.4. If we (the user) did a shutdown()
764 * call however we should not.
766 * Note: This routine isnt just used for datagrams and
767 * anyway some datagram protocols have a notion of
772 if (sk
->shutdown
&SEND_SHUTDOWN
)
775 if (atomic_read(&sk
->wmem_alloc
) < sk
->sndbuf
) {
777 /* The buffer get won't block, or use the atomic queue.
778 * It does produce annoying no free page messages still.
780 skb
= alloc_skb(size
, GFP_BUFFER
);
785 skb
= alloc_skb(try_size
, sk
->allocation
);
793 * This means we have too many buffers for this socket already.
796 set_bit(SOCK_ASYNC_NOSPACE
, &sk
->socket
->flags
);
797 set_bit(SOCK_NOSPACE
, &sk
->socket
->flags
);
801 if (signal_pending(current
))
803 timeo
= sock_wait_for_wmem(sk
, timeo
);
806 skb_set_owner_w(skb
, sk
);
810 err
= sock_intr_errno(timeo
);
816 void __lock_sock(struct sock
*sk
)
818 DECLARE_WAITQUEUE(wait
, current
);
820 add_wait_queue_exclusive(&sk
->lock
.wq
, &wait
);
822 current
->state
= TASK_UNINTERRUPTIBLE
;
823 spin_unlock_bh(&sk
->lock
.slock
);
825 spin_lock_bh(&sk
->lock
.slock
);
829 current
->state
= TASK_RUNNING
;
830 remove_wait_queue(&sk
->lock
.wq
, &wait
);
833 void __release_sock(struct sock
*sk
)
835 struct sk_buff
*skb
= sk
->backlog
.head
;
838 sk
->backlog
.head
= sk
->backlog
.tail
= NULL
;
842 struct sk_buff
*next
= skb
->next
;
845 sk
->backlog_rcv(sk
, skb
);
847 } while (skb
!= NULL
);
850 } while((skb
= sk
->backlog
.head
) != NULL
);
/*
 * NOTE(review): garbled excerpt of the global socket-list lock and
 * sklist_remove_socket().  The local `s` declaration, the list-walk body
 * that unlinks the matching sock, and the function's braces are missing —
 * restore from upstream before compiling.
 */
854 * Generic socket manager library. Most simpler socket families
855 * use this to manage their socket lists. At some point we should
856 * hash these. By making this generic we get the lot hashed for free.
858 * It is broken by design. All the protocols using it must be fixed. --ANK
861 rwlock_t net_big_sklist_lock
= RW_LOCK_UNLOCKED
;
863 void sklist_remove_socket(struct sock
**list
, struct sock
*sk
)
867 write_lock_bh(&net_big_sklist_lock
);
869 while ((s
= *list
) != NULL
) {
877 write_unlock_bh(&net_big_sklist_lock
);
882 void sklist_insert_socket(struct sock
**list
, struct sock
*sk
)
884 write_lock_bh(&net_big_sklist_lock
);
888 write_unlock_bh(&net_big_sklist_lock
);
/*
 *	This is only called from user mode. Thus it protects itself against
 *	interrupt users but doesn't worry about being called during work.
 *	Once it is removed from the queue no interrupt or bottom half will
 *	touch it and we are (fairly 8-) ) safe.
 */
/* Forward declaration: sklist_destroy_timer() below needs it. */
void sklist_destroy_socket(struct sock **list, struct sock *sk);
901 * Handler for deferred kills.
904 static void sklist_destroy_timer(unsigned long data
)
906 struct sock
*sk
=(struct sock
*)data
;
907 sklist_destroy_socket(NULL
,sk
);
/*
 * NOTE(review): garbled excerpt of sklist_destroy_socket().  The skb
 * declaration, the kfree_skb call in the drain loop, the third condition
 * of the refcount test and the sk_free branch are missing — restore from
 * upstream before compiling.  Visible logic: unlink, drain the receive
 * queue, then either free now or arm a deferred-destroy timer while
 * buffers are still in flight.
 */
911 * Destroy a socket. We pass NULL for a list if we know the
912 * socket is not on a list.
915 void sklist_destroy_socket(struct sock
**list
,struct sock
*sk
)
919 sklist_remove_socket(list
, sk
);
921 while((skb
=skb_dequeue(&sk
->receive_queue
))!=NULL
)
926 if(atomic_read(&sk
->wmem_alloc
) == 0 &&
927 atomic_read(&sk
->rmem_alloc
) == 0 &&
935 * Someone is using our buffers still.. defer
937 init_timer(&sk
->timer
);
938 sk
->timer
.expires
=jiffies
+SOCK_DESTROY_TIME
;
939 sk
->timer
.function
=sklist_destroy_timer
;
940 sk
->timer
.data
= (unsigned long)sk
;
941 add_timer(&sk
->timer
);
/*
 * NOTE(review): garbled excerpt of the sock_no_*() default proto_ops
 * stubs.  Every function body (the returned error code, braces) is
 * missing — only the signatures and fragments of sock_no_fcntl() survive.
 * Kept byte-identical; restore from the upstream file before compiling.
 */
946 * Set of default routines for initialising struct proto_ops when
947 * the protocol does not support a particular function. In certain
948 * cases where it makes no sense for a protocol to have a "do nothing"
949 * function, some default processing is provided.
952 int sock_no_release(struct socket
*sock
)
957 int sock_no_bind(struct socket
*sock
, struct sockaddr
*saddr
, int len
)
962 int sock_no_connect(struct socket
*sock
, struct sockaddr
*saddr
,
968 int sock_no_socketpair(struct socket
*sock1
, struct socket
*sock2
)
973 int sock_no_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
978 int sock_no_getname(struct socket
*sock
, struct sockaddr
*saddr
,
984 unsigned int sock_no_poll(struct file
* file
, struct socket
*sock
, poll_table
*pt
)
989 int sock_no_ioctl(struct socket
*sock
, unsigned int cmd
, unsigned long arg
)
994 int sock_no_listen(struct socket
*sock
, int backlog
)
999 int sock_no_shutdown(struct socket
*sock
, int how
)
1004 int sock_no_setsockopt(struct socket
*sock
, int level
, int optname
,
1005 char *optval
, int optlen
)
1010 int sock_no_getsockopt(struct socket
*sock
, int level
, int optname
,
1011 char *optval
, int *optlen
)
1017 * Note: if you add something that sleeps here then change sock_fcntl()
1018 * to do proper fd locking.
1020 int sock_no_fcntl(struct socket
*sock
, unsigned int cmd
, unsigned long arg
)
1022 struct sock
*sk
= sock
->sk
;
/* F_SETOWN permission check: only own pid/pgrp unless CAP_KILL. */
1028 * This is a little restrictive, but it's the only
1029 * way to make sure that you can't send a sigurg to
1032 if (current
->pgrp
!= -arg
&&
1033 current
->pid
!= arg
&&
1034 !capable(CAP_KILL
)) return(-EPERM
);
1044 int sock_no_sendmsg(struct socket
*sock
, struct msghdr
*m
, int flags
,
1045 struct scm_cookie
*scm
)
1050 int sock_no_recvmsg(struct socket
*sock
, struct msghdr
*m
, int len
, int flags
,
1051 struct scm_cookie
*scm
)
1056 int sock_no_mmap(struct file
*file
, struct socket
*sock
, struct vm_area_struct
*vma
)
1058 /* Mirror missing mmap method error code */
1063 * Default Socket Callbacks
1066 void sock_def_wakeup(struct sock
*sk
)
1068 read_lock(&sk
->callback_lock
);
1069 if (sk
->sleep
&& waitqueue_active(sk
->sleep
))
1070 wake_up_interruptible_all(sk
->sleep
);
1071 read_unlock(&sk
->callback_lock
);
1074 void sock_def_error_report(struct sock
*sk
)
1076 read_lock(&sk
->callback_lock
);
1077 if (sk
->sleep
&& waitqueue_active(sk
->sleep
))
1078 wake_up_interruptible(sk
->sleep
);
1079 sk_wake_async(sk
,0,POLL_ERR
);
1080 read_unlock(&sk
->callback_lock
);
1083 void sock_def_readable(struct sock
*sk
, int len
)
1085 read_lock(&sk
->callback_lock
);
1086 if (sk
->sleep
&& waitqueue_active(sk
->sleep
))
1087 wake_up_interruptible(sk
->sleep
);
1088 sk_wake_async(sk
,1,POLL_IN
);
1089 read_unlock(&sk
->callback_lock
);
1092 void sock_def_write_space(struct sock
*sk
)
1094 read_lock(&sk
->callback_lock
);
1096 /* Do not wake up a writer until he can make "significant"
1099 if((atomic_read(&sk
->wmem_alloc
) << 1) <= sk
->sndbuf
) {
1100 if (sk
->sleep
&& waitqueue_active(sk
->sleep
))
1101 wake_up_interruptible(sk
->sleep
);
1103 /* Should agree with poll, otherwise some programs break */
1104 if (sock_writeable(sk
))
1105 sk_wake_async(sk
, 2, POLL_OUT
);
1108 read_unlock(&sk
->callback_lock
);
1111 void sock_def_destruct(struct sock
*sk
)
1113 if (sk
->protinfo
.destruct_hook
)
1114 kfree(sk
->protinfo
.destruct_hook
);
/*
 * NOTE(review): garbled excerpt of sock_init_data().  Braces and a
 * number of the initialising assignments (the gaps at original lines
 * 1129-1133, 1136-1139, 1142, 1148, 1152, 1155) are missing — restore
 * from upstream before compiling.  Visible logic: initialise queues,
 * timer, buffer sizes, default callback hooks, anonymous peer
 * credentials, infinite timeouts and the initial refcount.
 */
1117 void sock_init_data(struct socket
*sock
, struct sock
*sk
)
1119 skb_queue_head_init(&sk
->receive_queue
);
1120 skb_queue_head_init(&sk
->write_queue
);
1121 skb_queue_head_init(&sk
->error_queue
);
1123 init_timer(&sk
->timer
);
1125 sk
->allocation
= GFP_KERNEL
;
1126 sk
->rcvbuf
= sysctl_rmem_default
;
1127 sk
->sndbuf
= sysctl_wmem_default
;
1128 sk
->state
= TCP_CLOSE
;
1134 sk
->type
= sock
->type
;
1135 sk
->sleep
= &sock
->wait
;
1140 sk
->dst_lock
= RW_LOCK_UNLOCKED
;
1141 sk
->callback_lock
= RW_LOCK_UNLOCKED
;
/* Install the default callback set defined above. */
1143 sk
->state_change
= sock_def_wakeup
;
1144 sk
->data_ready
= sock_def_readable
;
1145 sk
->write_space
= sock_def_write_space
;
1146 sk
->error_report
= sock_def_error_report
;
1147 sk
->destruct
= sock_def_destruct
;
/* Anonymous peer credentials until a peer is attached. */
1149 sk
->peercred
.pid
= 0;
1150 sk
->peercred
.uid
= -1;
1151 sk
->peercred
.gid
= -1;
1153 sk
->rcvtimeo
= MAX_SCHEDULE_TIMEOUT
;
1154 sk
->sndtimeo
= MAX_SCHEDULE_TIMEOUT
;
1156 atomic_set(&sk
->refcnt
, 1);