/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					that a socket is reset and must stay shut up.
 *		Alan Cox	:	New fields for options.
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom.
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H
#include <linux/config.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/in.h>		/* struct sockaddr_in */

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <linux/in6.h>		/* struct sockaddr_in6 */
#include <linux/ipv6.h>		/* dest_cache, inet6_options */
#include <linux/icmpv6.h>
#include <net/if_inet6.h>	/* struct ipv6_mc_socklist */
#endif

#if defined(CONFIG_INET) || defined(CONFIG_INET_MODULE)
#include <linux/icmp.h>
#endif
#include <linux/tcp.h>		/* struct tcphdr */

#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <net/protocol.h>	/* struct inet_protocol */
#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
#include <net/x25.h>
#endif
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#include <net/ax25.h>
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
#include <net/netrom.h>
#endif
#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
#include <net/rose.h>
#endif
#endif

#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
#if defined(CONFIG_SPX) || defined(CONFIG_SPX_MODULE)
#include <net/spx.h>
#else
#include <net/ipx.h>
#endif /* CONFIG_SPX */
#endif /* CONFIG_IPX */

#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
#include <linux/atalk.h>
#endif

#if defined(CONFIG_DECNET) || defined(CONFIG_DECNET_MODULE)
#include <net/dn.h>
#endif

#if defined(CONFIG_IRDA) || defined(CONFIG_IRDA_MODULE)
#include <net/irda/irda.h>
#endif

#if defined(CONFIG_ATM) || defined(CONFIG_ATM_MODULE)
#include <linux/atmdev.h>
#endif

#ifdef CONFIG_FILTER
#include <linux/filter.h>
#endif

#include <asm/atomic.h>
#include <net/dst.h>
/* The AF_UNIX specific socket options */
struct unix_opt {
	struct unix_address	*addr;
	struct dentry		*dentry;
	struct vfsmount		*mnt;
	struct semaphore	readsem;
	struct sock		*gc_tree;
	wait_queue_head_t	peer_wait;
};
/* Once the IPX ncpd patches are in, these are going into protinfo. */
#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
struct ipx_opt {
	ipx_address		dest_addr;
	ipx_interface		*intrfc;
#ifdef CONFIG_IPX_INTERN
	unsigned char		node[IPX_NODE_LEN];
#endif
	/*
	 * To handle special ncp connection-handling sockets for mars_nwe,
	 * the connection number must be stored in the socket.
	 */
	unsigned short		ipx_ncp_conn;
};
#endif
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct ipv6_pinfo {
	struct in6_addr		saddr;
	struct in6_addr		rcv_saddr;
	struct in6_addr		daddr;
	struct in6_addr		*daddr_cache;

	/* pktoption flags */

	struct ipv6_mc_socklist	*ipv6_mc_list;
	struct ipv6_fl_socklist	*ipv6_fl_list;

	struct ipv6_txoptions	*opt;
	struct sk_buff		*pktoptions;
};

struct raw6_opt {
	__u32			checksum;	/* perform checksum */
	__u32			offset;		/* checksum offset */

	struct icmp6_filter	filter;
};
#endif /* IPV6 */
#if defined(CONFIG_INET) || defined(CONFIG_INET_MODULE)
struct raw_opt {
	struct icmp_filter	filter;
};
#endif
#if defined(CONFIG_INET) || defined(CONFIG_INET_MODULE)
struct inet_opt {
	int			ttl;		/* TTL setting */
	struct ip_options	*opt;
	unsigned char		hdrincl;	/* Include headers? */
	__u8			mc_ttl;		/* Multicasting TTL */
	__u8			mc_loop;	/* Loopback */
	int			mc_index;	/* Multicast device index */
	struct ip_mc_socklist	*mc_list;	/* Group array */
};
#endif
/* This defines a selective acknowledgement block. */
struct tcp_sack_block {
	__u32	start_seq;
	__u32	end_seq;
};
struct tcp_opt {
	int	tcp_header_len;	/* Bytes of tcp header to send */

	/*
	 *	Header prediction flags
	 *	0x5?10 << 16 + snd_wnd in net byte order
	 */
	__u32	pred_flags;

	/*
	 *	RFC793 variables by their proper names. This means you can
	 *	read the code and the spec side by side (and laugh ...)
	 *	See RFC793 and RFC1122. The RFC writes these in capitals.
	 */
	__u32	rcv_nxt;	/* What we want to receive next */
	__u32	snd_nxt;	/* Next sequence we send */

	__u32	snd_una;	/* First byte we want an ack for */
	__u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
	__u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
	__u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */

	/* Delayed ACK control data */
	struct {
		__u8	pending;	/* ACK is pending */
		__u8	quick;		/* Scheduled number of quick acks */
		__u8	pingpong;	/* The session is interactive */
		__u8	blocked;	/* Delayed ACK was blocked by socket lock */
		__u32	ato;		/* Predicted tick of soft clock */
		__u32	lrcvtime;	/* timestamp of last received data packet */
		__u16	last_seg_size;	/* Size of last incoming segment */
		__u16	rcv_mss;	/* MSS used for delayed ACK decisions */
		__u32	rcv_segs;	/* Number of received segments since last ack */
	} ack;

	/* Data for direct copy to user */
	struct {
		struct sk_buff_head	prequeue;
		struct task_struct	*task;
	} ucopy;

	__u32	snd_wl1;	/* Sequence for window update */
	__u32	snd_wl2;	/* Ack sequence for update */
	__u32	snd_wnd;	/* The window we expect to receive */
	__u32	max_window;	/* Maximal window ever seen from peer */
	__u32	pmtu_cookie;	/* Last pmtu seen by socket */
	__u16	mss_cache;	/* Cached effective mss, not including SACKS */
	__u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
	__u16	ext_header_len;	/* Network protocol overhead (IP/IPv6 options) */
	__u8	dup_acks;	/* Consecutive duplicate acks seen from other end */

	/* RTT measurement */
	__u8	backoff;	/* backoff */
	__u32	srtt;		/* smoothed round trip time << 3 */
	__u32	mdev;		/* mean deviation */
	__u32	rto;		/* retransmit timeout */

	__u32	packets_out;	/* Packets which are "in flight" */
	__u32	fackets_out;	/* Non-retrans SACK'd packets */
	__u32	retrans_out;	/* Fast-retransmitted packets out */
	__u32	high_seq;	/* snd_nxt at onset of congestion */

	/*
	 *	Slow start and congestion control (see also Nagle, and Karn & Partridge)
	 */
	__u32	snd_ssthresh;	/* Slow start size threshold */
	__u32	snd_cwnd;	/* Sending congestion window */
	__u16	snd_cwnd_cnt;	/* Linear increase counter */
	__u16	snd_cwnd_clamp;	/* Do not allow snd_cwnd to grow above this */

	__u8	nonagle;	/* Disable Nagle algorithm? */
	__u8	syn_retries;	/* num of allowed syn retries */
	__u16	user_mss;	/* mss requested by user in ioctl */

	/* Two commonly used timers in both sender and receiver paths. */
	struct timer_list	retransmit_timer;	/* Resend (no ack) */
	struct timer_list	delack_timer;		/* Ack delay */

	struct sk_buff_head	out_of_order_queue;	/* Out of order segments go here */

	struct tcp_func		*af_specific;	/* Operations which are AF_INET{4,6} specific */
	struct sk_buff		*send_head;	/* Front of stuff to transmit */
	struct sk_buff		*retrans_head;	/* retrans head can be
						 * different to the head of
						 * write queue if we are doing
						 * fast retransmit
						 */

	__u32	rcv_wnd;	/* Current receiver window */
	__u32	rcv_wup;	/* rcv_nxt on last window update sent */

	/*
	 *	Options received (usually on last packet, some only on SYN packets).
	 */
	char	tstamp_ok,	/* TIMESTAMP seen on SYN packet */
		wscale_ok,	/* Wscale seen on SYN packet */
		sack_ok;	/* SACK seen on SYN packet */
	char	saw_tstamp;	/* Saw TIMESTAMP on last packet */
	__u8	snd_wscale;	/* Window scaling received from sender */
	__u8	rcv_wscale;	/* Window scaling to send to receiver */
	__u8	rexmt_done;	/* Retransmitted up to send head? */
	__u8	keepalive_probes; /* num of allowed keep alive probes */

	__u32	rcv_tsval;	/* Time stamp value */
	__u32	rcv_tsecr;	/* Time stamp echo reply */
	__u32	ts_recent;	/* Time stamp to echo next */
	long	ts_recent_stamp;/* Time we stored ts_recent (for aging) */

	struct tcp_sack_block	selective_acks[4];	/* The SACKS themselves */

	struct timer_list	probe_timer;	/* Probes */
	__u32	window_clamp;	/* Maximal window to advertise */
	__u8	probes_out;	/* unanswered 0 window probes */
	__u8	num_sacks;	/* Number of SACK blocks */
	__u16	advmss;		/* Advertised MSS */

	/* The syn_wait_lock is necessary only to avoid tcp_get_info having
	 * to grab the main sock lock while browsing the listening hash
	 * (otherwise it's deadlock prone).
	 * This lock is acquired in read mode only from tcp_get_info() and
	 * it's acquired in write mode _only_ from code that is actively
	 * changing the syn_wait_queue. All readers that are holding
	 * the master sock lock don't need to grab this lock in read mode
	 * too, as the syn_wait_queue writes are always protected by
	 * the main sock lock.
	 */
	rwlock_t		syn_wait_lock;
	struct tcp_listen_opt	*listen_opt;

	/* FIFO of established children */
	struct open_request	*accept_queue;
	struct open_request	*accept_queue_tail;

	int			write_pending;	/* A write to socket waits to start. */

	unsigned int		keepalive_time;	 /* time before keep alive takes place */
	unsigned int		keepalive_intvl; /* time interval between keep alive probes */
};
/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/*
 * The idea is to start moving to a newer struct gradually
 *
 * IMHO the newer struct should have the following format:
 *
 *	sockmem [mem, proto, callbacks]
 *
 * The idea failed because the IPv6 transition assumes dual IP/IPv6 sockets.
 * So, net_pinfo really is IPv6-specific, while protinfo unifies all the
 * other private areas.
 */

/* Define this to get the sk->debug debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && ((sk)->debug)) printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif
/* This is the per-socket lock. The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	unsigned int		users;
	wait_queue_head_t	wq;
} socket_lock_t;

#define sock_lock_init(__sk) \
do {	spin_lock_init(&((__sk)->lock.slock)); \
	(__sk)->dst_lock = RW_LOCK_UNLOCKED; \
	(__sk)->lock.users = 0; \
	init_waitqueue_head(&((__sk)->lock.wq)); \
} while(0)
struct sock {
	/* Socket demultiplex comparisons on incoming packets. */
	__u32			daddr;		/* Foreign IPv4 addr */
	__u32			rcv_saddr;	/* Bound local IPv4 addr */
	__u16			dport;		/* Destination port */
	unsigned short		num;		/* Local port */
	int			bound_dev_if;	/* Bound device index if != 0 */

	/* Main hash linkage for various protocol lookup tables. */
	struct sock		*bind_next;
	struct sock		**bind_pprev;

	volatile unsigned char	state,		/* Connection state */
				zapped;		/* In ax25 & ipx means not linked */
	__u16			sport;		/* Source port */

	unsigned short		family;		/* Address family */
	unsigned char		reuse;		/* SO_REUSEADDR setting */
	atomic_t		refcnt;		/* Reference count */

	socket_lock_t		lock;		/* Synchronizer... */
	int			rcvbuf;		/* Size of receive buffer in bytes */

	wait_queue_head_t	*sleep;		/* Sock wait queue */
	struct dst_entry	*dst_cache;	/* Destination cache */
	rwlock_t		dst_lock;
	atomic_t		rmem_alloc;	/* Receive queue bytes committed */
	struct sk_buff_head	receive_queue;	/* Incoming packets */
	atomic_t		wmem_alloc;	/* Transmit queue bytes committed */
	struct sk_buff_head	write_queue;	/* Packet sending queue */
	atomic_t		omem_alloc;	/* "o" is "option" or "other" */
	__u32			saddr;		/* Sending source */
	unsigned int		allocation;	/* Allocation mode */
	int			sndbuf;		/* Size of send buffer in bytes */
	/* Not all are volatile, but some are, so we might as well say they all are.
	 * XXX Make this a flag word -DaveM
	 */
	volatile char		dead;
	unsigned char		debug;
	unsigned long		lingertime;

	/* The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} backlog;

	rwlock_t		callback_lock;

	/* Error queue, rarely used. */
	struct sk_buff_head	error_queue;

	unsigned short		shutdown;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	union {
		struct ipv6_pinfo	af_inet6;
	} net_pinfo;
#endif

	union {
		struct tcp_opt		af_tcp;
#if defined(CONFIG_INET) || defined(CONFIG_INET_MODULE)
		struct raw_opt		tp_raw4;
#endif
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		struct raw6_opt		tp_raw;
#endif /* CONFIG_IPV6 */
#if defined(CONFIG_SPX) || defined(CONFIG_SPX_MODULE)
		struct spx_opt		af_spx;
#endif /* CONFIG_SPX */
	} tp_pinfo;
	int			err, err_soft;	/* Soft holds errors that don't
						   cause failure but are the cause
						   of a persistent failure not just
						   'timed out' */
	unsigned short		ack_backlog;
	unsigned short		max_ack_backlog;

	unsigned char		localroute;	/* Route locally only */
	unsigned char		protocol;
	struct ucred		peercred;
	int			rcvlowat;
	long			rcvtimeo;
	long			sndtimeo;
#ifdef CONFIG_FILTER
	/* Socket Filtering Instructions */
	struct sk_filter	*filter;
#endif /* CONFIG_FILTER */
	/* This is where all the private (optional) areas that don't
	 * overlap will eventually live.
	 */
	union {
		struct unix_opt		af_unix;
#if defined(CONFIG_INET) || defined(CONFIG_INET_MODULE)
		struct inet_opt		af_inet;
#endif
#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
		struct atalk_sock	af_at;
#endif
#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
		struct ipx_opt		af_ipx;
#endif
#if defined(CONFIG_DECNET) || defined(CONFIG_DECNET_MODULE)
		struct dn_scp		dn;
#endif
#if defined(CONFIG_PACKET) || defined(CONFIG_PACKET_MODULE)
		struct packet_opt	*af_packet;
#endif
#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
		x25_cb			*x25;
#endif
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
		ax25_cb			*ax25;
#endif
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
		nr_cb			*nr;
#endif
#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
		rose_cb			*rose;
#endif
#ifdef CONFIG_NETLINK
		struct netlink_opt	*af_netlink;
#endif
#if defined(CONFIG_ECONET) || defined(CONFIG_ECONET_MODULE)
		struct econet_opt	*af_econet;
#endif
#if defined(CONFIG_ATM) || defined(CONFIG_ATM_MODULE)
		struct atm_vcc		*af_atm;
#endif
#if defined(CONFIG_IRDA) || defined(CONFIG_IRDA_MODULE)
		struct irda_sock	*irda;
#endif
	} protinfo;
	/* This part is used for the timeout functions. */
	struct timer_list	timer;		/* This is the sock cleanup timer. */
	struct timeval		stamp;

	/* Identd and reporting IO signals */
	struct socket		*socket;

	/* RPC layer private data */
	void			*user_data;

	/* Callbacks invoked on socket events. */
	void			(*state_change)(struct sock *sk);
	void			(*data_ready)(struct sock *sk, int bytes);
	void			(*write_space)(struct sock *sk);
	void			(*error_report)(struct sock *sk);

	int			(*backlog_rcv)(struct sock *sk,
						struct sk_buff *skb);
	void			(*destruct)(struct sock *sk);
};
/* The per-socket spinlock must be held here. */
#define sk_add_backlog(__sk, __skb) \
do {	if ((__sk)->backlog.tail == NULL) { \
		(__sk)->backlog.head = \
			(__sk)->backlog.tail = (__skb); \
	} else { \
		((__sk)->backlog.tail)->next = (__skb); \
		(__sk)->backlog.tail = (__skb); \
	} \
	(__skb)->next = NULL; \
} while(0)
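
/*
 * A sketch of how a transport's bottom-half receive path is meant to use
 * the backlog (my_proto_do_rcv() is hypothetical, not part of this file):
 * if a user context currently owns the socket, park the packet on the
 * backlog and let release_sock() feed it to backlog_rcv later.
 *
 *	bh_lock_sock(sk);
 *	if (sk->lock.users == 0)
 *		my_proto_do_rcv(sk, skb);
 *	else
 *		sk_add_backlog(sk, skb);
 *	bh_unlock_sock(sk);
 */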
/* IP protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char *optval, int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char *optval,
					int *option);
	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
					int len);
	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
					int len, int noblock, int flags,
					int *addr_len);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv)(struct sock *sk,
					struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Per-CPU usage statistics, each entry padded to a cache line. */
	struct {
		int	inuse;
		u8	__pad[SMP_CACHE_BYTES - sizeof(int)];
	} stats[NR_CPUS];
};
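
/*
 * A sketch (the my_proto_* functions are hypothetical, not part of this
 * file) of a transport filling in struct proto; ops that are not set
 * simply stay NULL:
 *
 *	static struct proto my_proto = {
 *		close:		my_proto_close,
 *		connect:	my_proto_connect,
 *		disconnect:	my_proto_disconnect,
 *		sendmsg:	my_proto_sendmsg,
 *		recvmsg:	my_proto_recvmsg,
 *		backlog_rcv:	my_proto_do_rcv,
 *		hash:		my_proto_hash,
 *		unhash:		my_proto_unhash,
 *		get_port:	my_proto_get_port,
 *	};
 */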
/* Called with local bh disabled */
static void __inline__ sock_prot_inc_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse++;
}

static void __inline__ sock_prot_dec_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse--;
}
/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Ports 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2
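
/*
 * Sketch of how the shutdown bits are meant to be tested (not from the
 * original file): the send path checks SEND_SHUTDOWN, the receive path
 * RCV_SHUTDOWN, and both bits together equal SHUTDOWN_MASK.
 *
 *	if (sk->shutdown & SEND_SHUTDOWN)
 *		return -EPIPE;		no more data may be sent
 *	if (sk->shutdown & RCV_SHUTDOWN)
 *		return 0;		EOF on the receive side
 */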
/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue. This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also an exclusive sleep lock serializing
 * accesses from user process context.
 */
extern void __lock_sock(struct sock *sk);
extern void __release_sock(struct sock *sk);

#define lock_sock(__sk) \
do {	spin_lock_bh(&((__sk)->lock.slock)); \
	if ((__sk)->lock.users != 0) \
		__lock_sock(__sk); \
	(__sk)->lock.users = 1; \
	spin_unlock_bh(&((__sk)->lock.slock)); \
} while(0)

#define release_sock(__sk) \
do {	spin_lock_bh(&((__sk)->lock.slock)); \
	if ((__sk)->backlog.tail != NULL) \
		__release_sock(__sk); \
	(__sk)->lock.users = 0; \
	if (waitqueue_active(&((__sk)->lock.wq))) wake_up(&((__sk)->lock.wq)); \
	spin_unlock_bh(&((__sk)->lock.slock)); \
} while(0)

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->lock.slock))
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->lock.slock))
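
/*
 * Process-context usage in a nutshell (a sketch): any packets parked on
 * the backlog while the lock was held are run through backlog_rcv by
 * release_sock() before the lock is dropped.
 *
 *	lock_sock(sk);
 *	sk->rcvbuf = val;	update state while owning the socket
 *	release_sock(sk);
 */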
/*
 * This might not be the most appropriate place for these two,
 * but since they are used by a lot of the net related code
 * at least they get declared in an include that is common to all.
 */

static __inline__ int min(unsigned int a, unsigned int b)
{
	if (a > b)
		a = b;
	return a;
}

static __inline__ int max(unsigned int a, unsigned int b)
{
	if (a < b)
		a = b;
	return a;
}
extern struct sock *	sk_alloc(int family, int priority, int zero_it);
extern void		sk_free(struct sock *sk);

extern struct sk_buff	*sock_wmalloc(struct sock *sk,
				      unsigned long size, int force,
				      int priority);
extern struct sk_buff	*sock_rmalloc(struct sock *sk,
				      unsigned long size, int force,
				      int priority);
extern void		sock_wfree(struct sk_buff *skb);
extern void		sock_rfree(struct sk_buff *skb);
extern void		sock_cfree(struct sk_buff *skb);
extern unsigned long	sock_rspace(struct sock *sk);
extern unsigned long	sock_wspace(struct sock *sk);

extern int		sock_setsockopt(struct socket *sock, int level,
					int op, char *optval,
					int optlen);

extern int		sock_getsockopt(struct socket *sock, int level,
					int op, char *optval,
					int *optlen);

extern struct sk_buff	*sock_alloc_send_skb(struct sock *sk,
					     unsigned long size,
					     unsigned long fallback,
					     int noblock,
					     int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size, int priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);

extern int copy_and_csum_toiovec(struct iovec *iov, struct sk_buff *skb, int hlen);
/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int	sock_no_release(struct socket *);
extern int	sock_no_bind(struct socket *,
			     struct sockaddr *, int);
extern int	sock_no_connect(struct socket *,
				struct sockaddr *, int, int);
extern int	sock_no_socketpair(struct socket *,
				   struct socket *);
extern int	sock_no_accept(struct socket *,
			       struct socket *, int);
extern int	sock_no_getname(struct socket *,
				struct sockaddr *, int *, int);
extern unsigned int sock_no_poll(struct file *, struct socket *,
				 struct poll_table_struct *);
extern int	sock_no_ioctl(struct socket *, unsigned int,
			      unsigned long);
extern int	sock_no_listen(struct socket *, int);
extern int	sock_no_shutdown(struct socket *, int);
extern int	sock_no_getsockopt(struct socket *, int, int,
				   char *, int *);
extern int	sock_no_setsockopt(struct socket *, int, int,
				   char *, int);
extern int	sock_no_fcntl(struct socket *,
			      unsigned int, unsigned long);
extern int	sock_no_sendmsg(struct socket *,
				struct msghdr *, int,
				struct scm_cookie *);
extern int	sock_no_recvmsg(struct socket *,
				struct msghdr *, int, int,
				struct scm_cookie *);
extern int	sock_no_mmap(struct file *file,
			     struct socket *sock,
			     struct vm_area_struct *vma);
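
/*
 * A sketch (not from the original file; PF_MYPROTO and the my_* functions
 * are hypothetical) of plugging the sock_no_* stubs into a proto_ops table
 * for a family that supports neither connections nor socketpairs:
 *
 *	static struct proto_ops my_proto_ops = {
 *		family:		PF_MYPROTO,
 *		release:	my_release,
 *		bind:		my_bind,
 *		connect:	sock_no_connect,
 *		socketpair:	sock_no_socketpair,
 *		accept:		sock_no_accept,
 *		listen:		sock_no_listen,
 *		shutdown:	my_shutdown,
 *		sendmsg:	my_sendmsg,
 *		recvmsg:	my_recvmsg,
 *		mmap:		sock_no_mmap,
 *	};
 */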
/*
 * Default socket callbacks and setup code
 */

extern void sock_def_destruct(struct sock *);

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);
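
/*
 * Typical creation-time usage (a sketch, as e.g. a protocol's create
 * routine would do it): allocate the sock, then initialise the core
 * fields and tie it to the BSD-layer socket.
 *
 *	struct sock *sk = sk_alloc(PF_INET, GFP_KERNEL, 1);
 *	if (sk == NULL)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 */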
extern void sklist_remove_socket(struct sock **list, struct sock *sk);
extern void sklist_insert_socket(struct sock **list, struct sock *sk);
extern void sklist_destroy_socket(struct sock **list, struct sock *sk);
#ifdef CONFIG_FILTER

/**
 *	sk_filter - run a packet through a socket filter
 *	@skb: buffer to filter
 *	@filter: filter to apply
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or 1 if the packet should be tossed.
 */

extern __inline__ int sk_filter(struct sk_buff *skb, struct sk_filter *filter)
{
	int pkt_len;

	pkt_len = sk_run_filter(skb, filter->insns, filter->len);
	if (!pkt_len)
		return 1;	/* Toss Packet */
	else
		skb_trim(skb, pkt_len);

	return 0;
}
/**
 *	sk_filter_release - release a socket filter
 *	@sk: socket
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

extern __inline__ void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->omem_alloc);

	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

extern __inline__ void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->omem_alloc);
}
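
/*
 * Sketch of the intended attach pattern (the real work is done by
 * setsockopt(SO_ATTACH_FILTER); new_fp/old_fp are hypothetical locals):
 * charge the incoming filter first, then release the one it replaces.
 *
 *	sk_filter_charge(sk, new_fp);
 *	old_fp = sk->filter;
 *	sk->filter = new_fp;
 *	if (old_fp)
 *		sk_filter_release(sk, old_fp);
 */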
#endif /* CONFIG_FILTER */
/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, reference from a list,
 *   running timer, skb in flight) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and the current process on the current CPU
 *   is the last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, the socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   by the time it is called, the socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab the reference count,
 *   while they sit in queue. Otherwise, packets will leak when the socket
 *   is looked up by one CPU and unhashing is done by another CPU.
 *   It is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   BR_NETPROTO_LOCK, so it does not have this race condition. UNIX sockets
 *   use a separate SMP lock, so they are prone to it as well.
 */
/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed, f.e. it is found in a hash table
   or a list and the lookup is made under a lock preventing hash table
   modifications.
 */
extern __inline__ void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->refcnt);
}

/* Ungrab socket in the context which assumes that the socket refcnt
   cannot hit zero, f.e. it is true in the context of any socketcall.
 */
extern __inline__ void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->refcnt);
}

/* Ungrab socket and destroy it, if it was the last reference. */
extern __inline__ void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->refcnt))
		sk_free(sk);
}
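
/*
 * The usual lookup pattern under these postulates (a sketch; the table
 * lock and my_proto_lookup() are hypothetical): grab the reference while
 * the table lock pins the socket, drop it when done.
 *
 *	read_lock(&my_table_lock);
 *	sk = my_proto_lookup(daddr, dport);
 *	if (sk)
 *		sock_hold(sk);
 *	read_unlock(&my_table_lock);
 *	(use sk here)
 *	if (sk)
 *		sock_put(sk);
 */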
/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that the parent inode holds a reference count on this struct sock;
 * we do not release it in this function, because the protocol
 * probably wants some additional cleanups or even continuing
 * to work with this socket (TCP).
 *
 * NOTE: When softnet goes in, replace _irq with _bh!
 */
extern __inline__ void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->callback_lock);
	sk->dead = 1;
	sk->socket = NULL;
	sk->sleep = NULL;
	write_unlock_bh(&sk->callback_lock);
}
extern __inline__ void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->callback_lock);
	sk->sleep = &parent->wait;
	sk->socket = parent;
	write_unlock_bh(&sk->callback_lock);
}
extern __inline__ struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->dst_cache;
}

extern __inline__ struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->dst_lock);
	dst = sk->dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->dst_lock);
	return dst;
}
extern __inline__ void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->dst_cache;
	sk->dst_cache = dst;
	dst_release(old_dst);
}

extern __inline__ void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->dst_lock);
}
extern __inline__ void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->dst_cache;
	sk->dst_cache = NULL;
	dst_release(old_dst);
}

extern __inline__ void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->dst_lock);
}
extern __inline__ struct dst_entry *
__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk->dst_cache;

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk->dst_cache = NULL;
		return NULL;
	}

	return dst;
}

extern __inline__ struct dst_entry *
sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
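
/*
 * Sketch of use in an output path (my_reroute() is a hypothetical
 * re-lookup, not part of this file): revalidate the cached route before
 * transmitting and re-route when the cache has gone stale.
 *
 *	struct dst_entry *dst = sk_dst_check(sk, cookie);
 *	if (dst == NULL)
 *		dst = my_reroute(sk);
 */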
/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

extern __inline__ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->wmem_alloc);
}

extern __inline__ void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->rmem_alloc);
}

extern __inline__ void skb_set_owner_c(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_cfree;
}
extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
		return -ENOMEM;

#ifdef CONFIG_FILTER
	if (sk->filter) {
		int err = 0;
		struct sk_filter *filter;

		/* It would be a deadlock if sock_queue_rcv_skb were used
		   with the socket lock held! We assume that users of this
		   function are lock free.
		 */
		bh_lock_sock(sk);
		if ((filter = sk->filter) != NULL && sk_filter(skb, filter))
			err = -EPERM;
		bh_unlock_sock(sk);
		if (err)
			return err;	/* Toss packet */
	}
#endif /* CONFIG_FILTER */

	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->receive_queue, skb);
	if (!sk->dead)
		sk->data_ready(sk, skb->len);
	return 0;
}
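
/*
 * Typical delivery from a datagram protocol's receive handler (a sketch;
 * the caller owns the skb and frees it on failure):
 *
 *	if (sock_queue_rcv_skb(sk, skb) < 0) {
 *		kfree_skb(skb);
 *		return -1;
 *	}
 *	return 0;
 */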
extern __inline__ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
		return -ENOMEM;

	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->error_queue, skb);
	if (!sk->dead)
		sk->data_ready(sk, skb->len);
	return 0;
}
/*
 *	Recover an error report and clear atomically
 */

extern __inline__ int sock_error(struct sock *sk)
{
	int err = xchg(&sk->err, 0);

	return -err;
}
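
/*
 * Typical use at the top of a sendmsg/recvmsg implementation (a sketch):
 * a pending asynchronous error is reported by this call and cleared.
 *
 *	err = sock_error(sk);
 *	if (err)
 *		return err;	already negative
 */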
extern __inline__ unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->shutdown & SEND_SHUTDOWN)) {
		amt = sk->sndbuf - atomic_read(&sk->wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}
extern __inline__ void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->socket && sk->socket->fasync_list)
		sock_wake_async(sk->socket, how, band);
}
#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 128
/* Must be less than or equal to SOCK_MIN_SNDBUF */
#define SOCK_MIN_WRITE_SPACE SOCK_MIN_SNDBUF

/*
 * Default write policy as shown to user space via poll/select/SIGIO.
 * The kernel internally doesn't use the MIN_WRITE_SPACE threshold.
 */
extern __inline__ int sock_writeable(struct sock *sk)
{
	return sock_wspace(sk) >= SOCK_MIN_WRITE_SPACE;
}
extern __inline__ int gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
extern __inline__ long sock_rcvtimeo(struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->rcvtimeo;
}

extern __inline__ long sock_sndtimeo(struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sndtimeo;
}

extern __inline__ int sock_rcvlowat(struct sock *sk, int waitall, int len)
{
	return waitall ? len : min(sk->rcvlowat, len);
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
extern __inline__ int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}
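
/*
 * Sketch of the intended wait pattern in a blocking recvmsg (the
 * waitqueue setup around schedule_timeout() is omitted here for brevity):
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		timeo = schedule_timeout(timeo);
 *	}
 */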
/*
 *	Enable debug/info messages
 */
#if 0
#define NETDEBUG(x)	do { } while (0)
#else
#define NETDEBUG(x)	do { x; } while (0)
#endif
/*
 * Macros for sleeping on a socket. Use them like this:
 *
 *	SOCK_SLEEP_PRE(sk)
 *	if (condition)
 *		schedule();
 *	SOCK_SLEEP_POST(sk)
 */

#define SOCK_SLEEP_PRE(sk)	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sleep, &wait); \

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sleep, &wait); \
				}
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

#endif /* _SOCK_H */