/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *		Pauline Middelink:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H
#include <linux/config.h>
#include <linux/timer.h>
#include <linux/in.h>		/* struct sockaddr_in */

#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
#include <linux/in6.h>		/* struct sockaddr_in6 */
#include <linux/ipv6.h>		/* dest_cache, inet6_options */
#include <linux/icmpv6.h>
#include <net/if_inet6.h>	/* struct ipv6_mc_socklist */
#endif

#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE)
#include <linux/icmp.h>
#endif
#include <linux/tcp.h>		/* struct tcphdr */

#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <net/protocol.h>	/* struct inet_protocol */
#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
#include <net/x25.h>
#endif
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#include <net/ax25.h>
#endif
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
#include <net/netrom.h>
#endif
#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
#include <net/rose.h>
#endif

#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
#include <net/ipx.h>
#if defined(CONFIG_SPX) || defined(CONFIG_SPX_MODULE)
#include <net/spx.h>
#endif /* CONFIG_SPX */
#endif /* CONFIG_IPX */

#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
#include <linux/atalk.h>
#endif

#if defined(CONFIG_DECNET) || defined(CONFIG_DECNET_MODULE)
#include <net/dn.h>
#endif

#if defined(CONFIG_IRDA) || defined(CONFIG_IRDA_MODULE)
#include <net/irda/irda.h>
#endif

#if defined(CONFIG_ATM) || defined(CONFIG_ATM_MODULE)
#include <linux/atmdev.h>
#endif

#ifdef CONFIG_FILTER
#include <linux/filter.h>
#endif

#include <asm/atomic.h>
#include <net/dst.h>
#define MIN_WRITE_SPACE	2048
/* The AF_UNIX specific socket options */
struct unix_opt {
	struct unix_address	*addr;
	struct dentry		*dentry;
	struct semaphore	readsem;
	struct sock		*gc_tree;
	wait_queue_head_t	peer_wait;
};
/* Once the IPX ncpd patches are in these are going into protinfo. */
#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
struct ipx_opt {
	ipx_address		dest_addr;
	ipx_interface		*intrfc;
#ifdef CONFIG_IPX_INTERN
	unsigned char		node[IPX_NODE_LEN];
#endif
	/*
	 * To handle special ncp connection-handling sockets for mars_nwe,
	 * the connection number must be stored in the socket.
	 */
	unsigned short		ipx_ncp_conn;
};
#endif
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
struct ipv6_pinfo {
	struct in6_addr		saddr;
	struct in6_addr		rcv_saddr;
	struct in6_addr		daddr;
	struct in6_addr		*daddr_cache;

	/* pktoption flags */

	struct ipv6_mc_socklist	*ipv6_mc_list;
	struct ipv6_fl_socklist	*ipv6_fl_list;

	struct ipv6_txoptions	*opt;
	struct sk_buff		*pktoptions;
};

struct raw6_opt {
	__u32			checksum;	/* perform checksum */
	__u32			offset;		/* checksum offset  */

	struct icmp6_filter	filter;
};
#endif /* IPV6 */
#if defined(CONFIG_INET) || defined(CONFIG_INET_MODULE)
struct raw_opt {
	struct icmp_filter	filter;
};
#endif
#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE)
struct inet_opt {
	int			ttl;		/* TTL setting			*/
	struct ip_options	*opt;
	unsigned char		hdrincl;	/* Include headers ?		*/
	__u8			mc_ttl;		/* Multicasting TTL		*/
	__u8			mc_loop;	/* Loopback			*/
	int			mc_index;	/* Multicast device index	*/
	struct ip_mc_socklist	*mc_list;	/* Group array			*/
};
#endif
/* This defines a selective acknowledgement block. */
struct tcp_sack_block {
	__u32	start_seq;
	__u32	end_seq;
};
struct tcp_opt {
	int	tcp_header_len;	/* Bytes of tcp header to send		*/

	/*
	 *	Header prediction flags
	 *	0x5?10 << 16 + snd_wnd in net byte order
	 */
	__u32	pred_flags;

	/*
	 *	RFC793 variables by their proper names. This means you can
	 *	read the code and the spec side by side (and laugh ...)
	 *	See RFC793 and RFC1122. The RFC writes these in capitals.
	 */
	__u32	rcv_nxt;	/* What we want to receive next		*/
	__u32	snd_nxt;	/* Next sequence we send		*/

	__u32	snd_una;	/* First byte we want an ack for	*/
	__u32	rcv_tstamp;	/* timestamp of last received packet	*/
	__u32	lrcvtime;	/* timestamp of last received data packet */
	__u32	srtt;		/* smoothed round trip time << 3	*/

	__u32	ato;		/* delayed ack timeout			*/
	__u32	snd_wl1;	/* Sequence for window update		*/
	__u32	snd_wl2;	/* Ack sequence for update		*/
	__u32	snd_wnd;	/* The window we expect to receive	*/
	__u32	pmtu_cookie;	/* Last pmtu seen by socket		*/
	__u16	mss_cache;	/* Cached effective mss, not including SACKS */
	__u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
	__u16	ext_header_len;	/* Dave, do you allow mw to use this hole? 8) --ANK */
	__u8	pending;	/* pending events			*/
	__u32	last_ack_sent;	/* last ack we sent			*/

	__u32	backoff;	/* backoff				*/
	__u32	mdev;		/* medium deviation			*/
	__u32	snd_cwnd;	/* Sending congestion window		*/
	__u32	rto;		/* retransmit timeout			*/

	__u32	packets_out;	/* Packets which are "in flight"	*/
	__u32	fackets_out;	/* Non-retrans SACK'd packets		*/
	__u32	retrans_out;	/* Fast-retransmitted packets out	*/
	__u32	high_seq;	/* snd_nxt at onset of congestion	*/

	/*
	 *	Slow start and congestion control (see also Nagle, and Karn & Partridge)
	 */
	__u32	snd_ssthresh;	/* Slow start size threshold		*/
	__u16	snd_cwnd_cnt;	/* Linear increase counter		*/
	__u16	snd_cwnd_clamp;	/* Do not allow snd_cwnd to grow above this */
	__u8	dup_acks;	/* Consecutive duplicate acks seen from other end */

	__u16	user_mss;	/* mss requested by user in ioctl	*/
	/* Two commonly used timers in both sender and receiver paths. */
	struct timer_list	retransmit_timer;	/* Resend (no ack)	*/
	struct timer_list	delack_timer;		/* Ack delay		*/

	struct sk_buff_head	out_of_order_queue;	/* Out of order segments go here */

	struct tcp_func		*af_specific;	/* Operations which are AF_INET{4,6} specific */
	struct sk_buff		*send_head;	/* Front of stuff to transmit	*/
	struct sk_buff		*retrans_head;	/* retrans head can be
						 * different to the head of
						 * write queue if we are doing
						 * fast retransmit
						 */

	__u32	rcv_wnd;	/* Current receiver window		*/
	__u32	rcv_wup;	/* rcv_nxt on last window update sent	*/

	/*
	 *	Options received (usually on last packet, some only on SYN packets).
	 */
	char	tstamp_ok,	/* TIMESTAMP seen on SYN packet		*/
		wscale_ok,	/* Wscale seen on SYN packet		*/
		sack_ok;	/* SACK seen on SYN packet		*/
	char	saw_tstamp;	/* Saw TIMESTAMP on last packet		*/
	__u8	snd_wscale;	/* Window scaling received from sender	*/
	__u8	rcv_wscale;	/* Window scaling to send to receiver	*/
	__u8	rexmt_done;	/* Retransmitted up to send head?	*/
	__u32	rcv_tsval;	/* Time stamp value			*/
	__u32	rcv_tsecr;	/* Time stamp echo reply		*/
	__u32	ts_recent;	/* Time stamp to echo next		*/
	long	ts_recent_stamp;/* Time we stored ts_recent (for aging)	*/
	int	num_sacks;	/* Number of SACK blocks		*/
	struct tcp_sack_block	selective_acks[4]; /* The SACKS themselves */

	struct timer_list	probe_timer;	/* Probes		*/
	__u32	window_clamp;	/* XXX Document this... -DaveM		*/
	__u32	probes_out;	/* unanswered 0 window probes		*/
	__u32	last_seg_size;	/* Size of last incoming segment	*/
	__u32	rcv_mss;	/* MSS used for delayed ACK decisions	*/

	struct open_request	*syn_wait_queue;
	struct open_request	**syn_wait_last;

	int			syn_backlog;	/* Backlog of received SYNs */

	unsigned int		keepalive_time;	  /* time before keep alive takes place */
	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */
	unsigned char		keepalive_probes; /* num of allowed keep alive probes */
	unsigned char		syn_retries;	  /* num of allowed syn retries */
};
/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/*
 * The idea is to start moving to a newer struct gradually.
 *
 * IMHO the newer struct should have the following format:
 *
 *	struct sock {
 *		sockmem [mem, proto, callbacks]
 *		...
 *	}
 *
 * The idea failed because IPv6 transition assumes dual IP/IPv6 sockets.
 * So, net_pinfo is really IPv6-specific, and protinfo unifies all the
 * other private areas.
 */
/* Define this to get the sk->debug debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if((sk) && ((sk)->debug)) printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif
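
/* Usage sketch (the call below is a made-up example, not quoted from any
 * protocol): with SOCK_DEBUGGING defined, SOCK_DEBUG() prints only for
 * sockets whose debug flag has been set, e.g. via SO_DEBUG:
 *
 *	SOCK_DEBUG(sk, "state change %d -> %d\n", oldstate, newstate);
 */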
/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	unsigned int		users;
	wait_queue_head_t	wq;
} socket_lock_t;

#define sock_lock_init(__sk) \
do {	spin_lock_init(&((__sk)->lock.slock)); \
	(__sk)->lock.users = 0; \
	init_waitqueue_head(&((__sk)->lock.wq)); \
} while(0)
struct sock {
	/* Socket demultiplex comparisons on incoming packets. */
	__u32			daddr;		/* Foreign IPv4 addr		*/
	__u32			rcv_saddr;	/* Bound local IPv4 addr	*/
	__u16			dport;		/* Destination port		*/
	unsigned short		num;		/* Local port			*/
	int			bound_dev_if;	/* Bound device index if != 0	*/

	/* Main hash linkage for various protocol lookup tables. */
	struct sock		*next;
	struct sock		**pprev;
	struct sock		*bind_next;
	struct sock		**bind_pprev;

	volatile unsigned char	state,		/* Connection state		*/
				zapped;		/* In ax25 & ipx means not linked */
	__u16			sport;		/* Source port			*/

	unsigned short		family;		/* Address family		*/
	unsigned char		reuse,		/* SO_REUSEADDR setting		*/
				nonagle;	/* Disable Nagle algorithm?	*/
	atomic_t		refcnt;		/* Reference count		*/

	socket_lock_t		lock;		/* Synchronizer...		*/
	int			rcvbuf;		/* Size of receive buffer in bytes */

	wait_queue_head_t	*sleep;		/* Sock wait queue		*/
	struct dst_entry	*dst_cache;	/* Destination cache		*/
	rwlock_t		dst_lock;
	atomic_t		rmem_alloc;	/* Receive queue bytes committed */
	struct sk_buff_head	receive_queue;	/* Incoming packets		*/
	atomic_t		wmem_alloc;	/* Transmit queue bytes committed */
	struct sk_buff_head	write_queue;	/* Packet sending queue		*/
	atomic_t		omem_alloc;	/* "o" is "option" or "other"	*/
	__u32			saddr;		/* Sending source		*/
	unsigned int		allocation;	/* Allocation mode		*/
	int			sndbuf;		/* Size of send buffer in bytes	*/
	/* Not all are volatile, but some are, so we might as well say they all are.
	 * XXX Make this a flag word -DaveM
	 */
	volatile char		dead,
				done,
				urginline,
				keepopen,
				linger,
				destroy,
				no_check,
				broadcast,
				bsdism;
	unsigned char		debug;
	int			proc;
	unsigned long		lingertime;
	/* The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} backlog;
	rwlock_t		callback_lock;

	/* Error queue, rarely used. */
	struct sk_buff_head	error_queue;

	struct proto		*prot;

	unsigned short		shutdown;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
	union {
		struct ipv6_pinfo	af_inet6;
	} net_pinfo;
#endif

	union {
		struct tcp_opt		af_tcp;
#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE)
		struct raw_opt		tp_raw4;
#endif
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct raw6_opt		tp_raw;
#endif /* CONFIG_IPV6 */
#if defined(CONFIG_SPX) || defined (CONFIG_SPX_MODULE)
		struct spx_opt		af_spx;
#endif /* CONFIG_SPX */
	} tp_pinfo;
	int			err, err_soft;	/* Soft holds errors that don't
						   cause failure but are the cause
						   of a persistent failure not just
						   'timed out' */
	unsigned short		ack_backlog;
	unsigned short		max_ack_backlog;
	unsigned char		localroute;	/* Route locally only */
	unsigned char		protocol;
	struct ucred		peercred;
#ifdef CONFIG_FILTER
	/* Socket Filtering Instructions */
	struct sk_filter	*filter;
#endif /* CONFIG_FILTER */
	/* This is where all the private (optional) areas that don't
	 * overlap will eventually live.
	 */
	union {
		struct unix_opt		af_unix;
#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE)
		struct inet_opt		af_inet;
#endif
#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
		struct atalk_sock	af_at;
#endif
#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
		struct ipx_opt		af_ipx;
#endif
#if defined (CONFIG_DECNET) || defined(CONFIG_DECNET_MODULE)
		struct dn_scp		dn;
#endif
#if defined (CONFIG_PACKET) || defined(CONFIG_PACKET_MODULE)
		struct packet_opt	*af_packet;
#endif
#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
		x25_cb			*x25;
#endif
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
		ax25_cb			*ax25;
#endif
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
		nr_cb			*nr;
#endif
#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
		rose_cb			*rose;
#endif
#ifdef CONFIG_NETLINK
		struct netlink_opt	*af_netlink;
#endif
#if defined(CONFIG_ECONET) || defined(CONFIG_ECONET_MODULE)
		struct econet_opt	*af_econet;
#endif
#if defined(CONFIG_ATM) || defined(CONFIG_ATM_MODULE)
		struct atm_vcc		*af_atm;
#endif
#if defined(CONFIG_IRDA) || defined(CONFIG_IRDA_MODULE)
		struct irda_sock	*irda;
#endif
	} protinfo;
	/* This part is used for the timeout functions. */
	spinlock_t		timer_lock;	/* Required until timer in core is repaired */
	struct timer_list	timer;		/* This is the sock cleanup timer. */
	struct timeval		stamp;

	/* Identd and reporting IO signals */
	struct socket		*socket;

	/* RPC layer private data */
	void			*user_data;

	/* Callbacks */
	void			(*state_change)(struct sock *sk);
	void			(*data_ready)(struct sock *sk, int bytes);
	void			(*write_space)(struct sock *sk);
	void			(*error_report)(struct sock *sk);

	int			(*backlog_rcv)(struct sock *sk,
					       struct sk_buff *skb);
	void			(*destruct)(struct sock *sk);
};
/* The per-socket spinlock must be held here. */
#define sk_add_backlog(__sk, __skb) \
do {	if((__sk)->backlog.tail == NULL) { \
		(__sk)->backlog.head = \
		(__sk)->backlog.tail = (__skb); \
	} else { \
		((__sk)->backlog.tail)->next = (__skb); \
		(__sk)->backlog.tail = (__skb); \
	} \
	(__skb)->next = NULL; \
} while(0)
/* IP protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					 long timeout);
	int			(*connect)(struct sock *sk,
					   struct sockaddr *uaddr,
					   int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err);
	void			(*retransmit)(struct sock *sk, int all);
	void			(*write_wakeup)(struct sock *sk);
	void			(*read_wakeup)(struct sock *sk);
	unsigned int		(*poll)(struct file *file, struct socket *sock,
					struct poll_table_struct *wait);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					      int optname, char *optval,
					      int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					      int optname, char *optval,
					      int *option);
	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
					   int len);
	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
					   int len, int noblock, int flags,
					   int *addr_len);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv)(struct sock *sk,
					       struct sk_buff *skb);
	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	unsigned short		max_header;
	unsigned long		retransmits;

	int			inuse, highestinuse;
};
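
/* Shape of a typical instance, for illustration only (every name below is
 * hypothetical; real protocols such as TCP fill in the same slots):
 *
 *	struct proto hypothetical_prot = {
 *		close:		hypothetical_close,
 *		connect:	hypothetical_connect,
 *		backlog_rcv:	hypothetical_do_rcv,
 *		hash:		hypothetical_hash,
 *		unhash:		hypothetical_unhash,
 *		get_port:	hypothetical_get_port,
 *	};
 */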
/* About 10 seconds */
#define SOCK_DESTROY_TIME	(10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2
/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 */
extern void __lock_sock(struct sock *sk);
extern void __release_sock(struct sock *sk);

#define lock_sock(__sk) \
do {	spin_lock_bh(&((__sk)->lock.slock)); \
	if ((__sk)->lock.users != 0) \
		__lock_sock(__sk); \
	(__sk)->lock.users = 1; \
	spin_unlock_bh(&((__sk)->lock.slock)); \
} while(0)

#define release_sock(__sk) \
do {	spin_lock_bh(&((__sk)->lock.slock)); \
	(__sk)->lock.users = 0; \
	if ((__sk)->backlog.tail != NULL) \
		__release_sock(__sk); \
	wake_up(&((__sk)->lock.wq)); \
	spin_unlock_bh(&((__sk)->lock.slock)); \
} while(0)
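
/* Process-context usage sketch (hypothetical caller): bracket socket state
 * changes with lock_sock()/release_sock(); packets queued on the backlog
 * by BHs in the meantime are fed to sk->backlog_rcv when __release_sock()
 * runs from release_sock():
 *
 *	lock_sock(sk);
 *	... modify socket state safely ...
 *	release_sock(sk);
 */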
/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->lock.slock))
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->lock.slock))
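
/* Illustrative BH receive pattern (my_do_rcv stands in for a protocol's
 * real backlog_rcv handler): deliver directly when no user context owns
 * the socket, otherwise defer the skb to the backlog:
 *
 *	bh_lock_sock(sk);
 *	if (!sk->lock.users)
 *		my_do_rcv(sk, skb);
 *	else
 *		sk_add_backlog(sk, skb);
 *	bh_unlock_sock(sk);
 */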
/*
 * This might not be the most appropriate place for these two
 * but since they are used by a lot of the net related code
 * at least they get declared in a header that is common to all.
 */

static __inline__ int min(unsigned int a, unsigned int b)
{
	if (a > b)
		a = b;
	return a;
}

static __inline__ int max(unsigned int a, unsigned int b)
{
	if (a < b)
		a = b;
	return a;
}
extern struct sock *		sk_alloc(int family, int priority, int zero_it);
extern void			sk_free(struct sock *sk);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      int priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      int priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);
extern void			sock_cfree(struct sk_buff *skb);
extern unsigned long		sock_rspace(struct sock *sk);
extern unsigned long		sock_wspace(struct sock *sk);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char *optval,
						int optlen);
extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char *optval,
						int *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     unsigned long fallback,
						     int noblock,
						     int *errcode);
extern void			*sock_kmalloc(struct sock *sk, int size,
					      int priority);
extern void			sock_kfree_s(struct sock *sk, void *mem,
					     int size);
/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int			sock_no_release(struct socket *);
extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int, int,
						   char *, int *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char *, int);
extern int			sock_no_fcntl(struct socket *,
					      unsigned int, unsigned long);
extern int			sock_no_sendmsg(struct socket *,
						struct msghdr *, int,
						struct scm_cookie *);
extern int			sock_no_recvmsg(struct socket *,
						struct msghdr *, int, int,
						struct scm_cookie *);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
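
/* For illustration only (the none_* name is made up): a protocol that
 * implements only some operations can point the remaining proto_ops
 * slots at these stubs, which fail cleanly:
 *
 *	struct proto_ops none_ops = {
 *		release:	sock_no_release,
 *		bind:		sock_no_bind,
 *		connect:	sock_no_connect,
 *		mmap:		sock_no_mmap,
 *		...
 *	};
 */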
/*
 *	Default socket callbacks and setup code
 */

extern void sock_def_callback1(struct sock *);
extern void sock_def_callback2(struct sock *, int);
extern void sock_def_callback3(struct sock *);
extern void sock_def_destruct(struct sock *);

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);
extern void sklist_remove_socket(struct sock **list, struct sock *sk);
extern void sklist_insert_socket(struct sock **list, struct sock *sk);
extern void sklist_destroy_socket(struct sock **list, struct sock *sk);
#ifdef CONFIG_FILTER
/*
 * Run the filter code and then cut skb->data to correct size returned by
 * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data.
 */
extern __inline__ int sk_filter(struct sk_buff *skb, struct sk_filter *filter)
{
	int pkt_len;

	pkt_len = sk_run_filter(skb, filter->insns, filter->len);
	if (!pkt_len)
		return 1;	/* Toss Packet */
	else
		skb_trim(skb, pkt_len);

	return 0;
}
extern __inline__ void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->omem_alloc);

	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}
extern __inline__ void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->omem_alloc);
}
#endif /* CONFIG_FILTER */
/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, reference from a list,
 *   running timer, skb in flight) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and current process on current CPU
 *   is last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   by the time it is called, socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   while they sit in queue. Otherwise, packets will leak when the socket
 *   is looked up by one CPU and unhashing is done by another CPU.
 *   It is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   ptype_lock, so that it does not have this race condition. UNIX sockets
 *   use a separate SMP lock, so they are prone to it as well.
 */
/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed f.e. it is found in hash table
   or a list and the lookup is made under lock preventing hash table
   modifications.
 */
extern __inline__ void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->refcnt);
}
/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
 */
extern __inline__ void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->refcnt);
}
/* Ungrab socket and destroy it, if it was the last reference. */
extern __inline__ void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->refcnt))
		sk_free(sk);
}
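
/* Typical pairing, for illustration (lookup_fn stands in for a real lookup,
 * e.g. a protocol's hash-table search, which returns the socket with its
 * reference count already held):
 *
 *	sk = lookup_fn(...);
 *	if (sk != NULL) {
 *		... use sk, then drop the reference ...
 *		sock_put(sk);
 *	}
 */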
extern __inline__ struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->dst_cache;
}
extern __inline__ struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->dst_lock);
	dst = sk->dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->dst_lock);
	return dst;
}
extern __inline__ void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->dst_cache;
	sk->dst_cache = dst;
	dst_release(old_dst);
}
extern __inline__ void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->dst_lock);
}
extern __inline__ void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->dst_cache;
	sk->dst_cache = NULL;
	dst_release(old_dst);
}
extern __inline__ void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->dst_lock);
}
extern __inline__ struct dst_entry *
__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk->dst_cache;

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk->dst_cache = NULL;
		return NULL;
	}

	return dst;
}
extern __inline__ struct dst_entry *
sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
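
/* Sketch of intended use (illustrative; the re-routing step is elided): an
 * output path revalidates its cached route and rebuilds it when stale:
 *
 *	struct dst_entry *dst = __sk_dst_check(sk, cookie);
 *	if (dst == NULL) {
 *		... look up a fresh route and __sk_dst_set() it ...
 *	}
 */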
/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

extern __inline__ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->wmem_alloc);
}
extern __inline__ void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->rmem_alloc);
}
extern __inline__ void skb_set_owner_c(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_cfree;
}
extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
		return -ENOMEM;

#ifdef CONFIG_FILTER
	if (sk->filter) {
		int err = 0;
		struct sk_filter *filter;

		/* It would be deadlock, if sock_queue_rcv_skb is used
		   with socket lock! We assume that users of this
		   function are lock free.
		 */
		bh_lock_sock(sk);
		if ((filter = sk->filter) != NULL && sk_filter(skb, filter))
			err = -EPERM;
		bh_unlock_sock(sk);
		if (err)
			return err;	/* Toss packet */
	}
#endif /* CONFIG_FILTER */

	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->receive_queue, skb);
	if (!sk->dead)
		sk->data_ready(sk, skb->len);
	return 0;
}
extern __inline__ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
		return -ENOMEM;

	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->error_queue, skb);
	if (!sk->dead)
		sk->data_ready(sk, skb->len);
	return 0;
}
/*
 *	Recover an error report and clear atomically
 */

extern __inline__ int sock_error(struct sock *sk)
{
	int err = xchg(&sk->err, 0);

	return -err;
}
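
/* Hypothetical call site, for illustration: transmit paths commonly check
 * for a pending asynchronous error before queueing new data:
 *
 *	err = sock_error(sk);
 *	if (err != 0)
 *		goto out;
 */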
extern __inline__ unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->shutdown & SEND_SHUTDOWN)) {
		amt = sk->sndbuf - atomic_read(&sk->wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}
/*
 * Default write policy as shown to user space via poll/select/SIGIO
 * Kernel internally doesn't use the MIN_WRITE_SPACE threshold.
 */
extern __inline__ int sock_writeable(struct sock *sk)
{
	return sock_wspace(sk) >= MIN_WRITE_SPACE;
}
extern __inline__ int gfp_any(void)
{
	return in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
}
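
/* Illustrative use (hypothetical call site): helpers that may run in either
 * process or interrupt context can pick a safe allocation mask:
 *
 *	skb = alloc_skb(size, gfp_any());
 */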
/*
 *	Enable debug/info messages
 */

#if 0
#define NETDEBUG(x)	do { } while (0)
#else
#define NETDEBUG(x)	do { x; } while (0)
#endif
/*
 *	Macros for sleeping on a socket. Use them like this:
 *
 *	SOCK_SLEEP_PRE(sk)
 *	if (condition)
 *		schedule();
 *	SOCK_SLEEP_POST(sk)
 */

#define SOCK_SLEEP_PRE(sk)	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sleep, &wait); \

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sleep, &wait); \
				}
#endif	/* _SOCK_H */