/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/config.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/in.h>		/* struct sockaddr_in */

#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
#include <linux/in6.h>		/* struct sockaddr_in6 */
#include <linux/ipv6.h>		/* dest_cache, inet6_options */
#include <linux/icmpv6.h>
#include <net/if_inet6.h>	/* struct ipv6_mc_socklist */
#endif

#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE)
#include <linux/icmp.h>
#endif
#include <linux/tcp.h>		/* struct tcphdr */

#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <net/protocol.h>	/* struct inet_protocol */
#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
#include <net/x25.h>
#endif
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#include <net/ax25.h>
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
#include <net/netrom.h>
#endif
#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
#include <net/rose.h>
#endif
#endif

#if defined(CONFIG_PPPOE) || defined(CONFIG_PPPOE_MODULE)
#include <linux/if_pppox.h>
#include <linux/ppp_channel.h>	/* struct ppp_channel */
#endif

#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
#if defined(CONFIG_SPX) || defined(CONFIG_SPX_MODULE)
#include <net/spx.h>
#else
#include <net/ipx.h>
#endif /* CONFIG_SPX */
#endif /* CONFIG_IPX */

#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
#include <linux/atalk.h>
#endif

#if defined(CONFIG_DECNET) || defined(CONFIG_DECNET_MODULE)
#include <net/dn.h>
#endif

#if defined(CONFIG_IRDA) || defined(CONFIG_IRDA_MODULE)
#include <net/irda/irda.h>
#endif

#if defined(CONFIG_ATM) || defined(CONFIG_ATM_MODULE)
struct atm_vcc;
#endif

#ifdef CONFIG_FILTER
#include <linux/filter.h>
#endif

#include <asm/atomic.h>
#include <net/dst.h>
/* The AF_UNIX specific socket options */
struct unix_opt {
	struct unix_address	*addr;
	struct dentry		*dentry;
	struct vfsmount		*mnt;
	struct semaphore	readsem;
	struct sock		*other;
	struct sock		**list;
	struct sock		*gc_tree;
	atomic_t		inflight;
	rwlock_t		lock;
	wait_queue_head_t	peer_wait;
};
/* Once the IPX ncpd patches are in these are going into protinfo. */
#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
struct ipx_opt {
	ipx_address		dest_addr;
	ipx_interface		*intrfc;
	unsigned short		port;
#ifdef CONFIG_IPX_INTERN
	unsigned char		node[IPX_NODE_LEN];
#endif
	unsigned short		type;
	/*
	 * To handle special ncp connection-handling sockets for mars_nwe,
	 * the connection number must be stored in the socket.
	 */
	unsigned short		ipx_ncp_conn;
};
#endif
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
struct ipv6_pinfo {
	struct in6_addr		saddr;
	struct in6_addr		rcv_saddr;
	struct in6_addr		daddr;
	struct in6_addr		*daddr_cache;

	__u32			flow_label;
	__u32			frag_size;
	int			hop_limit;
	int			mcast_hops;
	int			mcast_oif;

	/* pktoption flags */
	union {
		struct {
			__u8	srcrt:2,
				rxinfo:1,
				rxhlim:1,
				hopopts:1,
				dstopts:1,
				authhdr:1,
				rxflow:1;
		} bits;
		__u8		all;
	} rxopt;

	/* sockopt flags */
	__u8			mc_loop:1,
				recverr:1,
				sndflow:1,
				pmtudisc:2;

	struct ipv6_mc_socklist	*ipv6_mc_list;
	struct ipv6_fl_socklist	*ipv6_fl_list;
	__u32			dst_cookie;

	struct ipv6_txoptions	*opt;
	struct sk_buff		*pktoptions;
};

struct raw6_opt {
	__u32			checksum;	/* perform checksum */
	__u32			offset;		/* checksum offset */

	struct icmp6_filter	filter;
};
#endif /* IPV6 */

#if defined(CONFIG_INET) || defined(CONFIG_INET_MODULE)
struct raw_opt {
	struct icmp_filter	filter;
};
#endif
#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE)
struct inet_opt
{
	int			ttl;		/* TTL setting */
	int			tos;		/* TOS */
	unsigned		cmsg_flags;
	struct ip_options	*opt;
	unsigned char		hdrincl;	/* Include headers ? */
	__u8			mc_ttl;		/* Multicasting TTL */
	__u8			mc_loop;	/* Loopback */
	unsigned		recverr : 1,
				freebind : 1;
	__u8			pmtudisc;
	int			mc_index;	/* Multicast device index */
	__u32			mc_addr;
	struct ip_mc_socklist	*mc_list;	/* Group array */
};
#endif
#if defined(CONFIG_PPPOE) || defined (CONFIG_PPPOE_MODULE)
struct pppoe_opt
{
	struct net_device	*dev;	/* device associated with socket */
	struct pppoe_addr	pa;	/* what this socket is bound to */
	struct sockaddr_pppox	relay;	/* what socket data will be
					   relayed to (PPPoE relaying) */
};

struct pppox_opt
{
	struct ppp_channel	chan;
	struct sock		*sk;
	struct pppox_opt	*next;	/* for hash table */
	union {
		struct pppoe_opt pppoe;
	} proto;
};
#define pppoe_dev	proto.pppoe.dev
#define pppoe_pa	proto.pppoe.pa
#define pppoe_relay	proto.pppoe.relay
#endif
/* This defines a selective acknowledgement block. */
struct tcp_sack_block {
	__u32	start_seq;
	__u32	end_seq;
};

struct tcp_opt {
	int	tcp_header_len;	/* Bytes of tcp header to send */

	/*
	 *	Header prediction flags
	 *	0x5?10 << 16 + snd_wnd in net byte order
	 */
	__u32	pred_flags;

	/*
	 *	RFC793 variables by their proper names. This means you can
	 *	read the code and the spec side by side (and laugh ...)
	 *	See RFC793 and RFC1122. The RFC writes these in capitals.
	 */
	__u32	rcv_nxt;	/* What we want to receive next */
	__u32	snd_nxt;	/* Next sequence we send */

	__u32	snd_una;	/* First byte we want an ack for */
	__u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
	__u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
	__u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */

	/* Delayed ACK control data */
	struct {
		__u8	pending;	/* ACK is pending */
		__u8	quick;		/* Scheduled number of quick acks */
		__u8	pingpong;	/* The session is interactive */
		__u8	blocked;	/* Delayed ACK was blocked by socket lock */
		__u32	ato;		/* Predicted tick of soft clock */
		unsigned long timeout;	/* Currently scheduled timeout */
		__u32	lrcvtime;	/* timestamp of last received data packet */
		__u16	last_seg_size;	/* Size of last incoming segment */
		__u16	rcv_mss;	/* MSS used for delayed ACK decisions */
	} ack;

	/* Data for direct copy to user */
	struct {
		struct sk_buff_head	prequeue;
		int			memory;
		struct task_struct	*task;
		struct iovec		*iov;
		int			len;
	} ucopy;

	__u32	snd_wl1;	/* Sequence for window update */
	__u32	snd_wnd;	/* The window we expect to receive */
	__u32	max_window;	/* Maximal window ever seen from peer */
	__u32	pmtu_cookie;	/* Last pmtu seen by socket */
	__u16	mss_cache;	/* Cached effective mss, not including SACKS */
	__u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
	__u16	ext_header_len;	/* Network protocol overhead (IP/IPv6 options) */
	__u8	ca_state;	/* State of fast-retransmit machine */
	__u8	retransmits;	/* Number of unrecovered RTO timeouts. */

	__u8	reordering;	/* Packet reordering metric. */
	__u8	queue_shrunk;	/* Write queue has been shrunk recently. */
	__u8	defer_accept;	/* User waits for some data after accept() */

	/* RTT measurement */
	__u8	backoff;	/* backoff */
	__u32	srtt;		/* smoothed round trip time << 3 */
	__u32	mdev;		/* mean deviation */
	__u32	rto;		/* retransmit timeout */

	__u32	packets_out;	/* Packets which are "in flight" */
	__u32	left_out;	/* Packets which have left the network */
	__u32	retrans_out;	/* Retransmitted packets out */
	/*
	 *	Slow start and congestion control (see also Nagle, and Karn & Partridge)
	 */
	__u32	snd_ssthresh;	/* Slow start size threshold */
	__u32	snd_cwnd;	/* Sending congestion window */
	__u16	snd_cwnd_cnt;	/* Linear increase counter */
	__u16	snd_cwnd_clamp;	/* Do not allow snd_cwnd to grow above this */
	__u32	snd_cwnd_used;
	__u32	snd_cwnd_stamp;

	/* Two commonly used timers in both sender and receiver paths. */
	unsigned long		timeout;
	struct timer_list	retransmit_timer;	/* Resend (no ack) */
	struct timer_list	delack_timer;		/* Ack delay */

	struct sk_buff_head	out_of_order_queue;	/* Out of order segments go here */

	struct tcp_func		*af_specific;	/* Operations which are AF_INET{4,6} specific */
	struct sk_buff		*send_head;	/* Front of stuff to transmit */

	__u32	rcv_wnd;	/* Current receiver window */
	__u32	rcv_wup;	/* rcv_nxt on last window update sent */
	__u32	write_seq;	/* Tail(+1) of data held in tcp send buffer */
	__u32	pushed_seq;	/* Last pushed seq, required to talk to windows */
	__u32	copied_seq;	/* Head of yet unread data */

	/*
	 *	Options received (usually on last packet, some only on SYN packets).
	 */
	char	tstamp_ok,	/* TIMESTAMP seen on SYN packet */
		wscale_ok,	/* Wscale seen on SYN packet */
		sack_ok;	/* SACK seen on SYN packet */
	char	saw_tstamp;	/* Saw TIMESTAMP on last packet */
	__u8	snd_wscale;	/* Window scaling received from sender */
	__u8	rcv_wscale;	/* Window scaling to send to receiver */
	__u8	nonagle;	/* Disable Nagle algorithm? */
	__u8	keepalive_probes; /* num of allowed keep alive probes */

	/* PAWS/RTTM data */
	__u32	rcv_tsval;	/* Time stamp value */
	__u32	rcv_tsecr;	/* Time stamp echo reply */
	__u32	ts_recent;	/* Time stamp to echo next */
	long	ts_recent_stamp; /* Time we stored ts_recent (for aging) */

	/* SACKs data */
	__u16	user_mss;	/* mss requested by user in ioctl */
	__u8	dsack;		/* D-SACK is scheduled */
	__u8	eff_sacks;	/* Size of SACK array to send with next packet */
	struct tcp_sack_block duplicate_sack[1];	/* D-SACK block */
	struct tcp_sack_block selective_acks[4];	/* The SACKS themselves */

	__u32	window_clamp;	/* Maximal window to advertise */
	__u32	rcv_ssthresh;	/* Current window clamp */
	__u8	probes_out;	/* unanswered 0 window probes */
	__u8	num_sacks;	/* Number of SACK blocks */
	__u16	advmss;		/* Advertised MSS */

	__u8	syn_retries;	/* num of allowed syn retries */
	__u8	ecn_flags;	/* ECN status bits. */
	__u16	prior_ssthresh;	/* ssthresh saved at recovery start */
	__u32	lost_out;	/* Lost packets */
	__u32	sacked_out;	/* SACK'd packets */
	__u32	fackets_out;	/* FACK'd packets */
	__u32	high_seq;	/* snd_nxt at onset of congestion */

	__u32	retrans_stamp;	/* Timestamp of the last retransmit,
				 * also used in SYN-SENT to remember stamp of
				 * the first SYN. */
	__u32	undo_marker;	/* tracking retrans started here. */
	int	undo_retrans;	/* number of undoable retransmissions. */
	__u32	syn_seq;	/* Seq of received SYN. */
	__u32	fin_seq;	/* Seq of received FIN. */
	__u32	urg_seq;	/* Seq of received urgent pointer */
	__u16	urg_data;	/* Saved octet of OOB data and control flags */
	__u8	pending;	/* Scheduled timer event */
	__u8	urg_mode;	/* In urgent mode */
	__u32	snd_up;		/* Urgent pointer */

	/* The syn_wait_lock is necessary only to avoid tcp_get_info having
	 * to grab the main lock sock while browsing the listening hash
	 * (otherwise it's deadlock prone).
	 * This lock is acquired in read mode only from tcp_get_info() and
	 * it's acquired in write mode _only_ from code that is actively
	 * changing the syn_wait_queue. All readers that are holding
	 * the master sock lock don't need to grab this lock in read mode
	 * too as the syn_wait_queue writes are always protected from
	 * the main sock lock.
	 */
	rwlock_t		syn_wait_lock;
	struct tcp_listen_opt	*listen_opt;

	/* FIFO of established children */
	struct open_request	*accept_queue;
	struct open_request	*accept_queue_tail;

	int			write_pending;	/* A write to socket waits to start. */

	unsigned int		keepalive_time;	/* time before keep alive takes place */
	unsigned int		keepalive_intvl; /* time interval between keep alive probes */
	int			linger2;
};
/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 *
 * The idea is to start moving to a newer struct gradually.
 *
 * IMHO the newer struct should have the following format:
 *
 *	struct sock {
 *		sockmem [mem, proto, callbacks]
 *
 *		union or struct {
 *			ax25;
 *		} ll_pinfo;
 *
 *		union {
 *			ipv4;
 *			ipv6;
 *			ipx;
 *			netrom;
 *			rose;
 *			x25;
 *		} net_pinfo;
 *
 *		union {
 *			tcp;
 *			udp;
 *			spx;
 *			netrom;
 *		} tp_pinfo;
 *	};
 *
 * The idea failed because the IPv6 transition assumes dual IP/IPv6 sockets.
 * So, net_pinfo is really IPv6 only, and protinfo unifies all the other
 * private areas.
 */
/* Define this to get the sk->debug debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if((sk) && ((sk)->debug)) printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif
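/* Illustrative use of SOCK_DEBUG (a sketch, not part of the original header):
 * a protocol can emit per-socket debug output that compiles away unless
 * SOCK_DEBUGGING is defined and sk->debug has been set (e.g. via SO_DEBUG).
 * The function name below is hypothetical.
 *
 *	static void example_state_dump(struct sock *sk)
 *	{
 *		SOCK_DEBUG(sk, "sk %p state %d rmem %d\n",
 *			   sk, sk->state, atomic_read(&sk->rmem_alloc));
 *	}
 */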
/* This is the per-socket lock. The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	unsigned int		users;
	wait_queue_head_t	wq;
} socket_lock_t;

#define sock_lock_init(__sk) \
do {	spin_lock_init(&((__sk)->lock.slock)); \
	(__sk)->lock.users = 0; \
	init_waitqueue_head(&((__sk)->lock.wq)); \
} while(0);
struct sock {
	/* Socket demultiplex comparisons on incoming packets. */
	__u32			daddr;		/* Foreign IPv4 addr */
	__u32			rcv_saddr;	/* Bound local IPv4 addr */
	__u16			dport;		/* Destination port */
	unsigned short		num;		/* Local port */
	int			bound_dev_if;	/* Bound device index if != 0 */

	/* Main hash linkage for various protocol lookup tables. */
	struct sock		*next;
	struct sock		**pprev;
	struct sock		*bind_next;
	struct sock		**bind_pprev;

	volatile unsigned char	state,		/* Connection state */
				zapped;		/* In ax25 & ipx means not linked */
	__u16			sport;		/* Source port */

	unsigned short		family;		/* Address family */
	unsigned char		reuse;		/* SO_REUSEADDR setting */
	unsigned char		shutdown;
	atomic_t		refcnt;		/* Reference count */

	socket_lock_t		lock;		/* Synchronizer... */
	int			rcvbuf;		/* Size of receive buffer in bytes */

	wait_queue_head_t	*sleep;		/* Sock wait queue */
	struct dst_entry	*dst_cache;	/* Destination cache */
	rwlock_t		dst_lock;
	atomic_t		rmem_alloc;	/* Receive queue bytes committed */
	struct sk_buff_head	receive_queue;	/* Incoming packets */
	atomic_t		wmem_alloc;	/* Transmit queue bytes committed */
	struct sk_buff_head	write_queue;	/* Packet sending queue */
	atomic_t		omem_alloc;	/* "o" is "option" or "other" */
	int			wmem_queued;	/* Persistent queue size */
	int			forward_alloc;	/* Space allocated forward. */
	__u32			saddr;		/* Sending source */
	unsigned int		allocation;	/* Allocation mode */
	int			sndbuf;		/* Size of send buffer in bytes */
	struct sock		*prev;

	/* Not all are volatile, but some are, so we might as well say they all are.
	 * XXX Make this a flag word -DaveM
	 */
	volatile char		dead,
				done,
				urginline,
				keepopen,
				linger,
				destroy,
				no_check,
				broadcast,
				bsdism;
	unsigned char		debug;
	unsigned char		rcvtstamp;
	unsigned char		userlocks;
	int			proc;
	unsigned long		lingertime;

	int			hashent;
	struct sock		*pair;
	/* The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} backlog;

	rwlock_t		callback_lock;

	/* Error queue, rarely used. */
	struct sk_buff_head	error_queue;

	struct proto		*prot;

#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
	union {
		struct ipv6_pinfo	af_inet6;
	} net_pinfo;
#endif

	union {
		struct tcp_opt		af_tcp;
#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE)
		struct raw_opt		tp_raw4;
#endif
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct raw6_opt		tp_raw;
#endif /* CONFIG_IPV6 */
#if defined(CONFIG_SPX) || defined (CONFIG_SPX_MODULE)
		struct spx_opt		af_spx;
#endif /* CONFIG_SPX */
	} tp_pinfo;

	int			err, err_soft;	/* Soft holds errors that don't
						   cause failure but are the cause
						   of a persistent failure not just
						   'timed out' */
	unsigned short		ack_backlog;
	unsigned short		max_ack_backlog;
	__u32			priority;
	unsigned short		type;
	unsigned char		localroute;	/* Route locally only */
	unsigned char		protocol;
	struct ucred		peercred;
	int			rcvlowat;
	long			rcvtimeo;
	long			sndtimeo;

#ifdef CONFIG_FILTER
	/* Socket Filtering Instructions */
	struct sk_filter	*filter;
#endif /* CONFIG_FILTER */

	/* This is where all the private (optional) areas that don't
	 * overlap will eventually live.
	 */
	union {
		void *destruct_hook;
		struct unix_opt		af_unix;
#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE)
		struct inet_opt		af_inet;
#endif
#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
		struct atalk_sock	af_at;
#endif
#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
		struct ipx_opt		af_ipx;
#endif
#if defined (CONFIG_DECNET) || defined(CONFIG_DECNET_MODULE)
		struct dn_scp		dn;
#endif
#if defined (CONFIG_PACKET) || defined(CONFIG_PACKET_MODULE)
		struct packet_opt	*af_packet;
#endif
#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
		x25_cb			*x25;
#endif
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
		ax25_cb			*ax25;
#endif
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
		nr_cb			*nr;
#endif
#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
		rose_cb			*rose;
#endif
#if defined(CONFIG_PPPOE) || defined(CONFIG_PPPOE_MODULE)
		struct pppox_opt	*pppox;
#endif
#ifdef CONFIG_NETLINK
		struct netlink_opt	*af_netlink;
#endif
#if defined(CONFIG_ECONET) || defined(CONFIG_ECONET_MODULE)
		struct econet_opt	*af_econet;
#endif
#if defined(CONFIG_ATM) || defined(CONFIG_ATM_MODULE)
		struct atm_vcc		*af_atm;
#endif
#if defined(CONFIG_IRDA) || defined(CONFIG_IRDA_MODULE)
		struct irda_sock	*irda;
#endif
	} protinfo;

	/* This part is used for the timeout functions. */
	struct timer_list	timer;		/* This is the sock cleanup timer. */
	struct timeval		stamp;

	/* Identd and reporting IO signals */
	struct socket		*socket;

	/* RPC layer private data */
	void			*user_data;

	/* Callbacks */
	void			(*state_change)(struct sock *sk);
	void			(*data_ready)(struct sock *sk, int bytes);
	void			(*write_space)(struct sock *sk);
	void			(*error_report)(struct sock *sk);

	int			(*backlog_rcv)(struct sock *sk,
					       struct sk_buff *skb);
	void			(*destruct)(struct sock *sk);
};

/* The per-socket spinlock must be held here. */
#define sk_add_backlog(__sk, __skb)			\
do {	if((__sk)->backlog.tail == NULL) {		\
		(__sk)->backlog.head =			\
		(__sk)->backlog.tail = (__skb);		\
	} else {					\
		((__sk)->backlog.tail)->next = (__skb);	\
		(__sk)->backlog.tail = (__skb);		\
	}						\
	(__skb)->next = NULL;				\
} while(0)
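/* Illustrative receive-path pattern (a sketch, not part of the original
 * header): a protocol's softirq handler takes the bh lock and either
 * processes the skb immediately via backlog_rcv() or, if a user context
 * currently owns the socket, defers it with sk_add_backlog(); the backlog
 * is then drained when the owner calls release_sock(). The function name
 * below is hypothetical.
 *
 *	static int example_do_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int ret = 0;
 *
 *		bh_lock_sock(sk);
 *		if (sk->lock.users == 0)
 *			ret = sk->backlog_rcv(sk, skb);
 *		else
 *			sk_add_backlog(sk, skb);
 *		bh_unlock_sock(sk);
 *		return ret;
 *	}
 */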
/* IP protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					 long timeout);
	int			(*connect)(struct sock *sk,
					   struct sockaddr *uaddr,
					   int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					      int optname, char *optval, int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					      int optname, char *optval,
					      int *option);
	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
					   int len);
	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
					   int len, int noblock, int flags,
					   int *addr_len);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv)(struct sock *sk,
					       struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	char			name[32];

	struct {
		int inuse;
		u8  __pad[SMP_CACHE_BYTES - sizeof(int)];
	} stats[NR_CPUS];
};
/* Called with local bh disabled */
static void __inline__ sock_prot_inc_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse++;
}

static void __inline__ sock_prot_dec_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse--;
}
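/* Illustrative sketch (not part of the original header): a protocol's
 * hash() callback would typically bump the per-CPU inuse counter, with
 * bottom halves disabled, while it links the socket into its own lookup
 * table. The names example_hash, example_table and example_table_lock are
 * hypothetical.
 *
 *	static void example_hash(struct sock *sk)
 *	{
 *		local_bh_disable();
 *		write_lock(&example_table_lock);
 *		sk->next = example_table;
 *		example_table = sk;
 *		sock_prot_inc_use(sk->prot);
 *		write_unlock(&example_table_lock);
 *		local_bh_enable();
 *	}
 */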
/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8
/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue. This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also an exclusive sleep lock serializing
 * accesses from user process context.
 */
extern void __lock_sock(struct sock *sk);
extern void __release_sock(struct sock *sk);
#define lock_sock(__sk) \
do {	spin_lock_bh(&((__sk)->lock.slock)); \
	if ((__sk)->lock.users != 0) \
		__lock_sock(__sk); \
	(__sk)->lock.users = 1; \
	spin_unlock_bh(&((__sk)->lock.slock)); \
} while(0)

#define release_sock(__sk) \
do {	spin_lock_bh(&((__sk)->lock.slock)); \
	if ((__sk)->backlog.tail != NULL) \
		__release_sock(__sk); \
	(__sk)->lock.users = 0; \
	if (waitqueue_active(&((__sk)->lock.wq))) wake_up(&((__sk)->lock.wq)); \
	spin_unlock_bh(&((__sk)->lock.slock)); \
} while(0)

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->lock.slock))
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->lock.slock))
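/* Illustrative locking pattern (a sketch, not part of the original header):
 * process context takes the exclusive lock with lock_sock() before touching
 * socket state and drops it with release_sock(), which also runs any packets
 * queued on the backlog in the meantime; softirq context must use only
 * bh_lock_sock()/bh_unlock_sock(). The function name below is hypothetical.
 *
 *	static int example_set_rcvlowat(struct sock *sk, int val)
 *	{
 *		lock_sock(sk);
 *		sk->rcvlowat = val ? : 1;
 *		release_sock(sk);
 *		return 0;
 *	}
 */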
/*
 * This might not be the most appropriate place for these two,
 * but since they are used by a lot of the net-related code,
 * at least they get declared in an include that is common to all.
 */

static __inline__ int min(unsigned int a, unsigned int b)
{
	if (a > b)
		a = b;
	return a;
}

static __inline__ int max(unsigned int a, unsigned int b)
{
	if (a < b)
		a = b;
	return a;
}
extern struct sock *	sk_alloc(int family, int priority, int zero_it);
extern void		sk_free(struct sock *sk);

extern struct sk_buff	*sock_wmalloc(struct sock *sk,
				      unsigned long size, int force,
				      int priority);
extern struct sk_buff	*sock_rmalloc(struct sock *sk,
				      unsigned long size, int force,
				      int priority);
extern void		sock_wfree(struct sk_buff *skb);
extern void		sock_rfree(struct sk_buff *skb);

extern int		sock_setsockopt(struct socket *sock, int level,
					int op, char *optval,
					int optlen);

extern int		sock_getsockopt(struct socket *sock, int level,
					int op, char *optval,
					int *optlen);
extern struct sk_buff	*sock_alloc_send_skb(struct sock *sk,
					     unsigned long size,
					     unsigned long fallback,
					     int noblock,
					     int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size, int priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);

extern int copy_and_csum_toiovec(struct iovec *iov, struct sk_buff *skb, int hlen);
/*
 *	Functions to fill in entries in struct proto_ops when a protocol
 *	does not implement a particular function.
 */
extern int	sock_no_release(struct socket *);
extern int	sock_no_bind(struct socket *,
			     struct sockaddr *, int);
extern int	sock_no_connect(struct socket *,
				struct sockaddr *, int, int);
extern int	sock_no_socketpair(struct socket *,
				   struct socket *);
extern int	sock_no_accept(struct socket *,
			       struct socket *, int);
extern int	sock_no_getname(struct socket *,
				struct sockaddr *, int *, int);
extern unsigned int sock_no_poll(struct file *, struct socket *,
				 struct poll_table_struct *);
extern int	sock_no_ioctl(struct socket *, unsigned int,
			      unsigned long);
extern int	sock_no_listen(struct socket *, int);
extern int	sock_no_shutdown(struct socket *, int);
extern int	sock_no_getsockopt(struct socket *, int, int,
				   char *, int *);
extern int	sock_no_setsockopt(struct socket *, int, int,
				   char *, int);
extern int	sock_no_fcntl(struct socket *,
			      unsigned int, unsigned long);
extern int	sock_no_sendmsg(struct socket *,
				struct msghdr *, int,
				struct scm_cookie *);
extern int	sock_no_recvmsg(struct socket *,
				struct msghdr *, int, int,
				struct scm_cookie *);
extern int	sock_no_mmap(struct file *file,
			     struct socket *sock,
			     struct vm_area_struct *vma);
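/* Illustrative sketch (not part of the original header): a protocol family
 * that does not support some operations can point the corresponding
 * proto_ops entries at these stubs instead of providing its own handlers.
 * The (heavily abbreviated) ops table below is hypothetical.
 *
 *	struct proto_ops example_dgram_ops = {
 *		family:		AF_UNSPEC,
 *		socketpair:	sock_no_socketpair,
 *		accept:		sock_no_accept,
 *		listen:		sock_no_listen,
 *		shutdown:	sock_no_shutdown,
 *		mmap:		sock_no_mmap,
 *	};
 */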
/*
 *	Default socket callbacks and setup code
 */

extern void sock_def_destruct(struct sock *);

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

extern void sklist_remove_socket(struct sock **list, struct sock *sk);
extern void sklist_insert_socket(struct sock **list, struct sock *sk);
extern void sklist_destroy_socket(struct sock **list, struct sock *sk);
#ifdef CONFIG_FILTER

/**
 *	sk_filter - run a packet through a socket filter
 *	@skb: buffer to filter
 *	@filter: filter to apply
 *
 *	Run the filter code and then cut skb->data to correct size returned by
 *	sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
 *	than pkt_len we keep whole skb->data. This is the socket level
 *	wrapper to sk_run_filter. It returns 0 if the packet should
 *	be accepted or 1 if the packet should be tossed.
 */

static inline int sk_filter(struct sk_buff *skb, struct sk_filter *filter)
{
	int pkt_len;

	pkt_len = sk_run_filter(skb, filter->insns, filter->len);
	if(!pkt_len)
		return 1;	/* Toss Packet */
	else
		skb_trim(skb, pkt_len);

	return 0;
}

/**
 *	sk_filter_release - release a socket filter
 *	@sk: socket
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->omem_alloc);

	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->omem_alloc);
}

#endif /* CONFIG_FILTER */
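/* Illustrative sketch (not part of the original header, and not the actual
 * SO_ATTACH_FILTER implementation): installing a new filter would pair a
 * sk_filter_charge() on the new filter with a later sk_filter_release() of
 * the old one, while the receive path runs sk_filter() as
 * sock_queue_rcv_skb() does below. The function name is hypothetical.
 *
 *	static void example_install_filter(struct sock *sk, struct sk_filter *fp)
 *	{
 *		struct sk_filter *old;
 *
 *		sk_filter_charge(sk, fp);
 *		spin_lock_bh(&sk->lock.slock);
 *		old = sk->filter;
 *		sk->filter = fp;
 *		spin_unlock_bh(&sk->lock.slock);
 *		if (old)
 *			sk_filter_release(sk, old);
 *	}
 */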
/*
 *	Socket reference counting postulates.
 *
 *	* Each user of a socket SHOULD hold a reference count.
 *	* Each access point to a socket (a hash table bucket, reference from a list,
 *	  running timer, skb in flight) MUST hold a reference count.
 *	* When reference count hits 0, it means it will never increase back.
 *	* When reference count hits 0, it means that no references from
 *	  outside exist to this socket and current process on current CPU
 *	  is last user and may/should destroy this socket.
 *	* sk_free is called from any context: process, BH, IRQ. When
 *	  it is called, socket has no references from outside -> sk_free
 *	  may release descendant resources allocated by the socket, but
 *	  to the time when it is called, socket is NOT referenced by any
 *	  hash tables, lists etc.
 *	* Packets, delivered from outside (from network or from another process)
 *	  and enqueued on receive/error queues SHOULD NOT grab a reference count,
 *	  while they sit in queue. Otherwise, packets will leak when the socket
 *	  is looked up by one CPU and unhashing is done by another CPU.
 *	  It is true for udp/raw, netlink (leak to receive and error queues), tcp
 *	  (leak to backlog). Packet socket does all the processing inside
 *	  BR_NETPROTO_LOCK, so that it does not have this race condition. UNIX
 *	  sockets use a separate SMP lock, so that they are prone too.
 */
/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed f.e. it is found in hash table
   or a list and the lookup is made under lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->refcnt);
}

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->refcnt))
		sk_free(sk);
}
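/* Illustrative lookup pattern (a sketch, not part of the original header):
 * a caller that finds a socket in a protocol hash table under the table
 * lock takes a reference with sock_hold() before dropping that lock, and
 * releases it with sock_put() when done. The table names are hypothetical.
 *
 *	static struct sock *example_lookup(unsigned short port)
 *	{
 *		struct sock *sk;
 *
 *		read_lock(&example_table_lock);
 *		for (sk = example_table; sk; sk = sk->next)
 *			if (sk->num == port)
 *				break;
 *		if (sk)
 *			sock_hold(sk);
 *		read_unlock(&example_table_lock);
 *		return sk;
 *	}
 */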
/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that parent inode held reference count on this struct sock,
 * we do not release it in this function, because protocol
 * probably wants some additional cleanups or even continuing
 * to work with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->callback_lock);
	sk->dead = 1;
	sk->socket = NULL;
	sk->sleep = NULL;
	write_unlock_bh(&sk->callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->callback_lock);
	sk->sleep = &parent->wait;
	parent->sk = sk;
	sk->socket = parent;
	write_unlock_bh(&sk->callback_lock);
}

static inline int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->callback_lock);
	uid = sk->socket ? sk->socket->inode->i_uid : 0;
	read_unlock(&sk->callback_lock);
	return uid;
}

static inline unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->callback_lock);
	ino = sk->socket ? sk->socket->inode->i_ino : 0;
	read_unlock(&sk->callback_lock);
	return ino;
}
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->dst_lock);
	dst = sk->dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->dst_lock);
	return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->dst_cache;
	sk->dst_cache = dst;
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->dst_cache;
	sk->dst_cache = NULL;
	dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->dst_lock);
}

static inline struct dst_entry *
__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk->dst_cache;

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk->dst_cache = NULL;
		return NULL;
	}

	return dst;
}

static inline struct dst_entry *
sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		return NULL;
	}

	return dst;
}
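/* Illustrative output-path usage (a sketch, not part of the original
 * header): a transmit routine revalidates the cached route with
 * sk_dst_check() and, if it has gone stale, performs a fresh route lookup
 * and installs the result with sk_dst_set(). The rerouting helper named
 * here is hypothetical.
 *
 *	static struct dst_entry *example_route_for_output(struct sock *sk)
 *	{
 *		struct dst_entry *dst = sk_dst_check(sk, 0);
 *
 *		if (dst == NULL) {
 *			dst = example_reroute(sk);
 *			if (dst)
 *				sk_dst_set(sk, dst_clone(dst));
 *		}
 *		return dst;
 *	}
 */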
/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->rmem_alloc);
}
static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
		return -ENOMEM;

#ifdef CONFIG_FILTER
	if (sk->filter) {
		int err = 0;
		struct sk_filter *filter;

		/* It would be deadlock, if sock_queue_rcv_skb is used
		   with socket lock! We assume that users of this
		   function are lock free.
		 */
		bh_lock_sock(sk);
		if ((filter = sk->filter) != NULL && sk_filter(skb, filter))
			err = -EPERM;
		bh_unlock_sock(sk);
		if (err)
			return err;	/* Toss packet */
	}
#endif /* CONFIG_FILTER */

	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->receive_queue, skb);
	if (!sk->dead)
		sk->data_ready(sk, skb->len);
	return 0;
}

static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->error_queue, skb);
	if (!sk->dead)
		sk->data_ready(sk, skb->len);
	return 0;
}
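/* Illustrative datagram receive handler (a sketch, not part of the original
 * header): a protocol that has located the destination socket simply hands
 * the skb to sock_queue_rcv_skb() and frees it on failure, relying on the
 * helper to charge sk->rmem_alloc, run any socket filter and wake readers
 * through sk->data_ready(). The function name is hypothetical.
 *
 *	static int example_queue_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);
 *			return -1;
 *		}
 *		return 0;
 *	}
 */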
/*
 *	Recover an error report and clear atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err = xchg(&sk->err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->shutdown & SEND_SHUTDOWN)) {
		amt = sk->sndbuf - atomic_read(&sk->wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->socket && sk->socket->fasync_list)
		sock_wake_async(sk->socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256
/* Must be less or equal SOCK_MIN_SNDBUF */
#define SOCK_MIN_WRITE_SPACE	SOCK_MIN_SNDBUF

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 *	Kernel internally doesn't use the MIN_WRITE_SPACE threshold.
 */
static inline int sock_writeable(struct sock *sk)
{
	return sock_wspace(sk) >= SOCK_MIN_WRITE_SPACE;
}

static inline int gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->rcvtimeo;
}

static inline long sock_sndtimeo(struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sndtimeo;
}

static inline int sock_rcvlowat(struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min(sk->rcvlowat, len)) ? : 1;
}
/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}
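/* Illustrative blocking-receive skeleton (a sketch, not part of the
 * original header): the timeout is derived once from sock_rcvtimeo(), any
 * pending error is fetched and cleared with sock_error(), and an
 * interrupting signal is converted with sock_intr_errno() so that a wait
 * with an explicit timeout is not silently restarted. The wait helper named
 * here is hypothetical.
 *
 *	static int example_wait_for_data(struct sock *sk, int noblock)
 *	{
 *		long timeo = sock_rcvtimeo(sk, noblock);
 *		int err;
 *
 *		while (skb_queue_empty(&sk->receive_queue)) {
 *			err = sock_error(sk);
 *			if (err)
 *				return err;
 *			if (!timeo)
 *				return -EAGAIN;
 *			if (signal_pending(current))
 *				return sock_intr_errno(timeo);
 *			timeo = example_wait_event(sk, timeo);
 *		}
 *		return 0;
 *	}
 */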
static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	if (sk->rcvtstamp)
		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(skb->stamp), &skb->stamp);
	else
		sk->stamp = skb->stamp;
}

/*
 *	Enable debug/info messages
 */

#if 0
#define NETDEBUG(x)	do { } while (0)
#else
#define NETDEBUG(x)	do { x; } while (0)
#endif
/*
 *	Macros for sleeping on a socket. Use them like this:
 *
 *	SOCK_SLEEP_PRE(sk)
 *	if (condition)
 *		schedule();
 *	SOCK_SLEEP_POST(sk)
 */

#define SOCK_SLEEP_PRE(sk)	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sleep, &wait); \
				release_sock(sk);

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sleep, &wait); \
				lock_sock(sk); \
				}
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

#endif	/* _SOCK_H */