/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8) */
#define SLAB_SKB		/* Slabified skbuffs */

#define CHECKSUM_NONE 0
#define CHECKSUM_HW 1
#define CHECKSUM_UNNECESSARY 2

#ifdef __i386__
#define NET_CALLER(arg) (*(((void**)&arg)-1))
#else
#define NET_CALLER(arg) __builtin_return_address(0)
#endif
struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff	*next;			/* Next buffer in list */
	struct sk_buff	*prev;			/* Previous buffer in list */

	struct sk_buff_head *list;		/* List we are on */
	struct sock	*sk;			/* Socket we are owned by */
	struct timeval	stamp;			/* Time we arrived */
	struct net_device *dev;			/* Device we arrived on/are leaving by */
	struct net_device *rx_dev;

	/* Transport layer header */
	union
	{
		struct tcphdr	*th;
		struct udphdr	*uh;
		struct icmphdr	*icmph;
		struct igmphdr	*igmph;
		struct iphdr	*ipiph;
		struct spxhdr	*spxh;
		unsigned char	*raw;
	} h;

	/* Network layer header */
	union
	{
		struct iphdr	*iph;
		struct ipv6hdr	*ipv6h;
		struct arphdr	*arph;
		struct ipxhdr	*ipxh;
		unsigned char	*raw;
	} nh;

	/* Link layer header */
	union
	{
		struct ethhdr	*ethernet;
		unsigned char	*raw;
	} mac;

	struct dst_entry *dst;

	char		cb[48];

	unsigned int	len;			/* Length of actual data */
	unsigned int	csum;			/* Checksum */
	volatile char	used;			/* Data moved to user and not MSG_PEEK */
	unsigned char	is_clone,		/* We are a clone */
			cloned,			/* head may be cloned (check refcnt to be sure) */
			pkt_type,		/* Packet class */
			pkt_bridged,		/* Tracker for bridging */
			ip_summed;		/* Driver fed us an IP checksum */
	__u32		priority;		/* Packet queueing priority */
	atomic_t	users;			/* User count - see datagram.c, tcp.c */
	unsigned short	protocol;		/* Packet protocol from driver */
	unsigned short	security;		/* Security level of packet */
	unsigned int	truesize;		/* Buffer size */

	unsigned char	*head;			/* Head of buffer */
	unsigned char	*data;			/* Data head pointer */
	unsigned char	*tail;			/* Tail pointer */
	unsigned char	*end;			/* End pointer */
	void		(*destructor)(struct sk_buff *);	/* Destruct function */
#ifdef CONFIG_NETFILTER
	/* Can be used for communication between hooks. */
	unsigned long	nfmark;
	/* Reason for doing this to the packet (see netfilter.h) */
	__u32		nfreason;
	/* Cache info */
	__u32		nfcache;
#ifdef CONFIG_NETFILTER_DEBUG
	unsigned int	nf_debug;
#endif
#endif /* CONFIG_NETFILTER */
#if defined(CONFIG_SHAPER) || defined(CONFIG_SHAPER_MODULE)
	__u32		shapelatency;		/* Latency on frame */
	__u32		shapeclock;		/* Time it should go out */
	__u32		shapelen;		/* Frame length in clocks */
	__u32		shapestamp;		/* Stamp for shaper */
	__u16		shapepend;		/* Pending */
#endif
#if defined(CONFIG_HIPPI)
	union {
		__u32	ifield;
	} private;
#endif
};
/* These are just the default values. This is run time configurable.
 * FIXME: Probably the config option should go away. -- erics
 */
#ifdef CONFIG_SKB_LARGE
#define SK_WMEM_MAX	65535
#define SK_RMEM_MAX	65535
#else
#define SK_WMEM_MAX	32767
#define SK_RMEM_MAX	32767
#endif
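/*
 * Editorial note (not part of the original header): "run time configurable"
 * above refers to the net.core sysctls; these constants typically provide
 * the defaults behind /proc/sys/net/core/wmem_max and
 * /proc/sys/net/core/rmem_max, while CONFIG_SKB_LARGE only selects the
 * compile-time default.
 */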
#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/malloc.h>

#include <asm/system.h>
extern void		__kfree_skb(struct sk_buff *skb);
extern void		skb_queue_head_init(struct sk_buff_head *list);
extern void		skb_queue_head(struct sk_buff_head *list, struct sk_buff *buf);
extern void		skb_queue_tail(struct sk_buff_head *list, struct sk_buff *buf);
extern struct sk_buff *	skb_dequeue(struct sk_buff_head *list);
extern void		skb_insert(struct sk_buff *old, struct sk_buff *newsk);
extern void		skb_append(struct sk_buff *old, struct sk_buff *newsk);
extern void		skb_unlink(struct sk_buff *buf);
extern __u32		skb_queue_len(struct sk_buff_head *list);
extern struct sk_buff *	skb_peek_copy(struct sk_buff_head *list);
extern struct sk_buff *	alloc_skb(unsigned int size, int priority);
extern struct sk_buff *	dev_alloc_skb(unsigned int size);
extern void		kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *	skb_clone(struct sk_buff *skb, int priority);
extern struct sk_buff *	skb_copy(struct sk_buff *skb, int priority);
extern struct sk_buff *	skb_realloc_headroom(struct sk_buff *skb, int newheadroom);
#define dev_kfree_skb(a)	kfree_skb(a)
extern unsigned char *	skb_put(struct sk_buff *skb, unsigned int len);
extern unsigned char *	skb_push(struct sk_buff *skb, unsigned int len);
extern unsigned char *	skb_pull(struct sk_buff *skb, unsigned int len);
extern int		skb_headroom(struct sk_buff *skb);
extern int		skb_tailroom(struct sk_buff *skb);
extern void		skb_reserve(struct sk_buff *skb, unsigned int len);
extern void		skb_trim(struct sk_buff *skb, unsigned int len);
extern void		skb_over_panic(struct sk_buff *skb, int len, void *here);
extern void		skb_under_panic(struct sk_buff *skb, int len, void *here);
extern __inline__ atomic_t *skb_datarefp(struct sk_buff *skb)
{
	return (atomic_t *)(skb->end);
}

extern __inline__ int skb_queue_empty(struct sk_buff_head *list)
{
	return (list->next == (struct sk_buff *) list);
}
extern __inline__ void kfree_skb(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users))
		__kfree_skb(skb);
}

/* Use this if you didn't touch the skb state [for fast switching] */
extern __inline__ void kfree_skb_fast(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users))
		kfree_skbmem(skb);
}

extern __inline__ int skb_cloned(struct sk_buff *skb)
{
	return skb->cloned && atomic_read(skb_datarefp(skb)) != 1;
}

extern __inline__ int skb_shared(struct sk_buff *skb)
{
	return (atomic_read(&skb->users) != 1);
}

extern __inline__ struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
{
	if (skb_shared(skb)) {
		struct sk_buff *nskb;

		nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		return nskb;
	}
	return skb;
}
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

extern __inline__ struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
{
	struct sk_buff *nskb;

	if (!skb_cloned(skb))
		return skb;
	nskb = skb_copy(skb, pri);
	kfree_skb(skb);		/* Free our shared copy */
	return nskb;
}
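/*
 * Editorial sketch (not part of the original header): skb_unshare() is the
 * usual guard before writing to packet data that another reader may also
 * hold a clone of (the tcpdump-while-forwarding case above).  A hypothetical
 * caller looks like:
 *
 *	skb = skb_unshare(skb, GFP_ATOMIC);
 *	if (skb == NULL)
 *		return;		(the copy failed; the original was already freed)
 *	... it is now safe to modify skb->data ...
 */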
/*
 *	Peek an sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. On an interrupt
 *	driven system, cli(), peek the buffer, copy the data and sti().
 */

extern __inline__ struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;

	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}
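/*
 * Editorial sketch (not part of the original header): on an SMP kernel the
 * equivalent of the cli()/sti() advice above is to hold the queue lock
 * around both the peek and the copy.  A hypothetical reader of a queue 'q':
 *
 *	unsigned long flags;
 *	struct sk_buff *skb;
 *
 *	spin_lock_irqsave(&q->lock, flags);
 *	skb = skb_peek(q);
 *	if (skb != NULL)
 *		memcpy(tmp, skb->data, skb->len);	(tmp must be large enough)
 *	spin_unlock_irqrestore(&q->lock, flags);
 */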
extern __inline__ struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;

	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}
/*
 *	Return the length of an sk_buff queue
 */

extern __inline__ __u32 skb_queue_len(struct sk_buff_head *list_)
{
	return list_->qlen;
}
extern __inline__ void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = (struct sk_buff *)list;
	list->next = (struct sk_buff *)list;
	list->qlen = 0;
}
/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */

extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	newsk->list = list;
	list->qlen++;
	prev = (struct sk_buff *)list;
	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
}

extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
/*
 *	Insert an sk_buff at the end of a list.
 */

extern __inline__ void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	newsk->list = list;
	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
}

extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
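/*
 * Editorial sketch (not part of the original header): the "__skb_xxxx()"
 * rule above means the double-underscore variants are only safe when the
 * caller already holds list->lock with interrupts disabled (or the list is
 * not yet visible to anyone else).  A hypothetical caller queueing a batch:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	__skb_queue_tail(list, skb1);
 *	__skb_queue_tail(list, skb2);
 *	spin_unlock_irqrestore(&list->lock, flags);
 *
 * For a single buffer, skb_queue_tail(list, skb) does the locking itself.
 */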
/*
 *	Remove an sk_buff from a list.
 */

extern __inline__ struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result = next;
		next = next->next;
		list->qlen--;
		next->prev = prev;
		prev->next = next;
		result->next = NULL;
		result->prev = NULL;
		result->list = NULL;
	}
	return result;
}

extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
/*
 *	Insert a packet on a list.
 */

extern __inline__ void __skb_insert(struct sk_buff *newsk,
	struct sk_buff *prev, struct sk_buff *next,
	struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
	newsk->list = list;
	list->qlen++;
}

/*
 *	Place a packet before a given packet in a list
 */
extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&old->list->lock, flags);
	__skb_insert(newsk, old->prev, old, old->list);
	spin_unlock_irqrestore(&old->list->lock, flags);
}
/*
 *	Place a packet after a given packet in a list.
 */

extern __inline__ void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	__skb_insert(newsk, old, old->next, old->list);
}

extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&old->list->lock, flags);
	__skb_append(old, newsk);
	spin_unlock_irqrestore(&old->list->lock, flags);
}
/*
 *	Remove an sk_buff from a list. _Must_ be called atomically, and with
 *	the list known.
 */
extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next = skb->next;
	prev = skb->prev;
	skb->next = NULL;
	skb->prev = NULL;
	skb->list = NULL;
	next->prev = prev;
	prev->next = next;
}
/*
 *	Remove an sk_buff from its list. Works even without knowing the list it
 *	is sitting on, which can be handy at times. It also means that THE LIST
 *	MUST EXIST when you unlink. Thus a list must have its contents unlinked
 *	first.
 */
extern __inline__ void skb_unlink(struct sk_buff *skb)
{
	struct sk_buff_head *list = skb->list;

	if (list) {
		unsigned long flags;

		spin_lock_irqsave(&list->lock, flags);
		if (skb->list == list)
			__skb_unlink(skb, skb->list);
		spin_unlock_irqrestore(&list->lock, flags);
	}
}
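/*
 * Editorial note (not part of the original header): the re-check of
 * skb->list after taking the lock above guards against the buffer having
 * been unlinked, or moved to another list, between reading skb->list and
 * acquiring list->lock.
 */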
/* XXX: more streamlined implementation */
extern __inline__ struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);

	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

extern __inline__ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
/*
 *	Add data to an sk_buff
 */

extern __inline__ unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;

	skb->tail += len;
	skb->len += len;
	return tmp;
}

extern __inline__ unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;

	skb->tail += len;
	skb->len += len;
	if (skb->tail > skb->end) {
		skb_over_panic(skb, len, current_text_addr());
	}
	return tmp;
}
extern __inline__ unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	return skb->data;
}

extern __inline__ unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (skb->data < skb->head) {
		skb_under_panic(skb, len, current_text_addr());
	}
	return skb->data;
}
extern __inline__ char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	return skb->data += len;
}

extern __inline__ unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb->len)
		return NULL;
	return __skb_pull(skb, len);
}
extern __inline__ int skb_headroom(struct sk_buff *skb)
{
	return skb->data - skb->head;
}

extern __inline__ int skb_tailroom(struct sk_buff *skb)
{
	return skb->end - skb->tail;
}
extern __inline__ void skb_reserve(struct sk_buff *skb, unsigned int len)
{
	skb->data += len;
	skb->tail += len;
}

extern __inline__ void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	skb->len = len;
	skb->tail = skb->data + len;
}

extern __inline__ void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len) {
		__skb_trim(skb, len);
	}
}
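/*
 * Editorial sketch (not part of the original header): a hypothetical
 * Ethernet driver receive path strings the helpers above together roughly
 * as follows:
 *
 *	struct sk_buff *skb = dev_alloc_skb(pkt_len + 2);
 *	if (skb == NULL)
 *		return;				(drop the frame)
 *	skb_reserve(skb, 2);			(16 byte align the IP header)
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->dev = dev;
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * skb_put() extends the data area at the tail, skb_push()/skb_pull() move
 * the data pointer at the head, and skb_trim() cuts the buffer back down.
 */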
extern __inline__ void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk = NULL;
}
extern __inline__ void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

extern __inline__ void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
extern __inline__ struct sk_buff *dev_alloc_skb(unsigned int length)
{
	struct sk_buff *skb;

	skb = alloc_skb(length+16, GFP_ATOMIC);
	if (skb)
		skb_reserve(skb, 16);
	return skb;
}

extern __inline__ struct sk_buff *
skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	headroom = (headroom+15)&~15;

	if ((unsigned)skb_headroom(skb) < headroom || skb_cloned(skb)) {
		struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);

		kfree_skb(skb);
		skb = skb2;
	}
	return skb;
}
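/*
 * Editorial sketch (not part of the original header): skb_cow() is typically
 * called before rewriting headers on a buffer that may be cloned or short on
 * headroom.  A hypothetical caller:
 *
 *	skb = skb_cow(skb, dev->hard_header_len);
 *	if (skb == NULL)
 *		return;		(reallocation failed; the old skb was already freed)
 *	... skb now has a private, writable header area ...
 */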
extern struct sk_buff *	skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err);
extern unsigned int	datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
extern int		skb_copy_datagram(struct sk_buff *from, int offset, char *to, int size);
extern int		skb_copy_datagram_iovec(struct sk_buff *from, int offset, struct iovec *to, int size);
extern void		skb_free_datagram(struct sock *sk, struct sk_buff *skb);

extern void		skb_init(void);
extern void		skb_add_mtu(int mtu);

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */