/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8) */
#define SLAB_SKB		/* Slabified skbuffs */

#define CHECKSUM_NONE 0
#define CHECKSUM_HW 1
#define CHECKSUM_UNNECESSARY 2
#ifdef __i386__
#define NET_CALLER(arg) (*(((void**)&arg)-1))
#else
#define NET_CALLER(arg) __builtin_return_address(0)
#endif
#ifdef CONFIG_NETFILTER
struct nf_conntrack {
	atomic_t use;
	void (*destroy)(struct nf_conntrack *);
};

struct nf_ct_info {
	struct nf_conntrack *master;
};
#endif
struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};
struct sk_buff {
	/* These two members must be first. */
	struct sk_buff	*next;			/* Next buffer in list */
	struct sk_buff	*prev;			/* Previous buffer in list */

	struct sk_buff_head *list;		/* List we are on */
	struct sock	*sk;			/* Socket we are owned by */
	struct timeval	stamp;			/* Time we arrived */
	struct net_device *dev;			/* Device we arrived on/are leaving by */
	/* Transport layer header */
	union
	{
		struct icmphdr	*icmph;
		struct igmphdr	*igmph;
	} h;
	/* Network layer header */
	union
	{
		struct ipv6hdr	*ipv6h;
	} nh;
	/* Link layer header */
	union
	{
		struct ethhdr	*ethernet;
	} mac;
	struct dst_entry *dst;
	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char		cb[48];
	unsigned int	len;			/* Length of actual data */
	unsigned int	csum;			/* Checksum */
	volatile char	used;			/* Data moved to user and not MSG_PEEK */
	unsigned char	cloned,			/* head may be cloned (check refcnt to be sure) */
			pkt_type,		/* Packet class */
			ip_summed;		/* Driver fed us an IP checksum */
	__u32		priority;		/* Packet queueing priority */
	atomic_t	users;			/* User count - see datagram.c, tcp.c */
	unsigned short	protocol;		/* Packet protocol from driver */
	unsigned short	security;		/* Security level of packet */
	unsigned int	truesize;		/* Buffer size */
	unsigned char	*head;			/* Head of buffer */
	unsigned char	*data;			/* Data head pointer */
	unsigned char	*tail;			/* Tail pointer */
	unsigned char	*end;			/* End pointer */
	void		(*destructor)(struct sk_buff *);	/* Destruct function */
#ifdef CONFIG_NETFILTER
	/* Can be used for communication between hooks. */
	unsigned long	nfmark;
	/* Associated connection, if any */
	struct nf_ct_info *nfct;
#ifdef CONFIG_NETFILTER_DEBUG
	unsigned int	nf_debug;
#endif
#endif /* CONFIG_NETFILTER */
#if defined(CONFIG_HIPPI)
	union {
		__u32	ifield;
	} private;
#endif
#ifdef CONFIG_NET_SCHED
	__u32		tc_index;		/* traffic control index */
#endif
};
#define SK_WMEM_MAX	65535
#define SK_RMEM_MAX	65535
#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/malloc.h>

#include <asm/system.h>
extern void		__kfree_skb(struct sk_buff *skb);
extern struct sk_buff *	skb_peek_copy(struct sk_buff_head *list);
extern struct sk_buff *	alloc_skb(unsigned int size, int priority);
extern void		kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *	skb_clone(struct sk_buff *skb, int priority);
extern struct sk_buff *	skb_copy(const struct sk_buff *skb, int priority);
extern struct sk_buff *	skb_copy_expand(const struct sk_buff *skb,
					int newheadroom,
					int newtailroom,
					int priority);
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	skb_over_panic(struct sk_buff *skb, int len, void *here);
extern void	skb_under_panic(struct sk_buff *skb, int len, void *here);
/* Backwards compatibility */
#define skb_realloc_headroom(skb, nhr) skb_copy_expand(skb, nhr, skb_tailroom(skb), GFP_ATOMIC)
static inline atomic_t *skb_datarefp(struct sk_buff *skb)
{
	return (atomic_t *)(skb->end);
}
/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(struct sk_buff_head *list)
{
	return (list->next == (struct sk_buff *) list);
}
/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}
/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */
/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
static inline void kfree_skb(struct sk_buff *skb)
{
	if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
		__kfree_skb(skb);
}
/* Use this if you didn't touch the skb state [for fast switching] */
static inline void kfree_skb_fast(struct sk_buff *skb)
{
	if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
		kfree_skbmem(skb);
}
/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(struct sk_buff *skb)
{
	return skb->cloned && atomic_read(skb_datarefp(skb)) != 1;
}
/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(struct sk_buff *skb)
{
	return (atomic_read(&skb->users) != 1);
}
/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held @pri must
 *	be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
{
	if (skb_shared(skb)) {
		struct sk_buff *nskb;
		nskb = skb_clone(skb, pri);
		kfree_skb(skb);		/* Drop our reference to the shared copy */
		return nskb;
	}
	return skb;
}
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */
/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
{
	struct sk_buff *nskb;
	if (!skb_cloned(skb))
		return skb;
	nskb = skb_copy(skb, pri);
	kfree_skb(skb);		/* Free our shared copy */
	return nskb;
}
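/*
 *	Note the difference from skb_share_check(): skb_unshare() copies the
 *	data when the skb is a clone (shared data), while skb_share_check()
 *	clones when the skb itself is shared (shared reference). A writer
 *	that must scribble on the payload typically does, illustratively:
 *
 *		skb = skb_unshare(skb, GFP_ATOMIC);
 *		if (skb == NULL)
 *			return;
 */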
/**
 *	skb_peek - peek at the head of an &sk_buff list
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}
/**
 *	skb_peek_tail - peek at the tail of an &sk_buff list
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}
/**
 *	skb_queue_len - get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(struct sk_buff_head *list_)
{
	return (list_->qlen);
}
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = (struct sk_buff *)list;
	list->next = (struct sk_buff *)list;
	list->qlen = 0;
}
/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	newsk->list = list;
	list->qlen++;
	prev = (struct sk_buff *)list;
	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
}
/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	newsk->list = list;
	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
}
/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result = next;
		next = next->next;
		list->qlen--;
		next->prev = prev;
		prev->next = next;
		result->next = NULL;
		result->prev = NULL;
		result->list = NULL;
	}
	return result;
}
/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
/*
 *	Insert a packet on a list.
 */
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
	newsk->list = list;
	list->qlen++;
}
/**
 *	skb_insert - insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *
 *	Place a packet before a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&old->list->lock, flags);
	__skb_insert(newsk, old->prev, old, old->list);
	spin_unlock_irqrestore(&old->list->lock, flags);
}
/*
 *	Place a packet after a given packet in a list.
 */
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	__skb_insert(newsk, old, old->next, old->list);
}
/**
 *	skb_append - append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&old->list->lock, flags);
	__skb_append(old, newsk);
	spin_unlock_irqrestore(&old->list->lock, flags);
}
/*
 * Remove an sk_buff from a list. _Must_ be called atomically, and with
 * the list known.
 */
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next = skb->next;
	prev = skb->prev;
	skb->next = NULL;
	skb->prev = NULL;
	skb->list = NULL;
	next->prev = prev;
	prev->next = next;
}
/**
 *	skb_unlink - remove a buffer from a list
 *	@skb: buffer to remove
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	Works even without knowing the list it is sitting on, which can be
 *	handy at times. It also means that THE LIST MUST EXIST when you
 *	unlink. Thus a list must have its contents unlinked before it is
 *	destroyed.
 */
static inline void skb_unlink(struct sk_buff *skb)
{
	struct sk_buff_head *list = skb->list;

	if (list) {
		unsigned long flags;

		spin_lock_irqsave(&list->lock, flags);
		if (skb->list == list)
			__skb_unlink(skb, skb->list);
		spin_unlock_irqrestore(&list->lock, flags);
	}
}
/* XXX: more streamlined implementation */
/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
/*
 *	Add data to an sk_buff
 */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;

	skb->tail += len;
	skb->len += len;
	return tmp;
}
/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;

	skb->tail += len;
	skb->len += len;
	if (skb->tail > skb->end) {
		skb_over_panic(skb, len, current_text_addr());
	}
	return tmp;
}
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	return skb->data;
}
/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (skb->data < skb->head) {
		skb_under_panic(skb, len, current_text_addr());
	}
	return skb->data;
}
static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	return skb->data += len;
}
/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb->len)
		return NULL;
	return __skb_pull(skb, len);
}
/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}
/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an &sk_buff.
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb->end - skb->tail;
}
/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
{
	skb->data += len;
	skb->tail += len;
}
static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	skb->len = len;
	skb->tail = skb->data + len;
}
/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 */
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len) {
		__skb_trim(skb, len);
	}
}
/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk = NULL;
}
/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
static inline void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
/**
 *	dev_alloc_skb - allocate an skbuff for sending
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	struct sk_buff *skb;

	skb = alloc_skb(length + 16, GFP_ATOMIC);
	if (skb)
		skb_reserve(skb, 16);
	return skb;
}
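/*
 *	Usage sketch (illustrative; `dev`, `frame` and `len` are hypothetical
 *	driver state, and eth_type_trans()/netif_rx() are declared in other
 *	headers): the classic driver receive path.
 *
 *		struct sk_buff *skb = dev_alloc_skb(len + 2);
 *		if (skb == NULL)
 *			return;				packet dropped
 *		skb_reserve(skb, 2);			align the IP header
 *		memcpy(skb_put(skb, len), frame, len);
 *		skb->dev = dev;
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 */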
/**
 *	skb_cow - copy a buffer if need be
 *	@skb: buffer to copy
 *	@headroom: needed headroom
 *
 *	If the buffer passed lacks sufficient headroom or is a clone then
 *	it is copied and the additional headroom made available. If there
 *	is no free memory %NULL is returned. The new buffer is returned if
 *	a copy was made (and the old one dropped a reference). The existing
 *	buffer is returned otherwise.
 *
 *	This function primarily exists to avoid making two copies when making
 *	a writable copy of a buffer and then growing the headroom.
 */
static inline struct sk_buff *
skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	headroom = (headroom + 15) & ~15;

	if ((unsigned)skb_headroom(skb) < headroom || skb_cloned(skb)) {
		struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
		kfree_skb(skb);
		skb = skb2;
	}
	return skb;
}
#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;			\
		     (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)
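/*
 *	Usage sketch (illustrative; `queue` is a hypothetical &sk_buff_head):
 *	iterate a queue while holding its lock; buffers must not be unlinked
 *	from inside the loop, since the walk follows their next pointers.
 *
 *		struct sk_buff *skb;
 *
 *		skb_queue_walk(queue, skb) {
 *			... examine skb ...
 *		}
 */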
extern struct sk_buff *	skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err);
extern unsigned int	datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
extern int		skb_copy_datagram(struct sk_buff *from, int offset, char *to, int size);
extern int		skb_copy_datagram_iovec(struct sk_buff *from, int offset, struct iovec *to, int size);
extern void		skb_free_datagram(struct sock *sk, struct sk_buff *skb);

extern void		skb_init(void);
extern void		skb_add_mtu(int mtu);
#ifdef CONFIG_NETFILTER
static inline void
nf_conntrack_put(struct nf_ct_info *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->master->use))
		nfct->master->destroy(nfct->master);
}

static inline void
nf_conntrack_get(struct nf_ct_info *nfct)
{
	if (nfct)
		atomic_inc(&nfct->master->use);
}
#endif /* CONFIG_NETFILTER */
#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */