/*
 * Import 2.3.25pre1 -- include/linux/netfilter.h
 * (historical import; original blob 89c05f9f0bd966d444f07b40879cae17cfdaf127)
 */
1 #ifndef __LINUX_NETFILTER_H
2 #define __LINUX_NETFILTER_H
4 #ifdef __KERNEL__
5 #include <linux/init.h>
6 #include <linux/types.h>
7 #include <linux/skbuff.h>
8 #include <linux/net.h>
9 #include <linux/wait.h>
10 #include <linux/list.h>
11 #endif
/* Responses from hook functions. */
#define NF_DROP 0
#define NF_ACCEPT 1
#define NF_STOLEN 2
#define NF_QUEUE 3
#define NF_MAX_VERDICT NF_QUEUE

/* Generic cache responses from hook functions.
   Distinct high bits (0x8000 / 0x4000), so they can be combined as
   flags alongside other values. */
#define NFC_ALTERED 0x8000
#define NFC_UNKNOWN 0x4000
24 #ifdef __KERNEL__
25 #include <linux/config.h>
26 #ifdef CONFIG_NETFILTER
28 extern void netfilter_init(void);
30 /* Largest hook number + 1 */
31 #define NF_MAX_HOOKS 5
33 struct sk_buff;
34 struct net_device;
/*
 * Hook function: called per packet; returns one of the NF_* verdict
 * responses above.  skb is passed by reference, presumably so the hook
 * can substitute a different buffer -- verify against nf_hook_slow().
 */
typedef unsigned int nf_hookfn(unsigned int hooknum,
			       struct sk_buff **skb,
			       const struct net_device *in,
			       const struct net_device *out);

/*
 * Cache flush callback: receives per-entry packet and byte counters
 * for the cache entry being flushed.
 */
typedef unsigned int nf_cacheflushfn(const void *packet,
				     const struct net_device *in,
				     const struct net_device *out,
				     u_int32_t packetcount,
				     u_int32_t bytecount);
47 struct nf_hook_ops
49 struct list_head list;
51 /* User fills in from here down. */
52 nf_hookfn *hook;
53 nf_cacheflushfn *flush;
54 int pf;
55 int hooknum;
56 /* Hooks are ordered in ascending priority. */
57 int priority;
60 struct nf_sockopt_ops
62 struct list_head list;
64 int pf;
66 /* Non-inclusive ranges: use 0/0/NULL to never get called. */
67 int set_optmin;
68 int set_optmax;
69 int (*set)(struct sock *sk, int optval, void *user, unsigned int len);
71 int get_optmin;
72 int get_optmax;
73 int (*get)(struct sock *sk, int optval, void *user, int *len);
76 /* Function to register/unregister hook points. */
77 int nf_register_hook(struct nf_hook_ops *reg);
78 void nf_unregister_hook(struct nf_hook_ops *reg);
80 /* Functions to register get/setsockopt ranges (non-inclusive). You
81 need to check permissions yourself! */
82 int nf_register_sockopt(struct nf_sockopt_ops *reg);
83 void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
85 extern struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
/* Activate hook/flush; either okfn or kfree_skb called, unless a hook
   returns NF_STOLEN (in which case, it's up to the hook to deal with
   the consequences).

   Returns -ERRNO if packet dropped.  Zero means queued, stolen or
   accepted.
*/

/* RR:
   > I don't want nf_hook to return anything because people might forget
   > about async and trust the return value to mean "packet was ok".

   AK:
   Just document it clearly, then you can expect some sense from kernel
   coders :)
*/
/* This is gross, but inline doesn't cut it for avoiding the function
   call in fast path: gcc doesn't inline (needs value tracking?). --RR */
#ifdef CONFIG_NETFILTER_DEBUG
/* Debug builds always take the slow path, so every packet is seen. */
#define NF_HOOK nf_hook_slow
#else
/* Fast path: bypass nf_hook_slow() entirely when no hook is registered
   for this (pf, hook) pair.  NOTE: pf and hook are evaluated twice when
   the slow path is taken -- do not pass expressions with side effects. */
#define NF_HOOK(pf, hook, skb, indev, outdev, okfn)			\
(list_empty(&nf_hooks[(pf)][(hook)])					\
 ? (okfn)(skb)								\
 : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn)))
#endif
115 int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
116 struct net_device *indev, struct net_device *outdev,
117 int (*okfn)(struct sk_buff *));
119 void nf_cacheflush(int pf, unsigned int hook, const void *packet,
120 const struct net_device *indev, const struct net_device *outdev,
121 __u32 packetcount, __u32 bytecount);
123 /* Call setsockopt() */
124 int nf_setsockopt(struct sock *sk, int pf, int optval, char *opt,
125 int len);
126 int nf_getsockopt(struct sock *sk, int pf, int optval, char *opt,
127 int *len);
129 struct nf_wakeme
131 wait_queue_head_t sleep;
132 struct sk_buff_head skbq;
135 /* For netfilter device. */
136 struct nf_interest
138 struct list_head list;
140 int pf;
141 /* Bitmask of hook numbers to match (1 << hooknum). */
142 unsigned int hookmask;
143 /* If non-zero, only catch packets with this mark. */
144 unsigned int mark;
145 /* If non-zero, only catch packets of this reason. */
146 unsigned int reason;
148 struct nf_wakeme *wake;
/* For asynchronous packet handling. */
extern void nf_register_interest(struct nf_interest *interest);
extern void nf_unregister_interest(struct nf_interest *interest);

/* Recover in/out devices and mark for a queued skb. */
extern void nf_getinfo(const struct sk_buff *skb,
		       struct net_device **indev,
		       struct net_device **outdev,
		       unsigned long *mark);

/* Re-enter the stack with a verdict for a previously queued skb. */
extern void nf_reinject(struct sk_buff *skb,
			unsigned long mark,
			unsigned int verdict);

#ifdef CONFIG_NETFILTER_DEBUG
extern void nf_dump_skb(int pf, struct sk_buff *skb);
#endif

/* FIXME: Before cache is ever used, this must be implemented for real. */
extern void nf_invalidate_cache(int pf);
169 #else /* !CONFIG_NETFILTER */
170 #define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb)
171 #endif /*CONFIG_NETFILTER*/
/* From arch/i386/kernel/smp.c:
 *
 * Why isn't this somewhere standard ??
 *
 * Maybe because this procedure is horribly buggy, and does
 * not deserve to live.  Think about signedness issues for five
 * seconds to see why. - Linus
 */
/* Min/max helpers.  NOTE: each argument is evaluated twice -- never
   pass expressions with side effects. */

/* Two signed, return a signed. */
#define SMAX(a,b) ((ssize_t)(a)>(ssize_t)(b) ? (ssize_t)(a) : (ssize_t)(b))
#define SMIN(a,b) ((ssize_t)(a)<(ssize_t)(b) ? (ssize_t)(a) : (ssize_t)(b))

/* Two unsigned, return an unsigned. */
#define UMAX(a,b) ((size_t)(a)>(size_t)(b) ? (size_t)(a) : (size_t)(b))
#define UMIN(a,b) ((size_t)(a)<(size_t)(b) ? (size_t)(a) : (size_t)(b))

/* Two unsigned, return a signed (comparison is done unsigned; only
   the result is cast to ssize_t). */
#define SUMAX(a,b) ((size_t)(a)>(size_t)(b) ? (ssize_t)(a) : (ssize_t)(b))
#define SUMIN(a,b) ((size_t)(a)<(size_t)(b) ? (ssize_t)(a) : (ssize_t)(b))
193 #endif /*__KERNEL__*/
/* Reason codes attached to caught packets; visible to userspace (this
   sits outside the __KERNEL__ guard), so existing values stay stable. */
enum nf_reason {
	/* Do not, NOT, reorder these.  Add at end. */
	NF_REASON_NONE,
	NF_REASON_SET_BY_IPCHAINS,
	NF_REASON_FOR_ROUTING,
	NF_REASON_FOR_CLS_FW,
	/* Values from here up are reserved for connection tracking. */
	NF_REASON_MIN_RESERVED_FOR_CONNTRACK = 1024,
};
204 #endif /*__LINUX_NETFILTER_H*/