/*
 * Imported from Linux 2.3.18pre1 (davej-history.git):
 * include/linux/netfilter.h
 * blob 6c1b4a6305c273a6404218e26a9c5bab52ac9837
 */
1 #ifndef __LINUX_NETFILTER_H
2 #define __LINUX_NETFILTER_H
/* Kernel-only dependencies; userspace includers only get the ABI
   constants and enum below. */
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/wait.h>
#include <linux/list.h>
#endif
/* Responses (verdicts) from hook functions. */
#define NF_DROP 0	/* discard the packet */
#define NF_ACCEPT 1	/* let it continue traversal */
#define NF_STOLEN 2	/* hook has taken ownership of the skb */
#define NF_QUEUE 3	/* queue for asynchronous (userspace) handling */
#define NF_MAX_VERDICT NF_QUEUE

/* Generic cache responses from hook functions.
   These are flag bits, disjoint from the verdict values above. */
#define NFC_ALTERED 0x8000
#define NFC_UNKNOWN 0x4000
24 #ifdef __KERNEL__
25 #include <linux/config.h>
26 #ifdef CONFIG_NETFILTER
28 extern void netfilter_init(void);
30 /* Largest hook number + 1 */
31 #define NF_MAX_HOOKS 5
33 struct sk_buff;
34 struct net_device;
/* A hook function: inspects the packet and returns one of the NF_*
   verdicts above.  Takes a pointer to the skb pointer, so the hook
   can substitute a different skb. */
typedef unsigned int nf_hookfn(unsigned int hooknum,
			       struct sk_buff **skb,
			       const struct net_device *in,
			       const struct net_device *out);
41 typedef unsigned int nf_cacheflushfn(const void *packet,
42 const struct net_device *in,
43 const struct net_device *out,
44 u_int32_t packetcount,
45 u_int32_t bytecount);
47 struct nf_hook_ops
49 struct list_head list;
51 /* User fills in from here down. */
52 nf_hookfn *hook;
53 nf_cacheflushfn *flush;
54 int pf;
55 int hooknum;
56 /* Hooks are ordered in ascending priority. */
57 int priority;
60 struct nf_sockopt_ops
62 struct list_head list;
64 int pf;
66 /* Non-inclusive ranges: use 0/0/NULL to never get called. */
67 int set_optmin;
68 int set_optmax;
69 int (*set)(struct sock *sk, int optval, void *user, unsigned int len);
71 int get_optmin;
72 int get_optmax;
73 int (*get)(struct sock *sk, int optval, void *user, int *len);
76 /* Function to register/unregister hook points. */
77 int nf_register_hook(struct nf_hook_ops *reg);
78 void nf_unregister_hook(struct nf_hook_ops *reg);
80 /* Functions to register get/setsockopt ranges (non-inclusive). */
81 int nf_register_sockopt(struct nf_sockopt_ops *reg);
82 void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
84 extern struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
/* Activate hook/flush; either okfn or kfree_skb called, unless a hook
   returns NF_STOLEN (in which case, it's up to the hook to deal with
   the consequences).

   Returns -ERRNO if packet dropped.  Zero means queued, stolen or
   accepted. */

/* RR:
   > I don't want nf_hook to return anything because people might forget
   > about async and trust the return value to mean "packet was ok".

   AK:
   Just document it clearly, then you can expect some sense from kernel
   coders :) */
/* This is gross, but inline doesn't cut it for avoiding the function
   call in fast path: gcc doesn't inline (needs value tracking?). --RR */
/* NOTE: pf and hook are evaluated twice in the fast-path form below;
   callers must pass side-effect-free arguments. */
#ifdef CONFIG_NETFILTER_DEBUG
#define NF_HOOK nf_hook_slow
#else
#define NF_HOOK(pf, hook, skb, indev, outdev, okfn)			\
	(list_empty(&nf_hooks[(pf)][(hook)])				\
	 ? (okfn)(skb)							\
	 : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn)))
#endif
114 int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
115 struct net_device *indev, struct net_device *outdev,
116 int (*okfn)(struct sk_buff *));
118 void nf_cacheflush(int pf, unsigned int hook, const void *packet,
119 const struct net_device *indev, const struct net_device *outdev,
120 __u32 packetcount, __u32 bytecount);
122 /* Call setsockopt() */
123 int nf_setsockopt(struct sock *sk, int pf, int optval, char *opt,
124 int len);
125 int nf_getsockopt(struct sock *sk, int pf, int optval, char *opt,
126 int *len);
128 struct nf_wakeme
130 wait_queue_head_t sleep;
131 struct sk_buff_head skbq;
134 /* For netfilter device. */
135 struct nf_interest
137 struct list_head list;
139 int pf;
140 /* Bitmask of hook numbers to match (1 << hooknum). */
141 unsigned int hookmask;
142 /* If non-zero, only catch packets with this mark. */
143 unsigned int mark;
144 /* If non-zero, only catch packets of this reason. */
145 unsigned int reason;
147 struct nf_wakeme *wake;
/* For asynchronous packet handling. */
extern void nf_register_interest(struct nf_interest *interest);
extern void nf_unregister_interest(struct nf_interest *interest);

/* Retrieve the in/out devices and mark recorded for a queued skb. */
extern void nf_getinfo(const struct sk_buff *skb,
		       struct net_device **indev,
		       struct net_device **outdev,
		       unsigned long *mark);

/* Hand a queued skb back to the stack with the given verdict. */
extern void nf_reinject(struct sk_buff *skb,
			unsigned long mark,
			unsigned int verdict);

#ifdef CONFIG_NETFILTER_DEBUG
extern void nf_dump_skb(int pf, struct sk_buff *skb);
#endif

/* FIXME: Before cache is ever used, this must be implemented for real. */
extern void nf_invalidate_cache(int pf);
168 #else /* !CONFIG_NETFILTER */
169 #define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb)
170 #endif /*CONFIG_NETFILTER*/
/* From arch/i386/kernel/smp.c:
 *
 *	Why isn't this somewhere standard ??
 *
 *	Maybe because this procedure is horribly buggy, and does
 *	not deserve to live.  Think about signedness issues for five
 *	seconds to see why.		- Linus
 */
/* NOTE: classic macro hazard -- each argument is evaluated twice;
   do not pass expressions with side effects. */

/* Two signed, return a signed. */
#define SMAX(a,b) ((ssize_t)(a)>(ssize_t)(b) ? (ssize_t)(a) : (ssize_t)(b))
#define SMIN(a,b) ((ssize_t)(a)<(ssize_t)(b) ? (ssize_t)(a) : (ssize_t)(b))

/* Two unsigned, return an unsigned. */
#define UMAX(a,b) ((size_t)(a)>(size_t)(b) ? (size_t)(a) : (size_t)(b))
#define UMIN(a,b) ((size_t)(a)<(size_t)(b) ? (size_t)(a) : (size_t)(b))

/* Two unsigned, return a signed.
   (Comparison is done unsigned; only the result is cast -- see the
   signedness warning in the comment above.) */
#define SUMAX(a,b) ((size_t)(a)>(size_t)(b) ? (ssize_t)(a) : (ssize_t)(b))
#define SUMIN(a,b) ((size_t)(a)<(size_t)(b) ? (ssize_t)(a) : (ssize_t)(b))
192 #endif /*__KERNEL__*/
/* Reason codes attached to queued packets (visible to userspace).
   Values are ABI: do not, NOT, reorder these.  Add at end. */
enum nf_reason {
	NF_REASON_NONE,
	NF_REASON_SET_BY_IPCHAINS,
	NF_REASON_FOR_ROUTING,
	NF_REASON_FOR_CLS_FW,
	/* Connection tracking claims everything from here up. */
	NF_REASON_MIN_RESERVED_FOR_CONNTRACK = 1024,
};
203 #endif /*__LINUX_NETFILTER_H*/