Ok. I didn't make 2.4.0 in 2000. Tough. I tried, but we had some
[davej-history.git] / include / linux / netfilter.h
blob c18810e526c4b95681971a59dc605b825662a981
1 #ifndef __LINUX_NETFILTER_H
2 #define __LINUX_NETFILTER_H
4 #ifdef __KERNEL__
5 #include <linux/init.h>
6 #include <linux/types.h>
7 #include <linux/skbuff.h>
8 #include <linux/net.h>
9 #include <linux/if.h>
10 #include <linux/wait.h>
11 #include <linux/list.h>
12 #endif
/* Responses from hook functions. */
#define NF_DROP 0		/* Discard the packet; caller frees it. */
#define NF_ACCEPT 1		/* Continue traversal as normal. */
#define NF_STOLEN 2		/* Hook has taken over the packet; do not
				   continue, do not free. */
#define NF_QUEUE 3		/* Queue the packet (for userspace). */
#define NF_REPEAT 4		/* Call this hook again. */
#define NF_MAX_VERDICT NF_REPEAT

/* Generic cache responses from hook functions.
   Kept in distinct high bits so they can be OR'd with other state. */
#define NFC_ALTERED 0x8000
#define NFC_UNKNOWN 0x4000
26 #ifdef __KERNEL__
27 #include <linux/config.h>
28 #ifdef CONFIG_NETFILTER
/* Core netfilter initialisation; called once at boot. */
extern void netfilter_init(void);

/* Largest hook number + 1 */
#define NF_MAX_HOOKS 8

struct sk_buff;
struct net_device;

/* Hook function signature.  A hook may replace *skb (hence the
   double pointer) and must return one of the NF_* verdicts above. */
typedef unsigned int nf_hookfn(unsigned int hooknum,
			       struct sk_buff **skb,
			       const struct net_device *in,
			       const struct net_device *out,
			       int (*okfn)(struct sk_buff *));
44 struct nf_hook_ops
46 struct list_head list;
48 /* User fills in from here down. */
49 nf_hookfn *hook;
50 int pf;
51 int hooknum;
52 /* Hooks are ordered in ascending priority. */
53 int priority;
56 struct nf_sockopt_ops
58 struct list_head list;
60 int pf;
62 /* Non-inclusive ranges: use 0/0/NULL to never get called. */
63 int set_optmin;
64 int set_optmax;
65 int (*set)(struct sock *sk, int optval, void *user, unsigned int len);
67 int get_optmin;
68 int get_optmax;
69 int (*get)(struct sock *sk, int optval, void *user, int *len);
71 /* Number of users inside set() or get(). */
72 unsigned int use;
73 struct task_struct *cleanup_task;
/* Each queued (to userspace) skbuff has one of these. */
struct nf_info
{
	/* The ops struct which sent us to userspace. */
	struct nf_hook_ops *elem;

	/* If we're sent to userspace, this keeps housekeeping info */
	int pf;
	unsigned int hook;
	struct net_device *indev, *outdev;
	int (*okfn)(struct sk_buff *);
};
89 /* Function to register/unregister hook points. */
90 int nf_register_hook(struct nf_hook_ops *reg);
91 void nf_unregister_hook(struct nf_hook_ops *reg);
93 /* Functions to register get/setsockopt ranges (non-inclusive). You
94 need to check permissions yourself! */
95 int nf_register_sockopt(struct nf_sockopt_ops *reg);
96 void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
98 extern struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
/* Activate hook; either okfn or kfree_skb called, unless a hook
   returns NF_STOLEN (in which case, it's up to the hook to deal with
   the consequences).

   Returns -ERRNO if packet dropped.  Zero means queued, stolen or
   accepted.
*/

/* RR:
   > I don't want nf_hook to return anything because people might forget
   > about async and trust the return value to mean "packet was ok".

   Just document it clearly, then you can expect some sense from kernel
   coders :)
*/

/* This is gross, but inline doesn't cut it for avoiding the function
   call in fast path: gcc doesn't inline (needs value tracking?). --RR */
#ifdef CONFIG_NETFILTER_DEBUG
#define NF_HOOK nf_hook_slow
#else
/* Fast path: skip nf_hook_slow entirely when no hook is registered
   for this (pf, hook) pair. */
#define NF_HOOK(pf, hook, skb, indev, outdev, okfn)			\
(list_empty(&nf_hooks[(pf)][(hook)])					\
 ? (okfn)(skb)								\
 : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn)))
#endif

int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
		 struct net_device *indev, struct net_device *outdev,
		 int (*okfn)(struct sk_buff *));
/* Call setsockopt() */
int nf_setsockopt(struct sock *sk, int pf, int optval, char *opt,
		  int len);
/* Call getsockopt() */
int nf_getsockopt(struct sock *sk, int pf, int optval, char *opt,
		  int *len);

/* Packet queuing */
typedef int (*nf_queue_outfn_t)(struct sk_buff *skb,
				struct nf_info *info, void *data);
/* Register the single queue handler for protocol family pf; outfn
   receives each queued skb along with its nf_info. */
extern int nf_register_queue_handler(int pf,
				     nf_queue_outfn_t outfn, void *data);
extern int nf_unregister_queue_handler(int pf);
/* Reinject a previously queued packet with the given NF_* verdict. */
extern void nf_reinject(struct sk_buff *skb,
			struct nf_info *info,
			unsigned int verdict);

#ifdef CONFIG_NETFILTER_DEBUG
extern void nf_dump_skb(int pf, struct sk_buff *skb);
#endif

/* FIXME: Before cache is ever used, this must be implemented for real. */
extern void nf_invalidate_cache(int pf);
155 #else /* !CONFIG_NETFILTER */
156 #define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb)
157 #endif /*CONFIG_NETFILTER*/
/* From arch/i386/kernel/smp.c:
 *
 *	Why isn't this somewhere standard ??
 *
 *	Maybe because this procedure is horribly buggy, and does
 *	not deserve to live.  Think about signedness issues for five
 *	seconds to see why.		- Linus
 *
 * NOTE(review): these macros evaluate each argument twice — do not
 * pass expressions with side effects.
 */

/* Two signed, return a signed. */
#define SMAX(a,b) ((ssize_t)(a)>(ssize_t)(b) ? (ssize_t)(a) : (ssize_t)(b))
#define SMIN(a,b) ((ssize_t)(a)<(ssize_t)(b) ? (ssize_t)(a) : (ssize_t)(b))

/* Two unsigned, return an unsigned. */
#define UMAX(a,b) ((size_t)(a)>(size_t)(b) ? (size_t)(a) : (size_t)(b))
#define UMIN(a,b) ((size_t)(a)<(size_t)(b) ? (size_t)(a) : (size_t)(b))

/* Two unsigned, return a signed. */
#define SUMAX(a,b) ((size_t)(a)>(size_t)(b) ? (ssize_t)(a) : (ssize_t)(b))
#define SUMIN(a,b) ((size_t)(a)<(size_t)(b) ? (ssize_t)(a) : (ssize_t)(b))
179 #endif /*__KERNEL__*/
181 #endif /*__LINUX_NETFILTER_H*/