#include <linux/xfrm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/crypto.h>

#include <net/dst.h>
#include <net/route.h>

extern struct semaphore xfrm_cfg_sem;

/* Organization of SPD aka "XFRM rules"
   ------------------------------------

   Basic objects:
   - policy rule, struct xfrm_policy (=SPD entry)
   - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
   - instance of a transformer, struct xfrm_state (=SA)
   - template to clone xfrm_state, struct xfrm_tmpl

   The SPD is a plain linear list of xfrm_policy rules, ordered by priority.
   (To be compatible with existing pfkeyv2 implementations,
   many rules with a priority of 0x7fffffff are allowed to exist and
   such rules are ordered in an unpredictable way, thanks to bsd folks.)

   Lookup is a plain linear search until the first match with the selector
   (a sketch of this lookup follows this comment).

   If "action" is "block", we prohibit the flow, otherwise:
   if "xfrm_nr" is zero, the flow passes untransformed. Otherwise,
   the policy entry has a list of up to XFRM_MAX_DEPTH transformations,
   described by templates xfrm_tmpl. Each template is resolved
   to a complete xfrm_state (see below) and we pack the bundle of
   transformations into a dst_entry returned to the requestor.

   dst -. xfrm .-> xfrm_state #1
    |---. child .-> dst -. xfrm .-> xfrm_state #2
                     |---. child .-> dst -. xfrm .-> xfrm_state #3
                                      |---. child .-> NULL

   Bundles are cached at the xfrm_policy struct (field ->bundles).


   Resolution of xfrm_tmpl
   -----------------------
   A template contains:
   1. ->mode      Mode: transport or tunnel
   2. ->id.proto  Protocol: AH/ESP/IPCOMP
   3. ->id.daddr  Remote tunnel endpoint, ignored for transport mode.
      Q: allow to resolve security gateway?
   4. ->id.spi    If not zero, static SPI.
   5. ->saddr     Local tunnel endpoint, ignored for transport mode.
   6. ->algos     List of allowed algos. Plain bitmask now.
      Q: ealgos, aalgos, calgos. What a mess...
   7. ->share     Sharing mode.
      Q: how to implement private sharing mode? To add struct sock* to
      flow id?

   Having this template, we search the SAD for entries with an appropriate
   mode/proto/algo, permitted by the selector.
   If no appropriate entry is found, one is requested from the key manager.

   PROBLEMS:
   Q: How to find all the bundles referring to a physical path for
      PMTU discovery? It seems dst should contain a list of all parents...
      and we enter an infinite locking hierarchy disaster.
      No! It is easier: we will not search for them, let them find us.
      We add a genid to each dst plus a pointer to the genid of the raw IP
      route; PMTU discovery will update the pmtu on the raw IP route and
      increase its genid. dst_check() will see this for the top level and
      trigger resyncing of metrics. Plus, it will be made via sk->dst_cache.
      Solved.
 */
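
/* A sketch of the lookup just described (spd_lookup_sketch is a hypothetical
 * helper, kept in a comment since the types it uses are declared further
 * down; the real xfrm_policy_lookup() also takes the SPD lock and handles
 * expiry):
 *
 *      static struct xfrm_policy *spd_lookup_sketch(int dir, struct flowi *fl)
 *      {
 *              struct xfrm_policy *pol;
 *
 *              for (pol = xfrm_policy_list[dir]; pol; pol = pol->next) {
 *                      if (xfrm4_selector_match(&pol->selector, fl)) {
 *                              xfrm_pol_hold(pol);
 *                              return pol;
 *                      }
 *              }
 *              return NULL;
 *      }
 *
 * A matching policy with action XFRM_POLICY_BLOCK drops the flow, xfrm_nr == 0
 * lets it pass as-is, and otherwise the xfrm_vec templates are resolved into
 * an SA bundle.
 */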

/* Full description of state of transformer. */
struct xfrm_state
{
        struct list_head bydst;
        struct list_head byspi;

        atomic_t refcnt;
        spinlock_t lock;

        struct xfrm_id id;
        struct xfrm_selector sel;

        /* Key manager bits */
        struct {
                u8 state;
                u8 dying;
                u32 seq;
        } km;

        /* Parameters of this state. */
        struct {
                u8 mode;
                u8 replay_window;
                u8 aalgo, ealgo, calgo;
                u16 reqid;
                u16 family;
                xfrm_address_t saddr;
                int header_len;
                int trailer_len;
        } props;

        struct xfrm_lifetime_cfg lft;

        /* Data for transformer */
        struct xfrm_algo *aalg;
        struct xfrm_algo *ealg;
        struct xfrm_algo *calg;

        /* State for replay detection */
        struct xfrm_replay_state replay;

        /* Statistics */
        struct xfrm_stats stats;

        struct xfrm_lifetime_cur curlft;
        struct timer_list timer;

        /* Reference to data common to all the instances of this
         * transformer. */
        struct xfrm_type *type;

        /* Private data of this transformer, format is opaque,
         * interpreted by xfrm_type methods. */
        void *data;
};
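
/* lft holds the configured limits, curlft the running totals.  A sketch of a
 * hard-limit check in the spirit of xfrm_state_check_expire() (hypothetical
 * helper; the real code also handles soft limits and time-based expiry):
 *
 *      static int state_over_hard_limit_sketch(struct xfrm_state *x)
 *      {
 *              return x->curlft.bytes >= x->lft.hard_byte_limit ||
 *                     x->curlft.packets >= x->lft.hard_packet_limit;
 *      }
 */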

enum {
        XFRM_STATE_VOID,
        XFRM_STATE_ACQ,
        XFRM_STATE_VALID,
        XFRM_STATE_ERROR,
        XFRM_STATE_EXPIRED,
        XFRM_STATE_DEAD
};

struct xfrm_type
{
        char *description;
        struct module *owner;
        __u8 proto;

        int (*init_state)(struct xfrm_state *x, void *args);
        void (*destructor)(struct xfrm_state *);
        int (*input)(struct xfrm_state *, struct sk_buff *skb);
        int (*output)(struct sk_buff *skb);
        /* Estimate maximal size of result of transformation of a dgram */
        u32 (*get_max_size)(struct xfrm_state *, int size);
};

extern int xfrm_register_type(struct xfrm_type *type);
extern int xfrm_unregister_type(struct xfrm_type *type);
extern struct xfrm_type *xfrm_get_type(u8 proto);
extern void xfrm_put_type(struct xfrm_type *type);
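
/* How a transform protocol plugs in (sketch with hypothetical "my_esp" names;
 * the real AH/ESP/IPCOMP implementations live in their own modules):
 *
 *      static struct xfrm_type my_esp_type = {
 *              .description    = "MY-ESP",
 *              .owner          = THIS_MODULE,
 *              .proto          = IPPROTO_ESP,
 *              .init_state     = my_esp_init_state,
 *              .destructor     = my_esp_destroy,
 *              .input          = my_esp_input,
 *              .output         = my_esp_output,
 *              .get_max_size   = my_esp_get_max_size,
 *      };
 *
 * The module registers it with xfrm_register_type(&my_esp_type) at init time
 * and removes it with xfrm_unregister_type(&my_esp_type) on exit;
 * xfrm_get_type() then maps an SA's ->id.proto to these operations.
 */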

struct xfrm_tmpl
{
/* id in template is interpreted as:
 * daddr - destination of tunnel, may be zero for transport mode.
 * spi   - zero to acquire spi. Not zero if spi is static, then
 *         daddr must be fixed too.
 * proto - AH/ESP/IPCOMP
 */
        struct xfrm_id id;

/* Source address of tunnel. Ignored, if it is not a tunnel. */
        xfrm_address_t saddr;

        __u16 reqid;

/* Mode: transport/tunnel */
        __u8 mode;

/* Sharing mode: unique, this session only, this user only etc. */
        __u8 share;

/* This transformation may be skipped if no matching SA is found */
        __u8 optional;

/* Bit mask of algos allowed for acquisition */
        __u32 aalgos;
        __u32 ealgos;
        __u32 calgos;
};
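
/* Example of a filled-in template (hypothetical values): tunnel-mode ESP to a
 * fixed security gateway at 192.168.0.1, SPI and source address left for the
 * key manager to choose, any algorithm acceptable.  Assumes the usual
 * 0 = transport / 1 = tunnel convention for ->mode:
 *
 *      struct xfrm_tmpl t;
 *
 *      memset(&t, 0, sizeof(t));
 *      t.id.proto = IPPROTO_ESP;
 *      t.id.daddr.xfrm4_addr = htonl(0xc0a80001);
 *      t.mode = 1;
 *      t.aalgos = t.ealgos = t.calgos = ~0;
 */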

#define XFRM_MAX_DEPTH 3

struct xfrm_policy
{
        struct xfrm_policy *next;

        /* This lock only affects elements except for entry. */
        rwlock_t lock;
        atomic_t refcnt;

        u32 priority;
        u32 index;
        struct xfrm_selector selector;
        struct xfrm_lifetime_cfg lft;
        struct xfrm_lifetime_cur curlft;
        struct dst_entry *bundles;
        __u16 family;
        __u8 action;
        __u8 flags;
        __u8 dead;
        __u8 xfrm_nr;
        struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
};

struct xfrm_mgr
{
        struct list_head list;
        char *id;
        int (*notify)(struct xfrm_state *x, int event);
        int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
        struct xfrm_policy *(*compile_policy)(int opt, u8 *data, int len, int *dir);
};

extern int xfrm_register_km(struct xfrm_mgr *km);
extern int xfrm_unregister_km(struct xfrm_mgr *km);
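
/* A key manager (e.g. the pfkeyv2 socket) registers itself with a set of
 * callbacks.  Sketch with hypothetical "my_km" names:
 *
 *      static struct xfrm_mgr my_km = {
 *              .id             = "my_km",
 *              .notify         = my_km_notify,
 *              .acquire        = my_km_acquire,
 *              .compile_policy = my_km_compile_policy,
 *      };
 *
 * The manager is hooked in with xfrm_register_km(&my_km) and removed with
 * xfrm_unregister_km(&my_km).  ->acquire is called when template resolution
 * finds no matching SA, so the key manager can negotiate one; ->notify
 * reports state events such as expiry.
 */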

extern struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];

static inline void xfrm_pol_hold(struct xfrm_policy *policy)
{
        if (policy)
                atomic_inc(&policy->refcnt);
}

extern void __xfrm_policy_destroy(struct xfrm_policy *policy);

static inline void xfrm_pol_put(struct xfrm_policy *policy)
{
        if (atomic_dec_and_test(&policy->refcnt))
                __xfrm_policy_destroy(policy);
}

extern void __xfrm_state_destroy(struct xfrm_state *);

static inline void xfrm_state_put(struct xfrm_state *x)
{
        if (atomic_dec_and_test(&x->refcnt))
                __xfrm_state_destroy(x);
}

static inline int
xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
        return  !((fl->fl4_dst^sel->daddr.xfrm4_addr)&sel->daddr.xfrm4_mask) &&
                !((fl->uli_u.ports.dport^sel->dport)&sel->dport_mask) &&
                !((fl->uli_u.ports.sport^sel->sport)&sel->sport_mask) &&
                (fl->proto == sel->proto || !sel->proto) &&
                (fl->oif == sel->ifindex || !sel->ifindex) &&
                !((fl->fl4_src^sel->saddr.xfrm4_addr)&sel->saddr.xfrm4_mask);
}

/* A struct encoding a bundle of transformations to apply to some set of flows.
 *
 * dst->child points to the next element of the bundle.
 * dst->xfrm  points to an instance of a transformer.
 *
 * Due to unfortunate limitations of the current routing cache, which we
 * have no time to fix, it mirrors struct rtable and is bound to the same
 * routing key, including saddr,daddr. However, we can have many bundles
 * differing by session id. All the bundles grow from a parent
 * policy rule.
 */
struct xfrm_dst
{
        union {
                struct xfrm_dst *next;
                struct dst_entry dst;
                struct rtable rt;
        } u;
};
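
/* Walking a bundle (sketch): each level of the chain carries one xfrm_state
 * in dst->xfrm and descends via dst->child, ending at the route towards the
 * real nexthop.  Counting the transforms on a bundle, for illustration
 * (bundle_depth_sketch is a hypothetical helper, not part of this header):
 *
 *      static int bundle_depth_sketch(struct dst_entry *dst)
 *      {
 *              int n = 0;
 *
 *              for (; dst; dst = dst->child)
 *                      if (dst->xfrm)
 *                              n++;
 *              return n;
 *      }
 */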

struct sec_path
{
        atomic_t refcnt;
        int len;
        struct xfrm_state *xvec[XFRM_MAX_DEPTH];
};

static inline struct sec_path *
secpath_get(struct sec_path *sp)
{
        if (sp)
                atomic_inc(&sp->refcnt);
        return sp;
}

extern void __secpath_destroy(struct sec_path *sp);

static inline void
secpath_put(struct sec_path *sp)
{
        if (sp && atomic_dec_and_test(&sp->refcnt))
                __secpath_destroy(sp);
}

extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb);

static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
        if (sk && sk->policy[XFRM_POLICY_IN])
                return __xfrm_policy_check(sk, dir, skb);

        return  !xfrm_policy_list[dir] ||
                (skb->dst->flags & DST_NOPOLICY) ||
                __xfrm_policy_check(sk, dir, skb);
}

extern int __xfrm_route_forward(struct sk_buff *skb);

static inline int xfrm_route_forward(struct sk_buff *skb)
{
        return  !xfrm_policy_list[XFRM_POLICY_OUT] ||
                (skb->dst->flags & DST_NOXFRM) ||
                __xfrm_route_forward(skb);
}

extern int __xfrm_sk_clone_policy(struct sock *sk);

static inline int xfrm_sk_clone_policy(struct sock *sk)
{
        if (unlikely(sk->policy[0] || sk->policy[1]))
                return __xfrm_sk_clone_policy(sk);
        return 0;
}

extern void __xfrm_sk_free_policy(struct xfrm_policy *, int dir);

static inline void xfrm_sk_free_policy(struct sock *sk)
{
        if (unlikely(sk->policy[0] != NULL)) {
                __xfrm_sk_free_policy(sk->policy[0], 0);
                sk->policy[0] = NULL;
        }
        if (unlikely(sk->policy[1] != NULL)) {
                __xfrm_sk_free_policy(sk->policy[1], 1);
                sk->policy[1] = NULL;
        }
}

extern void xfrm_state_init(void);
extern void xfrm_input_init(void);
extern int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), void *);
extern struct xfrm_state *xfrm_state_alloc(void);
extern struct xfrm_state *xfrm_state_find(u32 daddr, u32 saddr, struct flowi *fl, struct xfrm_tmpl *tmpl,
                                           struct xfrm_policy *pol, int *err);
extern int xfrm_state_check_expire(struct xfrm_state *x);
extern void xfrm_state_insert(struct xfrm_state *x);
extern int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb);
extern struct xfrm_state *xfrm_state_lookup(u32 daddr, u32 spi, u8 proto);
extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
extern void xfrm_state_delete(struct xfrm_state *x);
extern void xfrm_state_flush(u8 proto);
extern int xfrm_replay_check(struct xfrm_state *x, u32 seq);
extern void xfrm_replay_advance(struct xfrm_state *x, u32 seq);
extern int xfrm_check_selectors(struct xfrm_state **x, int n, struct flowi *fl);
extern int xfrm4_rcv(struct sk_buff *skb);
extern int xfrm_user_policy(struct sock *sk, int optname, u8 *optval, int optlen);

struct xfrm_policy *xfrm_policy_alloc(int gfp);
extern int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*), void *);
struct xfrm_policy *xfrm_policy_lookup(int dir, struct flowi *fl);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
struct xfrm_policy *xfrm_policy_delete(int dir, struct xfrm_selector *sel);
struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete);
void xfrm_policy_flush(void);
void xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
struct xfrm_state * xfrm_find_acq(u8 mode, u16 reqid, u8 proto, u32 daddr, u32 saddr, int create);
extern void xfrm_policy_kill(struct xfrm_policy *);
extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
extern struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl);
extern int xfrm_flush_bundles(struct xfrm_state *x);
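
/* How the pieces fit together on output, given a struct flowi *fl for the
 * outgoing packet (rough sketch only; the real resolver also handles
 * "optional" templates, tunnel-mode endpoint substitution, errors and the
 * construction of the xfrm_dst chain):
 *
 *      struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
 *      struct xfrm_policy *pol;
 *      int i, err;
 *
 *      pol = xfrm_policy_lookup(XFRM_POLICY_OUT, fl);
 *      for (i = 0; i < pol->xfrm_nr; i++) {
 *              struct xfrm_tmpl *tmpl = &pol->xfrm_vec[i];
 *
 *              xfrm[i] = xfrm_state_find(fl->fl4_dst, fl->fl4_src, fl,
 *                                        tmpl, pol, &err);
 *      }
 */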

extern wait_queue_head_t km_waitq;
extern void km_warn_expired(struct xfrm_state *x);
extern void km_expired(struct xfrm_state *x);
extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *pol);