xfrm: Move IPsec replay detection functions to a separate file
net/xfrm/xfrm_user.c
1 /* xfrm_user.c: User interface to configure xfrm engine.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
13 #include <linux/crypto.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/socket.h>
19 #include <linux/string.h>
20 #include <linux/net.h>
21 #include <linux/skbuff.h>
22 #include <linux/pfkeyv2.h>
23 #include <linux/ipsec.h>
24 #include <linux/init.h>
25 #include <linux/security.h>
26 #include <net/sock.h>
27 #include <net/xfrm.h>
28 #include <net/netlink.h>
29 #include <net/ah.h>
30 #include <asm/uaccess.h>
31 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
32 #include <linux/in6.h>
33 #endif
35 static inline int aead_len(struct xfrm_algo_aead *alg)
37 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
40 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
42 struct nlattr *rt = attrs[type];
43 struct xfrm_algo *algp;
45 if (!rt)
46 return 0;
48 algp = nla_data(rt);
49 if (nla_len(rt) < xfrm_alg_len(algp))
50 return -EINVAL;
52 switch (type) {
53 case XFRMA_ALG_AUTH:
54 case XFRMA_ALG_CRYPT:
55 case XFRMA_ALG_COMP:
56 break;
58 default:
59 return -EINVAL;
62 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
63 return 0;
66 static int verify_auth_trunc(struct nlattr **attrs)
68 struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
69 struct xfrm_algo_auth *algp;
71 if (!rt)
72 return 0;
74 algp = nla_data(rt);
75 if (nla_len(rt) < xfrm_alg_auth_len(algp))
76 return -EINVAL;
78 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
79 return 0;
82 static int verify_aead(struct nlattr **attrs)
84 struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
85 struct xfrm_algo_aead *algp;
87 if (!rt)
88 return 0;
90 algp = nla_data(rt);
91 if (nla_len(rt) < aead_len(algp))
92 return -EINVAL;
94 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
95 return 0;
98 static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
99 xfrm_address_t **addrp)
101 struct nlattr *rt = attrs[type];
103 if (rt && addrp)
104 *addrp = nla_data(rt);
107 static inline int verify_sec_ctx_len(struct nlattr **attrs)
109 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
110 struct xfrm_user_sec_ctx *uctx;
112 if (!rt)
113 return 0;
115 uctx = nla_data(rt);
116 if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
117 return -EINVAL;
119 return 0;
123 static int verify_newsa_info(struct xfrm_usersa_info *p,
124 struct nlattr **attrs)
126 int err;
128 err = -EINVAL;
129 switch (p->family) {
130 case AF_INET:
131 break;
133 case AF_INET6:
134 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
135 break;
136 #else
137 err = -EAFNOSUPPORT;
138 goto out;
139 #endif
141 default:
142 goto out;
145 err = -EINVAL;
146 switch (p->id.proto) {
147 case IPPROTO_AH:
148 if ((!attrs[XFRMA_ALG_AUTH] &&
149 !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
150 attrs[XFRMA_ALG_AEAD] ||
151 attrs[XFRMA_ALG_CRYPT] ||
152 attrs[XFRMA_ALG_COMP] ||
153 attrs[XFRMA_TFCPAD])
154 goto out;
155 break;
157 case IPPROTO_ESP:
158 if (attrs[XFRMA_ALG_COMP])
159 goto out;
160 if (!attrs[XFRMA_ALG_AUTH] &&
161 !attrs[XFRMA_ALG_AUTH_TRUNC] &&
162 !attrs[XFRMA_ALG_CRYPT] &&
163 !attrs[XFRMA_ALG_AEAD])
164 goto out;
165 if ((attrs[XFRMA_ALG_AUTH] ||
166 attrs[XFRMA_ALG_AUTH_TRUNC] ||
167 attrs[XFRMA_ALG_CRYPT]) &&
168 attrs[XFRMA_ALG_AEAD])
169 goto out;
170 if (attrs[XFRMA_TFCPAD] &&
171 p->mode != XFRM_MODE_TUNNEL)
172 goto out;
173 break;
175 case IPPROTO_COMP:
176 if (!attrs[XFRMA_ALG_COMP] ||
177 attrs[XFRMA_ALG_AEAD] ||
178 attrs[XFRMA_ALG_AUTH] ||
179 attrs[XFRMA_ALG_AUTH_TRUNC] ||
180 attrs[XFRMA_ALG_CRYPT] ||
181 attrs[XFRMA_TFCPAD])
182 goto out;
183 break;
185 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
186 case IPPROTO_DSTOPTS:
187 case IPPROTO_ROUTING:
188 if (attrs[XFRMA_ALG_COMP] ||
189 attrs[XFRMA_ALG_AUTH] ||
190 attrs[XFRMA_ALG_AUTH_TRUNC] ||
191 attrs[XFRMA_ALG_AEAD] ||
192 attrs[XFRMA_ALG_CRYPT] ||
193 attrs[XFRMA_ENCAP] ||
194 attrs[XFRMA_SEC_CTX] ||
195 attrs[XFRMA_TFCPAD] ||
196 !attrs[XFRMA_COADDR])
197 goto out;
198 break;
199 #endif
201 default:
202 goto out;
205 if ((err = verify_aead(attrs)))
206 goto out;
207 if ((err = verify_auth_trunc(attrs)))
208 goto out;
209 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
210 goto out;
211 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
212 goto out;
213 if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
214 goto out;
215 if ((err = verify_sec_ctx_len(attrs)))
216 goto out;
218 err = -EINVAL;
219 switch (p->mode) {
220 case XFRM_MODE_TRANSPORT:
221 case XFRM_MODE_TUNNEL:
222 case XFRM_MODE_ROUTEOPTIMIZATION:
223 case XFRM_MODE_BEET:
224 break;
226 default:
227 goto out;
230 err = 0;
232 out:
233 return err;
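/*
 * Editor's summary (not in the original source) of the per-protocol
 * attribute rules enforced by the switch above, for anyone composing an
 * XFRM_MSG_NEWSA/XFRM_MSG_UPDSA request:
 *
 *   IPPROTO_AH:      at least one of XFRMA_ALG_AUTH / XFRMA_ALG_AUTH_TRUNC;
 *                    AEAD, CRYPT, COMP and TFCPAD attributes are rejected.
 *   IPPROTO_ESP:     either XFRMA_ALG_AEAD alone, or any mix of AUTH,
 *                    AUTH_TRUNC and CRYPT; COMP is rejected and TFCPAD is
 *                    only accepted in tunnel mode.
 *   IPPROTO_COMP:    XFRMA_ALG_COMP and nothing else.
 *   IPPROTO_DSTOPTS / IPPROTO_ROUTING (IPv6 builds only): no algorithms,
 *                    no ENCAP/SEC_CTX/TFCPAD, and XFRMA_COADDR is mandatory.
 */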
236 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
237 struct xfrm_algo_desc *(*get_byname)(const char *, int),
238 struct nlattr *rta)
240 struct xfrm_algo *p, *ualg;
241 struct xfrm_algo_desc *algo;
243 if (!rta)
244 return 0;
246 ualg = nla_data(rta);
248 algo = get_byname(ualg->alg_name, 1);
249 if (!algo)
250 return -ENOSYS;
251 *props = algo->desc.sadb_alg_id;
253 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
254 if (!p)
255 return -ENOMEM;
257 strcpy(p->alg_name, algo->name);
258 *algpp = p;
259 return 0;
262 static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
263 struct nlattr *rta)
265 struct xfrm_algo *ualg;
266 struct xfrm_algo_auth *p;
267 struct xfrm_algo_desc *algo;
269 if (!rta)
270 return 0;
272 ualg = nla_data(rta);
274 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
275 if (!algo)
276 return -ENOSYS;
277 *props = algo->desc.sadb_alg_id;
279 p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
280 if (!p)
281 return -ENOMEM;
283 strcpy(p->alg_name, algo->name);
284 p->alg_key_len = ualg->alg_key_len;
285 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
286 memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
288 *algpp = p;
289 return 0;
292 static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
293 struct nlattr *rta)
295 struct xfrm_algo_auth *p, *ualg;
296 struct xfrm_algo_desc *algo;
298 if (!rta)
299 return 0;
301 ualg = nla_data(rta);
303 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
304 if (!algo)
305 return -ENOSYS;
306 if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
307 ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
308 return -EINVAL;
309 *props = algo->desc.sadb_alg_id;
311 p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
312 if (!p)
313 return -ENOMEM;
315 strcpy(p->alg_name, algo->name);
316 if (!p->alg_trunc_len)
317 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
319 *algpp = p;
320 return 0;
323 static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
324 struct nlattr *rta)
326 struct xfrm_algo_aead *p, *ualg;
327 struct xfrm_algo_desc *algo;
329 if (!rta)
330 return 0;
332 ualg = nla_data(rta);
334 algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
335 if (!algo)
336 return -ENOSYS;
337 *props = algo->desc.sadb_alg_id;
339 p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
340 if (!p)
341 return -ENOMEM;
343 strcpy(p->alg_name, algo->name);
344 *algpp = p;
345 return 0;
348 static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
350 int len = 0;
352 if (xfrm_ctx) {
353 len += sizeof(struct xfrm_user_sec_ctx);
354 len += xfrm_ctx->ctx_len;
356 return len;
359 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
361 memcpy(&x->id, &p->id, sizeof(x->id));
362 memcpy(&x->sel, &p->sel, sizeof(x->sel));
363 memcpy(&x->lft, &p->lft, sizeof(x->lft));
364 x->props.mode = p->mode;
365 x->props.replay_window = p->replay_window;
366 x->props.reqid = p->reqid;
367 x->props.family = p->family;
368 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
369 x->props.flags = p->flags;
371 if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
372 x->sel.family = p->family;
376 * someday when pfkey also has support, we could have the code
377 * somehow made shareable and move it to xfrm_state.c - JHS
380 static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
382 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
383 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
384 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
385 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
387 if (rp) {
388 struct xfrm_replay_state *replay;
389 replay = nla_data(rp);
390 memcpy(&x->replay, replay, sizeof(*replay));
391 memcpy(&x->preplay, replay, sizeof(*replay));
394 if (lt) {
395 struct xfrm_lifetime_cur *ltime;
396 ltime = nla_data(lt);
397 x->curlft.bytes = ltime->bytes;
398 x->curlft.packets = ltime->packets;
399 x->curlft.add_time = ltime->add_time;
400 x->curlft.use_time = ltime->use_time;
403 if (et)
404 x->replay_maxage = nla_get_u32(et);
406 if (rt)
407 x->replay_maxdiff = nla_get_u32(rt);
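/*
 * Illustrative sketch (added by the editor, not part of this file): the
 * attributes consumed above arrive in an XFRM_MSG_NEWAE request.  A
 * user-space sender overriding the replay counters would append a
 * struct xfrm_replay_state as XFRMA_REPLAY_VAL, e.g.:
 *
 *	struct xfrm_replay_state rs = {
 *		.oseq   = 0x100,	(outbound sequence number)
 *		.seq    = 0x80,		(highest inbound sequence seen)
 *		.bitmap = 0xffffffff,	(inbound anti-replay window)
 *	};
 *
 *	The structure is attached with nla_type == XFRMA_REPLAY_VAL and a
 *	payload length of sizeof(rs); the numbers above are made up.
 *
 * Note that xfrm_new_ae() below additionally insists on NLM_F_REPLACE
 * being set before it calls this helper.
 */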
410 static struct xfrm_state *xfrm_state_construct(struct net *net,
411 struct xfrm_usersa_info *p,
412 struct nlattr **attrs,
413 int *errp)
415 struct xfrm_state *x = xfrm_state_alloc(net);
416 int err = -ENOMEM;
418 if (!x)
419 goto error_no_put;
421 copy_from_user_state(x, p);
423 if ((err = attach_aead(&x->aead, &x->props.ealgo,
424 attrs[XFRMA_ALG_AEAD])))
425 goto error;
426 if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
427 attrs[XFRMA_ALG_AUTH_TRUNC])))
428 goto error;
429 if (!x->props.aalgo) {
430 if ((err = attach_auth(&x->aalg, &x->props.aalgo,
431 attrs[XFRMA_ALG_AUTH])))
432 goto error;
434 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
435 xfrm_ealg_get_byname,
436 attrs[XFRMA_ALG_CRYPT])))
437 goto error;
438 if ((err = attach_one_algo(&x->calg, &x->props.calgo,
439 xfrm_calg_get_byname,
440 attrs[XFRMA_ALG_COMP])))
441 goto error;
443 if (attrs[XFRMA_ENCAP]) {
444 x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
445 sizeof(*x->encap), GFP_KERNEL);
446 if (x->encap == NULL)
447 goto error;
450 if (attrs[XFRMA_TFCPAD])
451 x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
453 if (attrs[XFRMA_COADDR]) {
454 x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
455 sizeof(*x->coaddr), GFP_KERNEL);
456 if (x->coaddr == NULL)
457 goto error;
460 xfrm_mark_get(attrs, &x->mark);
462 err = xfrm_init_state(x);
463 if (err)
464 goto error;
466 if (attrs[XFRMA_SEC_CTX] &&
467 security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
468 goto error;
470 x->km.seq = p->seq;
471 x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
472 /* sysctl_xfrm_aevent_etime is in 100ms units */
473 x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;
474 x->preplay.bitmap = 0;
475 x->preplay.seq = x->replay.seq+x->replay_maxdiff;
476 x->preplay.oseq = x->replay.oseq +x->replay_maxdiff;
478 if ((err = xfrm_init_replay(x)))
479 goto error;
481 /* override default values from above */
482 xfrm_update_ae_params(x, attrs);
484 return x;
486 error:
487 x->km.state = XFRM_STATE_DEAD;
488 xfrm_state_put(x);
489 error_no_put:
490 *errp = err;
491 return NULL;
494 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
495 struct nlattr **attrs)
497 struct net *net = sock_net(skb->sk);
498 struct xfrm_usersa_info *p = nlmsg_data(nlh);
499 struct xfrm_state *x;
500 int err;
501 struct km_event c;
502 uid_t loginuid = audit_get_loginuid(current);
503 u32 sessionid = audit_get_sessionid(current);
504 u32 sid;
506 err = verify_newsa_info(p, attrs);
507 if (err)
508 return err;
510 x = xfrm_state_construct(net, p, attrs, &err);
511 if (!x)
512 return err;
514 xfrm_state_hold(x);
515 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
516 err = xfrm_state_add(x);
517 else
518 err = xfrm_state_update(x);
520 security_task_getsecid(current, &sid);
521 xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
523 if (err < 0) {
524 x->km.state = XFRM_STATE_DEAD;
525 __xfrm_state_put(x);
526 goto out;
529 c.seq = nlh->nlmsg_seq;
530 c.pid = nlh->nlmsg_pid;
531 c.event = nlh->nlmsg_type;
533 km_state_notify(x, &c);
534 out:
535 xfrm_state_put(x);
536 return err;
539 static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
540 struct xfrm_usersa_id *p,
541 struct nlattr **attrs,
542 int *errp)
544 struct xfrm_state *x = NULL;
545 struct xfrm_mark m;
546 int err;
547 u32 mark = xfrm_mark_get(attrs, &m);
549 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
550 err = -ESRCH;
551 x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
552 } else {
553 xfrm_address_t *saddr = NULL;
555 verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
556 if (!saddr) {
557 err = -EINVAL;
558 goto out;
561 err = -ESRCH;
562 x = xfrm_state_lookup_byaddr(net, mark,
563 &p->daddr, saddr,
564 p->proto, p->family);
567 out:
568 if (!x && errp)
569 *errp = err;
570 return x;
573 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
574 struct nlattr **attrs)
576 struct net *net = sock_net(skb->sk);
577 struct xfrm_state *x;
578 int err = -ESRCH;
579 struct km_event c;
580 struct xfrm_usersa_id *p = nlmsg_data(nlh);
581 uid_t loginuid = audit_get_loginuid(current);
582 u32 sessionid = audit_get_sessionid(current);
583 u32 sid;
585 x = xfrm_user_state_lookup(net, p, attrs, &err);
586 if (x == NULL)
587 return err;
589 if ((err = security_xfrm_state_delete(x)) != 0)
590 goto out;
592 if (xfrm_state_kern(x)) {
593 err = -EPERM;
594 goto out;
597 err = xfrm_state_delete(x);
599 if (err < 0)
600 goto out;
602 c.seq = nlh->nlmsg_seq;
603 c.pid = nlh->nlmsg_pid;
604 c.event = nlh->nlmsg_type;
605 km_state_notify(x, &c);
607 out:
608 security_task_getsecid(current, &sid);
609 xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
610 xfrm_state_put(x);
611 return err;
614 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
616 memcpy(&p->id, &x->id, sizeof(p->id));
617 memcpy(&p->sel, &x->sel, sizeof(p->sel));
618 memcpy(&p->lft, &x->lft, sizeof(p->lft));
619 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
620 memcpy(&p->stats, &x->stats, sizeof(p->stats));
621 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
622 p->mode = x->props.mode;
623 p->replay_window = x->props.replay_window;
624 p->reqid = x->props.reqid;
625 p->family = x->props.family;
626 p->flags = x->props.flags;
627 p->seq = x->km.seq;
630 struct xfrm_dump_info {
631 struct sk_buff *in_skb;
632 struct sk_buff *out_skb;
633 u32 nlmsg_seq;
634 u16 nlmsg_flags;
637 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
639 struct xfrm_user_sec_ctx *uctx;
640 struct nlattr *attr;
641 int ctx_size = sizeof(*uctx) + s->ctx_len;
643 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
644 if (attr == NULL)
645 return -EMSGSIZE;
647 uctx = nla_data(attr);
648 uctx->exttype = XFRMA_SEC_CTX;
649 uctx->len = ctx_size;
650 uctx->ctx_doi = s->ctx_doi;
651 uctx->ctx_alg = s->ctx_alg;
652 uctx->ctx_len = s->ctx_len;
653 memcpy(uctx + 1, s->ctx_str, s->ctx_len);
655 return 0;
658 static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
660 struct xfrm_algo *algo;
661 struct nlattr *nla;
663 nla = nla_reserve(skb, XFRMA_ALG_AUTH,
664 sizeof(*algo) + (auth->alg_key_len + 7) / 8);
665 if (!nla)
666 return -EMSGSIZE;
668 algo = nla_data(nla);
669 strcpy(algo->alg_name, auth->alg_name);
670 memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
671 algo->alg_key_len = auth->alg_key_len;
673 return 0;
676 /* Don't change this without updating xfrm_sa_len! */
677 static int copy_to_user_state_extra(struct xfrm_state *x,
678 struct xfrm_usersa_info *p,
679 struct sk_buff *skb)
681 copy_to_user_state(x, p);
683 if (x->coaddr)
684 NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
686 if (x->lastused)
687 NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);
689 if (x->aead)
690 NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
691 if (x->aalg) {
692 if (copy_to_user_auth(x->aalg, skb))
693 goto nla_put_failure;
695 NLA_PUT(skb, XFRMA_ALG_AUTH_TRUNC,
696 xfrm_alg_auth_len(x->aalg), x->aalg);
698 if (x->ealg)
699 NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
700 if (x->calg)
701 NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
703 if (x->encap)
704 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
706 if (x->tfcpad)
707 NLA_PUT_U32(skb, XFRMA_TFCPAD, x->tfcpad);
709 if (xfrm_mark_put(skb, &x->mark))
710 goto nla_put_failure;
712 if (x->security && copy_sec_ctx(x->security, skb) < 0)
713 goto nla_put_failure;
715 return 0;
717 nla_put_failure:
718 return -EMSGSIZE;
721 static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
723 struct xfrm_dump_info *sp = ptr;
724 struct sk_buff *in_skb = sp->in_skb;
725 struct sk_buff *skb = sp->out_skb;
726 struct xfrm_usersa_info *p;
727 struct nlmsghdr *nlh;
728 int err;
730 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
731 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
732 if (nlh == NULL)
733 return -EMSGSIZE;
735 p = nlmsg_data(nlh);
737 err = copy_to_user_state_extra(x, p, skb);
738 if (err)
739 goto nla_put_failure;
741 nlmsg_end(skb, nlh);
742 return 0;
744 nla_put_failure:
745 nlmsg_cancel(skb, nlh);
746 return err;
749 static int xfrm_dump_sa_done(struct netlink_callback *cb)
751 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
752 xfrm_state_walk_done(walk);
753 return 0;
756 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
758 struct net *net = sock_net(skb->sk);
759 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
760 struct xfrm_dump_info info;
762 BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
763 sizeof(cb->args) - sizeof(cb->args[0]));
765 info.in_skb = cb->skb;
766 info.out_skb = skb;
767 info.nlmsg_seq = cb->nlh->nlmsg_seq;
768 info.nlmsg_flags = NLM_F_MULTI;
770 if (!cb->args[0]) {
771 cb->args[0] = 1;
772 xfrm_state_walk_init(walk, 0);
775 (void) xfrm_state_walk(net, walk, dump_one_state, &info);
777 return skb->len;
780 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
781 struct xfrm_state *x, u32 seq)
783 struct xfrm_dump_info info;
784 struct sk_buff *skb;
786 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
787 if (!skb)
788 return ERR_PTR(-ENOMEM);
790 info.in_skb = in_skb;
791 info.out_skb = skb;
792 info.nlmsg_seq = seq;
793 info.nlmsg_flags = 0;
795 if (dump_one_state(x, 0, &info)) {
796 kfree_skb(skb);
797 return NULL;
800 return skb;
803 static inline size_t xfrm_spdinfo_msgsize(void)
805 return NLMSG_ALIGN(4)
806 + nla_total_size(sizeof(struct xfrmu_spdinfo))
807 + nla_total_size(sizeof(struct xfrmu_spdhinfo));
810 static int build_spdinfo(struct sk_buff *skb, struct net *net,
811 u32 pid, u32 seq, u32 flags)
813 struct xfrmk_spdinfo si;
814 struct xfrmu_spdinfo spc;
815 struct xfrmu_spdhinfo sph;
816 struct nlmsghdr *nlh;
817 u32 *f;
819 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
820 if (nlh == NULL) /* shouldnt really happen ... */
821 return -EMSGSIZE;
823 f = nlmsg_data(nlh);
824 *f = flags;
825 xfrm_spd_getinfo(net, &si);
826 spc.incnt = si.incnt;
827 spc.outcnt = si.outcnt;
828 spc.fwdcnt = si.fwdcnt;
829 spc.inscnt = si.inscnt;
830 spc.outscnt = si.outscnt;
831 spc.fwdscnt = si.fwdscnt;
832 sph.spdhcnt = si.spdhcnt;
833 sph.spdhmcnt = si.spdhmcnt;
835 NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
836 NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
838 return nlmsg_end(skb, nlh);
840 nla_put_failure:
841 nlmsg_cancel(skb, nlh);
842 return -EMSGSIZE;
845 static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
846 struct nlattr **attrs)
848 struct net *net = sock_net(skb->sk);
849 struct sk_buff *r_skb;
850 u32 *flags = nlmsg_data(nlh);
851 u32 spid = NETLINK_CB(skb).pid;
852 u32 seq = nlh->nlmsg_seq;
854 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
855 if (r_skb == NULL)
856 return -ENOMEM;
858 if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0)
859 BUG();
861 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
864 static inline size_t xfrm_sadinfo_msgsize(void)
866 return NLMSG_ALIGN(4)
867 + nla_total_size(sizeof(struct xfrmu_sadhinfo))
868 + nla_total_size(4); /* XFRMA_SAD_CNT */
871 static int build_sadinfo(struct sk_buff *skb, struct net *net,
872 u32 pid, u32 seq, u32 flags)
874 struct xfrmk_sadinfo si;
875 struct xfrmu_sadhinfo sh;
876 struct nlmsghdr *nlh;
877 u32 *f;
879 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
880 if (nlh == NULL) /* shouldnt really happen ... */
881 return -EMSGSIZE;
883 f = nlmsg_data(nlh);
884 *f = flags;
885 xfrm_sad_getinfo(net, &si);
887 sh.sadhmcnt = si.sadhmcnt;
888 sh.sadhcnt = si.sadhcnt;
890 NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt);
891 NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
893 return nlmsg_end(skb, nlh);
895 nla_put_failure:
896 nlmsg_cancel(skb, nlh);
897 return -EMSGSIZE;
900 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
901 struct nlattr **attrs)
903 struct net *net = sock_net(skb->sk);
904 struct sk_buff *r_skb;
905 u32 *flags = nlmsg_data(nlh);
906 u32 spid = NETLINK_CB(skb).pid;
907 u32 seq = nlh->nlmsg_seq;
909 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
910 if (r_skb == NULL)
911 return -ENOMEM;
913 if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0)
914 BUG();
916 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
919 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
920 struct nlattr **attrs)
922 struct net *net = sock_net(skb->sk);
923 struct xfrm_usersa_id *p = nlmsg_data(nlh);
924 struct xfrm_state *x;
925 struct sk_buff *resp_skb;
926 int err = -ESRCH;
928 x = xfrm_user_state_lookup(net, p, attrs, &err);
929 if (x == NULL)
930 goto out_noput;
932 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
933 if (IS_ERR(resp_skb)) {
934 err = PTR_ERR(resp_skb);
935 } else {
936 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
938 xfrm_state_put(x);
939 out_noput:
940 return err;
943 static int verify_userspi_info(struct xfrm_userspi_info *p)
945 switch (p->info.id.proto) {
946 case IPPROTO_AH:
947 case IPPROTO_ESP:
948 break;
950 case IPPROTO_COMP:
951 /* IPCOMP spi is 16-bits. */
952 if (p->max >= 0x10000)
953 return -EINVAL;
954 break;
956 default:
957 return -EINVAL;
960 if (p->min > p->max)
961 return -EINVAL;
963 return 0;
966 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
967 struct nlattr **attrs)
969 struct net *net = sock_net(skb->sk);
970 struct xfrm_state *x;
971 struct xfrm_userspi_info *p;
972 struct sk_buff *resp_skb;
973 xfrm_address_t *daddr;
974 int family;
975 int err;
976 u32 mark;
977 struct xfrm_mark m;
979 p = nlmsg_data(nlh);
980 err = verify_userspi_info(p);
981 if (err)
982 goto out_noput;
984 family = p->info.family;
985 daddr = &p->info.id.daddr;
987 x = NULL;
989 mark = xfrm_mark_get(attrs, &m);
990 if (p->info.seq) {
991 x = xfrm_find_acq_byseq(net, mark, p->info.seq);
992 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
993 xfrm_state_put(x);
994 x = NULL;
998 if (!x)
999 x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
1000 p->info.id.proto, daddr,
1001 &p->info.saddr, 1,
1002 family);
1003 err = -ENOENT;
1004 if (x == NULL)
1005 goto out_noput;
1007 err = xfrm_alloc_spi(x, p->min, p->max);
1008 if (err)
1009 goto out;
1011 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1012 if (IS_ERR(resp_skb)) {
1013 err = PTR_ERR(resp_skb);
1014 goto out;
1017 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
1019 out:
1020 xfrm_state_put(x);
1021 out_noput:
1022 return err;
1025 static int verify_policy_dir(u8 dir)
1027 switch (dir) {
1028 case XFRM_POLICY_IN:
1029 case XFRM_POLICY_OUT:
1030 case XFRM_POLICY_FWD:
1031 break;
1033 default:
1034 return -EINVAL;
1037 return 0;
1040 static int verify_policy_type(u8 type)
1042 switch (type) {
1043 case XFRM_POLICY_TYPE_MAIN:
1044 #ifdef CONFIG_XFRM_SUB_POLICY
1045 case XFRM_POLICY_TYPE_SUB:
1046 #endif
1047 break;
1049 default:
1050 return -EINVAL;
1053 return 0;
1056 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
1058 switch (p->share) {
1059 case XFRM_SHARE_ANY:
1060 case XFRM_SHARE_SESSION:
1061 case XFRM_SHARE_USER:
1062 case XFRM_SHARE_UNIQUE:
1063 break;
1065 default:
1066 return -EINVAL;
1069 switch (p->action) {
1070 case XFRM_POLICY_ALLOW:
1071 case XFRM_POLICY_BLOCK:
1072 break;
1074 default:
1075 return -EINVAL;
1078 switch (p->sel.family) {
1079 case AF_INET:
1080 break;
1082 case AF_INET6:
1083 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1084 break;
1085 #else
1086 return -EAFNOSUPPORT;
1087 #endif
1089 default:
1090 return -EINVAL;
1093 return verify_policy_dir(p->dir);
1096 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
1098 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1099 struct xfrm_user_sec_ctx *uctx;
1101 if (!rt)
1102 return 0;
1104 uctx = nla_data(rt);
1105 return security_xfrm_policy_alloc(&pol->security, uctx);
1108 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
1109 int nr)
1111 int i;
1113 xp->xfrm_nr = nr;
1114 for (i = 0; i < nr; i++, ut++) {
1115 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1117 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
1118 memcpy(&t->saddr, &ut->saddr,
1119 sizeof(xfrm_address_t));
1120 t->reqid = ut->reqid;
1121 t->mode = ut->mode;
1122 t->share = ut->share;
1123 t->optional = ut->optional;
1124 t->aalgos = ut->aalgos;
1125 t->ealgos = ut->ealgos;
1126 t->calgos = ut->calgos;
1127 /* If all masks are ~0, then we allow all algorithms. */
1128 t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
1129 t->encap_family = ut->family;
1133 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1135 int i;
1137 if (nr > XFRM_MAX_DEPTH)
1138 return -EINVAL;
1140 for (i = 0; i < nr; i++) {
1141 /* We never validated the ut->family value, so many
1142 * applications simply leave it at zero. The check was
1143 * never made and ut->family was ignored because all
1144 * templates could be assumed to have the same family as
1145 * the policy itself. Now that we will have ipv4-in-ipv6
1146 * and ipv6-in-ipv4 tunnels, this is no longer true.
1148 if (!ut[i].family)
1149 ut[i].family = family;
1151 switch (ut[i].family) {
1152 case AF_INET:
1153 break;
1154 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1155 case AF_INET6:
1156 break;
1157 #endif
1158 default:
1159 return -EINVAL;
1163 return 0;
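/*
 * Editor's example (not in the original source) of why the family
 * defaulting above matters: for an IPv6-over-IPv4 tunnel the policy
 * selector and a template legitimately carry different families.
 *
 *	struct xfrm_user_tmpl ut = {
 *		.id.proto = IPPROTO_ESP,
 *		.mode     = XFRM_MODE_TUNNEL,
 *		.family   = AF_INET,	(outer/tunnel addresses are IPv4)
 *	};
 *
 *	attached to a policy whose selector family is AF_INET6.  A template
 *	that leaves .family at zero is simply defaulted to the policy family.
 */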
1166 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
1168 struct nlattr *rt = attrs[XFRMA_TMPL];
1170 if (!rt) {
1171 pol->xfrm_nr = 0;
1172 } else {
1173 struct xfrm_user_tmpl *utmpl = nla_data(rt);
1174 int nr = nla_len(rt) / sizeof(*utmpl);
1175 int err;
1177 err = validate_tmpl(nr, utmpl, pol->family);
1178 if (err)
1179 return err;
1181 copy_templates(pol, utmpl, nr);
1183 return 0;
1186 static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
1188 struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
1189 struct xfrm_userpolicy_type *upt;
1190 u8 type = XFRM_POLICY_TYPE_MAIN;
1191 int err;
1193 if (rt) {
1194 upt = nla_data(rt);
1195 type = upt->type;
1198 err = verify_policy_type(type);
1199 if (err)
1200 return err;
1202 *tp = type;
1203 return 0;
1206 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
1208 xp->priority = p->priority;
1209 xp->index = p->index;
1210 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
1211 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
1212 xp->action = p->action;
1213 xp->flags = p->flags;
1214 xp->family = p->sel.family;
1215 /* XXX xp->share = p->share; */
1218 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
1220 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
1221 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
1222 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
1223 p->priority = xp->priority;
1224 p->index = xp->index;
1225 p->sel.family = xp->family;
1226 p->dir = dir;
1227 p->action = xp->action;
1228 p->flags = xp->flags;
1229 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
1232 static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
1234 struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
1235 int err;
1237 if (!xp) {
1238 *errp = -ENOMEM;
1239 return NULL;
1242 copy_from_user_policy(xp, p);
1244 err = copy_from_user_policy_type(&xp->type, attrs);
1245 if (err)
1246 goto error;
1248 if (!(err = copy_from_user_tmpl(xp, attrs)))
1249 err = copy_from_user_sec_ctx(xp, attrs);
1250 if (err)
1251 goto error;
1253 xfrm_mark_get(attrs, &xp->mark);
1255 return xp;
1256 error:
1257 *errp = err;
1258 xp->walk.dead = 1;
1259 xfrm_policy_destroy(xp);
1260 return NULL;
1263 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1264 struct nlattr **attrs)
1266 struct net *net = sock_net(skb->sk);
1267 struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
1268 struct xfrm_policy *xp;
1269 struct km_event c;
1270 int err;
1271 int excl;
1272 uid_t loginuid = audit_get_loginuid(current);
1273 u32 sessionid = audit_get_sessionid(current);
1274 u32 sid;
1276 err = verify_newpolicy_info(p);
1277 if (err)
1278 return err;
1279 err = verify_sec_ctx_len(attrs);
1280 if (err)
1281 return err;
1283 xp = xfrm_policy_construct(net, p, attrs, &err);
1284 if (!xp)
1285 return err;
1287 /* shouldnt excl be based on nlh flags??
1288 * Aha! this is anti-netlink really i.e more pfkey derived
1289 * in netlink excl is a flag and you wouldnt need
1290 * a type XFRM_MSG_UPDPOLICY - JHS */
1291 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
1292 err = xfrm_policy_insert(p->dir, xp, excl);
1293 security_task_getsecid(current, &sid);
1294 xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
1296 if (err) {
1297 security_xfrm_policy_free(xp->security);
1298 kfree(xp);
1299 return err;
1302 c.event = nlh->nlmsg_type;
1303 c.seq = nlh->nlmsg_seq;
1304 c.pid = nlh->nlmsg_pid;
1305 km_policy_notify(xp, p->dir, &c);
1307 xfrm_pol_put(xp);
1309 return 0;
1312 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
1314 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
1315 int i;
1317 if (xp->xfrm_nr == 0)
1318 return 0;
1320 for (i = 0; i < xp->xfrm_nr; i++) {
1321 struct xfrm_user_tmpl *up = &vec[i];
1322 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
1324 memcpy(&up->id, &kp->id, sizeof(up->id));
1325 up->family = kp->encap_family;
1326 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
1327 up->reqid = kp->reqid;
1328 up->mode = kp->mode;
1329 up->share = kp->share;
1330 up->optional = kp->optional;
1331 up->aalgos = kp->aalgos;
1332 up->ealgos = kp->ealgos;
1333 up->calgos = kp->calgos;
1336 return nla_put(skb, XFRMA_TMPL,
1337 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
1340 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1342 if (x->security) {
1343 return copy_sec_ctx(x->security, skb);
1345 return 0;
1348 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1350 if (xp->security) {
1351 return copy_sec_ctx(xp->security, skb);
1353 return 0;
1355 static inline size_t userpolicy_type_attrsize(void)
1357 #ifdef CONFIG_XFRM_SUB_POLICY
1358 return nla_total_size(sizeof(struct xfrm_userpolicy_type));
1359 #else
1360 return 0;
1361 #endif
1364 #ifdef CONFIG_XFRM_SUB_POLICY
1365 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1367 struct xfrm_userpolicy_type upt = {
1368 .type = type,
1371 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
1374 #else
1375 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1377 return 0;
1379 #endif
1381 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
1383 struct xfrm_dump_info *sp = ptr;
1384 struct xfrm_userpolicy_info *p;
1385 struct sk_buff *in_skb = sp->in_skb;
1386 struct sk_buff *skb = sp->out_skb;
1387 struct nlmsghdr *nlh;
1389 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
1390 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
1391 if (nlh == NULL)
1392 return -EMSGSIZE;
1394 p = nlmsg_data(nlh);
1395 copy_to_user_policy(xp, p, dir);
1396 if (copy_to_user_tmpl(xp, skb) < 0)
1397 goto nlmsg_failure;
1398 if (copy_to_user_sec_ctx(xp, skb))
1399 goto nlmsg_failure;
1400 if (copy_to_user_policy_type(xp->type, skb) < 0)
1401 goto nlmsg_failure;
1402 if (xfrm_mark_put(skb, &xp->mark))
1403 goto nla_put_failure;
1405 nlmsg_end(skb, nlh);
1406 return 0;
1408 nla_put_failure:
1409 nlmsg_failure:
1410 nlmsg_cancel(skb, nlh);
1411 return -EMSGSIZE;
1414 static int xfrm_dump_policy_done(struct netlink_callback *cb)
1416 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1418 xfrm_policy_walk_done(walk);
1419 return 0;
1422 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1424 struct net *net = sock_net(skb->sk);
1425 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1426 struct xfrm_dump_info info;
1428 BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
1429 sizeof(cb->args) - sizeof(cb->args[0]));
1431 info.in_skb = cb->skb;
1432 info.out_skb = skb;
1433 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1434 info.nlmsg_flags = NLM_F_MULTI;
1436 if (!cb->args[0]) {
1437 cb->args[0] = 1;
1438 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1441 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
1443 return skb->len;
1446 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1447 struct xfrm_policy *xp,
1448 int dir, u32 seq)
1450 struct xfrm_dump_info info;
1451 struct sk_buff *skb;
1453 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1454 if (!skb)
1455 return ERR_PTR(-ENOMEM);
1457 info.in_skb = in_skb;
1458 info.out_skb = skb;
1459 info.nlmsg_seq = seq;
1460 info.nlmsg_flags = 0;
1462 if (dump_one_policy(xp, dir, 0, &info) < 0) {
1463 kfree_skb(skb);
1464 return NULL;
1467 return skb;
1470 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1471 struct nlattr **attrs)
1473 struct net *net = sock_net(skb->sk);
1474 struct xfrm_policy *xp;
1475 struct xfrm_userpolicy_id *p;
1476 u8 type = XFRM_POLICY_TYPE_MAIN;
1477 int err;
1478 struct km_event c;
1479 int delete;
1480 struct xfrm_mark m;
1481 u32 mark = xfrm_mark_get(attrs, &m);
1483 p = nlmsg_data(nlh);
1484 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
1486 err = copy_from_user_policy_type(&type, attrs);
1487 if (err)
1488 return err;
1490 err = verify_policy_dir(p->dir);
1491 if (err)
1492 return err;
1494 if (p->index)
1495 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
1496 else {
1497 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1498 struct xfrm_sec_ctx *ctx;
1500 err = verify_sec_ctx_len(attrs);
1501 if (err)
1502 return err;
1504 ctx = NULL;
1505 if (rt) {
1506 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1508 err = security_xfrm_policy_alloc(&ctx, uctx);
1509 if (err)
1510 return err;
1512 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
1513 ctx, delete, &err);
1514 security_xfrm_policy_free(ctx);
1516 if (xp == NULL)
1517 return -ENOENT;
1519 if (!delete) {
1520 struct sk_buff *resp_skb;
1522 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
1523 if (IS_ERR(resp_skb)) {
1524 err = PTR_ERR(resp_skb);
1525 } else {
1526 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
1527 NETLINK_CB(skb).pid);
1529 } else {
1530 uid_t loginuid = audit_get_loginuid(current);
1531 u32 sessionid = audit_get_sessionid(current);
1532 u32 sid;
1534 security_task_getsecid(current, &sid);
1535 xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
1536 sid);
1538 if (err != 0)
1539 goto out;
1541 c.data.byid = p->index;
1542 c.event = nlh->nlmsg_type;
1543 c.seq = nlh->nlmsg_seq;
1544 c.pid = nlh->nlmsg_pid;
1545 km_policy_notify(xp, p->dir, &c);
1548 out:
1549 xfrm_pol_put(xp);
1550 return err;
1553 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1554 struct nlattr **attrs)
1556 struct net *net = sock_net(skb->sk);
1557 struct km_event c;
1558 struct xfrm_usersa_flush *p = nlmsg_data(nlh);
1559 struct xfrm_audit audit_info;
1560 int err;
1562 audit_info.loginuid = audit_get_loginuid(current);
1563 audit_info.sessionid = audit_get_sessionid(current);
1564 security_task_getsecid(current, &audit_info.secid);
1565 err = xfrm_state_flush(net, p->proto, &audit_info);
1566 if (err) {
1567 if (err == -ESRCH) /* empty table */
1568 return 0;
1569 return err;
1571 c.data.proto = p->proto;
1572 c.event = nlh->nlmsg_type;
1573 c.seq = nlh->nlmsg_seq;
1574 c.pid = nlh->nlmsg_pid;
1575 c.net = net;
1576 km_state_notify(NULL, &c);
1578 return 0;
1581 static inline size_t xfrm_aevent_msgsize(void)
1583 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
1584 + nla_total_size(sizeof(struct xfrm_replay_state))
1585 + nla_total_size(sizeof(struct xfrm_lifetime_cur))
1586 + nla_total_size(sizeof(struct xfrm_mark))
1587 + nla_total_size(4) /* XFRM_AE_RTHR */
1588 + nla_total_size(4); /* XFRM_AE_ETHR */
1591 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
1593 struct xfrm_aevent_id *id;
1594 struct nlmsghdr *nlh;
1596 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
1597 if (nlh == NULL)
1598 return -EMSGSIZE;
1600 id = nlmsg_data(nlh);
1601 memcpy(&id->sa_id.daddr, &x->id.daddr,sizeof(x->id.daddr));
1602 id->sa_id.spi = x->id.spi;
1603 id->sa_id.family = x->props.family;
1604 id->sa_id.proto = x->id.proto;
1605 memcpy(&id->saddr, &x->props.saddr,sizeof(x->props.saddr));
1606 id->reqid = x->props.reqid;
1607 id->flags = c->data.aevent;
1609 NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
1610 NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
1612 if (id->flags & XFRM_AE_RTHR)
1613 NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
1615 if (id->flags & XFRM_AE_ETHR)
1616 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
1617 x->replay_maxage * 10 / HZ);
1619 if (xfrm_mark_put(skb, &x->mark))
1620 goto nla_put_failure;
1622 return nlmsg_end(skb, nlh);
1624 nla_put_failure:
1625 nlmsg_cancel(skb, nlh);
1626 return -EMSGSIZE;
1629 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1630 struct nlattr **attrs)
1632 struct net *net = sock_net(skb->sk);
1633 struct xfrm_state *x;
1634 struct sk_buff *r_skb;
1635 int err;
1636 struct km_event c;
1637 u32 mark;
1638 struct xfrm_mark m;
1639 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1640 struct xfrm_usersa_id *id = &p->sa_id;
1642 r_skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
1643 if (r_skb == NULL)
1644 return -ENOMEM;
1646 mark = xfrm_mark_get(attrs, &m);
1648 x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
1649 if (x == NULL) {
1650 kfree_skb(r_skb);
1651 return -ESRCH;
1655 * XXX: is this lock really needed - none of the other
1656 * gets lock (the concern is things getting updated
1657 * while we are still reading) - jhs
1659 spin_lock_bh(&x->lock);
1660 c.data.aevent = p->flags;
1661 c.seq = nlh->nlmsg_seq;
1662 c.pid = nlh->nlmsg_pid;
1664 if (build_aevent(r_skb, x, &c) < 0)
1665 BUG();
1666 err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).pid);
1667 spin_unlock_bh(&x->lock);
1668 xfrm_state_put(x);
1669 return err;
1672 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1673 struct nlattr **attrs)
1675 struct net *net = sock_net(skb->sk);
1676 struct xfrm_state *x;
1677 struct km_event c;
1678 int err = - EINVAL;
1679 u32 mark = 0;
1680 struct xfrm_mark m;
1681 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1682 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
1683 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
1685 if (!lt && !rp)
1686 return err;
1688 /* pedantic mode - thou shalt sayeth replaceth */
1689 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1690 return err;
1692 mark = xfrm_mark_get(attrs, &m);
1694 x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1695 if (x == NULL)
1696 return -ESRCH;
1698 if (x->km.state != XFRM_STATE_VALID)
1699 goto out;
1701 spin_lock_bh(&x->lock);
1702 xfrm_update_ae_params(x, attrs);
1703 spin_unlock_bh(&x->lock);
1705 c.event = nlh->nlmsg_type;
1706 c.seq = nlh->nlmsg_seq;
1707 c.pid = nlh->nlmsg_pid;
1708 c.data.aevent = XFRM_AE_CU;
1709 km_state_notify(x, &c);
1710 err = 0;
1711 out:
1712 xfrm_state_put(x);
1713 return err;
1716 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1717 struct nlattr **attrs)
1719 struct net *net = sock_net(skb->sk);
1720 struct km_event c;
1721 u8 type = XFRM_POLICY_TYPE_MAIN;
1722 int err;
1723 struct xfrm_audit audit_info;
1725 err = copy_from_user_policy_type(&type, attrs);
1726 if (err)
1727 return err;
1729 audit_info.loginuid = audit_get_loginuid(current);
1730 audit_info.sessionid = audit_get_sessionid(current);
1731 security_task_getsecid(current, &audit_info.secid);
1732 err = xfrm_policy_flush(net, type, &audit_info);
1733 if (err) {
1734 if (err == -ESRCH) /* empty table */
1735 return 0;
1736 return err;
1739 c.data.type = type;
1740 c.event = nlh->nlmsg_type;
1741 c.seq = nlh->nlmsg_seq;
1742 c.pid = nlh->nlmsg_pid;
1743 c.net = net;
1744 km_policy_notify(NULL, 0, &c);
1745 return 0;
1748 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1749 struct nlattr **attrs)
1751 struct net *net = sock_net(skb->sk);
1752 struct xfrm_policy *xp;
1753 struct xfrm_user_polexpire *up = nlmsg_data(nlh);
1754 struct xfrm_userpolicy_info *p = &up->pol;
1755 u8 type = XFRM_POLICY_TYPE_MAIN;
1756 int err = -ENOENT;
1757 struct xfrm_mark m;
1758 u32 mark = xfrm_mark_get(attrs, &m);
1760 err = copy_from_user_policy_type(&type, attrs);
1761 if (err)
1762 return err;
1764 err = verify_policy_dir(p->dir);
1765 if (err)
1766 return err;
1768 if (p->index)
1769 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
1770 else {
1771 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1772 struct xfrm_sec_ctx *ctx;
1774 err = verify_sec_ctx_len(attrs);
1775 if (err)
1776 return err;
1778 ctx = NULL;
1779 if (rt) {
1780 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1782 err = security_xfrm_policy_alloc(&ctx, uctx);
1783 if (err)
1784 return err;
1786 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
1787 &p->sel, ctx, 0, &err);
1788 security_xfrm_policy_free(ctx);
1790 if (xp == NULL)
1791 return -ENOENT;
1793 if (unlikely(xp->walk.dead))
1794 goto out;
1796 err = 0;
1797 if (up->hard) {
1798 uid_t loginuid = audit_get_loginuid(current);
1799 u32 sessionid = audit_get_sessionid(current);
1800 u32 sid;
1802 security_task_getsecid(current, &sid);
1803 xfrm_policy_delete(xp, p->dir);
1804 xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
1806 } else {
1807 // reset the timers here?
1808 WARN(1, "Dont know what to do with soft policy expire\n");
1810 km_policy_expired(xp, p->dir, up->hard, current->pid);
1812 out:
1813 xfrm_pol_put(xp);
1814 return err;
1817 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1818 struct nlattr **attrs)
1820 struct net *net = sock_net(skb->sk);
1821 struct xfrm_state *x;
1822 int err;
1823 struct xfrm_user_expire *ue = nlmsg_data(nlh);
1824 struct xfrm_usersa_info *p = &ue->state;
1825 struct xfrm_mark m;
1826 u32 mark = xfrm_mark_get(attrs, &m);
1828 x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
1830 err = -ENOENT;
1831 if (x == NULL)
1832 return err;
1834 spin_lock_bh(&x->lock);
1835 err = -EINVAL;
1836 if (x->km.state != XFRM_STATE_VALID)
1837 goto out;
1838 km_state_expired(x, ue->hard, current->pid);
1840 if (ue->hard) {
1841 uid_t loginuid = audit_get_loginuid(current);
1842 u32 sessionid = audit_get_sessionid(current);
1843 u32 sid;
1845 security_task_getsecid(current, &sid);
1846 __xfrm_state_delete(x);
1847 xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
1849 err = 0;
1850 out:
1851 spin_unlock_bh(&x->lock);
1852 xfrm_state_put(x);
1853 return err;
1856 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1857 struct nlattr **attrs)
1859 struct net *net = sock_net(skb->sk);
1860 struct xfrm_policy *xp;
1861 struct xfrm_user_tmpl *ut;
1862 int i;
1863 struct nlattr *rt = attrs[XFRMA_TMPL];
1864 struct xfrm_mark mark;
1866 struct xfrm_user_acquire *ua = nlmsg_data(nlh);
1867 struct xfrm_state *x = xfrm_state_alloc(net);
1868 int err = -ENOMEM;
1870 if (!x)
1871 goto nomem;
1873 xfrm_mark_get(attrs, &mark);
1875 err = verify_newpolicy_info(&ua->policy);
1876 if (err)
1877 goto bad_policy;
1879 /* build an XP */
1880 xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
1881 if (!xp)
1882 goto free_state;
1884 memcpy(&x->id, &ua->id, sizeof(ua->id));
1885 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
1886 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
1887 xp->mark.m = x->mark.m = mark.m;
1888 xp->mark.v = x->mark.v = mark.v;
1889 ut = nla_data(rt);
1890 /* extract the templates and for each call km_key */
1891 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
1892 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1893 memcpy(&x->id, &t->id, sizeof(x->id));
1894 x->props.mode = t->mode;
1895 x->props.reqid = t->reqid;
1896 x->props.family = ut->family;
1897 t->aalgos = ua->aalgos;
1898 t->ealgos = ua->ealgos;
1899 t->calgos = ua->calgos;
1900 err = km_query(x, t, xp);
1904 kfree(x);
1905 kfree(xp);
1907 return 0;
1909 bad_policy:
1910 WARN(1, "BAD policy passed\n");
1911 free_state:
1912 kfree(x);
1913 nomem:
1914 return err;
1917 #ifdef CONFIG_XFRM_MIGRATE
1918 static int copy_from_user_migrate(struct xfrm_migrate *ma,
1919 struct xfrm_kmaddress *k,
1920 struct nlattr **attrs, int *num)
1922 struct nlattr *rt = attrs[XFRMA_MIGRATE];
1923 struct xfrm_user_migrate *um;
1924 int i, num_migrate;
1926 if (k != NULL) {
1927 struct xfrm_user_kmaddress *uk;
1929 uk = nla_data(attrs[XFRMA_KMADDRESS]);
1930 memcpy(&k->local, &uk->local, sizeof(k->local));
1931 memcpy(&k->remote, &uk->remote, sizeof(k->remote));
1932 k->family = uk->family;
1933 k->reserved = uk->reserved;
1936 um = nla_data(rt);
1937 num_migrate = nla_len(rt) / sizeof(*um);
1939 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
1940 return -EINVAL;
1942 for (i = 0; i < num_migrate; i++, um++, ma++) {
1943 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
1944 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
1945 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
1946 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
1948 ma->proto = um->proto;
1949 ma->mode = um->mode;
1950 ma->reqid = um->reqid;
1952 ma->old_family = um->old_family;
1953 ma->new_family = um->new_family;
1956 *num = i;
1957 return 0;
1960 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1961 struct nlattr **attrs)
1963 struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
1964 struct xfrm_migrate m[XFRM_MAX_DEPTH];
1965 struct xfrm_kmaddress km, *kmp;
1966 u8 type;
1967 int err;
1968 int n = 0;
1970 if (attrs[XFRMA_MIGRATE] == NULL)
1971 return -EINVAL;
1973 kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;
1975 err = copy_from_user_policy_type(&type, attrs);
1976 if (err)
1977 return err;
1979 err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
1980 if (err)
1981 return err;
1983 if (!n)
1984 return 0;
1986 xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp);
1988 return 0;
1990 #else
1991 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1992 struct nlattr **attrs)
1994 return -ENOPROTOOPT;
1996 #endif
1998 #ifdef CONFIG_XFRM_MIGRATE
1999 static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
2001 struct xfrm_user_migrate um;
2003 memset(&um, 0, sizeof(um));
2004 um.proto = m->proto;
2005 um.mode = m->mode;
2006 um.reqid = m->reqid;
2007 um.old_family = m->old_family;
2008 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
2009 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
2010 um.new_family = m->new_family;
2011 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
2012 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
2014 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
2017 static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
2019 struct xfrm_user_kmaddress uk;
2021 memset(&uk, 0, sizeof(uk));
2022 uk.family = k->family;
2023 uk.reserved = k->reserved;
2024 memcpy(&uk.local, &k->local, sizeof(uk.local));
2025 memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
2027 return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
2030 static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
2032 return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
2033 + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
2034 + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
2035 + userpolicy_type_attrsize();
2038 static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
2039 int num_migrate, const struct xfrm_kmaddress *k,
2040 const struct xfrm_selector *sel, u8 dir, u8 type)
2042 const struct xfrm_migrate *mp;
2043 struct xfrm_userpolicy_id *pol_id;
2044 struct nlmsghdr *nlh;
2045 int i;
2047 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
2048 if (nlh == NULL)
2049 return -EMSGSIZE;
2051 pol_id = nlmsg_data(nlh);
2052 /* copy data from selector, dir, and type to the pol_id */
2053 memset(pol_id, 0, sizeof(*pol_id));
2054 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
2055 pol_id->dir = dir;
2057 if (k != NULL && (copy_to_user_kmaddress(k, skb) < 0))
2058 goto nlmsg_failure;
2060 if (copy_to_user_policy_type(type, skb) < 0)
2061 goto nlmsg_failure;
2063 for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
2064 if (copy_to_user_migrate(mp, skb) < 0)
2065 goto nlmsg_failure;
2068 return nlmsg_end(skb, nlh);
2069 nlmsg_failure:
2070 nlmsg_cancel(skb, nlh);
2071 return -EMSGSIZE;
2074 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2075 const struct xfrm_migrate *m, int num_migrate,
2076 const struct xfrm_kmaddress *k)
2078 struct net *net = &init_net;
2079 struct sk_buff *skb;
2081 skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
2082 if (skb == NULL)
2083 return -ENOMEM;
2085 /* build migrate */
2086 if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
2087 BUG();
2089 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
2091 #else
2092 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2093 const struct xfrm_migrate *m, int num_migrate,
2094 const struct xfrm_kmaddress *k)
2096 return -ENOPROTOOPT;
2098 #endif
2100 #define XMSGSIZE(type) sizeof(struct type)
2102 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
2103 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
2104 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
2105 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
2106 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
2107 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2108 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2109 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
2110 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
2111 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
2112 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
2113 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
2114 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
2115 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
2116 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
2117 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
2118 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
2119 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
2120 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2121 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
2122 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
2125 #undef XMSGSIZE
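/*
 * Editor's sketch of a minimal user-space request (not part of this
 * file): xfrm_msg_min[] above is the fixed payload each message type
 * must carry before any netlink attributes.  XFRM_MSG_GETSADINFO, for
 * example, carries only a u32 of flags:
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
 *	struct {
 *		struct nlmsghdr n;
 *		__u32 flags;
 *	} req = {
 *		.n.nlmsg_len   = NLMSG_LENGTH(sizeof(__u32)),
 *		.n.nlmsg_type  = XFRM_MSG_GETSADINFO,
 *		.n.nlmsg_flags = NLM_F_REQUEST,
 *	};
 *	send(fd, &req, req.n.nlmsg_len, 0);
 *
 * The reply is the XFRM_MSG_NEWSADINFO message built by build_sadinfo()
 * above; error handling and socket binding are omitted for brevity.
 */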
2127 static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
2128 [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
2129 [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
2130 [XFRMA_LASTUSED] = { .type = NLA_U64},
2131 [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
2132 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
2133 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
2134 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
2135 [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
2136 [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
2137 [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
2138 [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
2139 [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
2140 [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
2141 [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
2142 [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
2143 [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
2144 [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
2145 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
2146 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
2147 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
2148 [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
2149 [XFRMA_TFCPAD] = { .type = NLA_U32 },
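/*
 * Editor's note (sketch, not in the original source): attributes reaching
 * the handlers have already been validated against this table by the
 * nlmsg_parse() call in xfrm_user_rcv_msg() below.  Entries that specify
 * .type (NLA_U32, NLA_U64) get the corresponding fixed-size check, while
 * entries that specify .len enforce a minimum payload length.  A sender
 * attaching a mark must therefore supply at least sizeof(struct xfrm_mark)
 * bytes:
 *
 *	struct xfrm_mark mark = { .v = 0x1, .m = 0xffffffff };	(example values)
 *	(appended as nla_type == XFRMA_MARK, payload &mark, length sizeof(mark))
 */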
2152 static struct xfrm_link {
2153 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
2154 int (*dump)(struct sk_buff *, struct netlink_callback *);
2155 int (*done)(struct netlink_callback *);
2156 } xfrm_dispatch[XFRM_NR_MSGTYPES] = {
2157 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
2158 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
2159 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
2160 .dump = xfrm_dump_sa,
2161 .done = xfrm_dump_sa_done },
2162 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
2163 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
2164 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
2165 .dump = xfrm_dump_policy,
2166 .done = xfrm_dump_policy_done },
2167 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
2168 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
2169 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
2170 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
2171 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
2172 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
2173 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
2174 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
2175 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
2176 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
2177 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
2178 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
2179 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
2182 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2184 struct net *net = sock_net(skb->sk);
2185 struct nlattr *attrs[XFRMA_MAX+1];
2186 struct xfrm_link *link;
2187 int type, err;
2189 type = nlh->nlmsg_type;
2190 if (type > XFRM_MSG_MAX)
2191 return -EINVAL;
2193 type -= XFRM_MSG_BASE;
2194 link = &xfrm_dispatch[type];
2196 /* All operations require privileges, even GET */
2197 if (security_netlink_recv(skb, CAP_NET_ADMIN))
2198 return -EPERM;
2200 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
2201 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
2202 (nlh->nlmsg_flags & NLM_F_DUMP)) {
2203 if (link->dump == NULL)
2204 return -EINVAL;
2206 return netlink_dump_start(net->xfrm.nlsk, skb, nlh, link->dump, link->done);
2209 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
2210 xfrma_policy);
2211 if (err < 0)
2212 return err;
2214 if (link->doit == NULL)
2215 return -EINVAL;
2217 return link->doit(skb, nlh, attrs);
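/*
 * Editor's sketch of the receive path (not in the original file):
 *
 *	xfrm_netlink_rcv()                 takes xfrm_cfg_mutex
 *	  -> netlink_rcv_skb()             iterates the messages in the skb
 *	      -> xfrm_user_rcv_msg()       CAP_NET_ADMIN check, nlmsg_parse()
 *	          -> xfrm_dispatch[type - XFRM_MSG_BASE].doit(), e.g. xfrm_add_sa(),
 *	             or netlink_dump_start() for GETSA/GETPOLICY with NLM_F_DUMP
 */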
2220 static void xfrm_netlink_rcv(struct sk_buff *skb)
2222 mutex_lock(&xfrm_cfg_mutex);
2223 netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
2224 mutex_unlock(&xfrm_cfg_mutex);
2227 static inline size_t xfrm_expire_msgsize(void)
2229 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
2230 + nla_total_size(sizeof(struct xfrm_mark));
2233 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
2235 struct xfrm_user_expire *ue;
2236 struct nlmsghdr *nlh;
2238 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
2239 if (nlh == NULL)
2240 return -EMSGSIZE;
2242 ue = nlmsg_data(nlh);
2243 copy_to_user_state(x, &ue->state);
2244 ue->hard = (c->data.hard != 0) ? 1 : 0;
2246 if (xfrm_mark_put(skb, &x->mark))
2247 goto nla_put_failure;
2249 return nlmsg_end(skb, nlh);
2251 nla_put_failure:
2252 return -EMSGSIZE;
2253 }
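/*
 * Editor's note (not part of the original file): build_expire() must stay
 * in sync with xfrm_expire_msgsize() above.  The skb allocated by
 * xfrm_exp_state_notify() has room for exactly one xfrm_user_expire header
 * plus an XFRMA_MARK attribute, so any attribute added here needs a
 * matching nla_total_size() term in the msgsize helper or the put will
 * fail with -EMSGSIZE.
 */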
2255 static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
2256 {
2257 struct net *net = xs_net(x);
2258 struct sk_buff *skb;
2260 skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
2261 if (skb == NULL)
2262 return -ENOMEM;
2264 if (build_expire(skb, x, c) < 0) {
2265 kfree_skb(skb);
2266 return -EMSGSIZE;
2267 }
2269 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2270 }
2272 static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
2273 {
2274 struct net *net = xs_net(x);
2275 struct sk_buff *skb;
2277 skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
2278 if (skb == NULL)
2279 return -ENOMEM;
2281 if (build_aevent(skb, x, c) < 0)
2282 BUG();
2284 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
2285 }
2287 static int xfrm_notify_sa_flush(const struct km_event *c)
2288 {
2289 struct net *net = c->net;
2290 struct xfrm_usersa_flush *p;
2291 struct nlmsghdr *nlh;
2292 struct sk_buff *skb;
2293 int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
2295 skb = nlmsg_new(len, GFP_ATOMIC);
2296 if (skb == NULL)
2297 return -ENOMEM;
2299 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
2300 if (nlh == NULL) {
2301 kfree_skb(skb);
2302 return -EMSGSIZE;
2303 }
2305 p = nlmsg_data(nlh);
2306 p->proto = c->data.proto;
2308 nlmsg_end(skb, nlh);
2310 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2311 }
2313 static inline size_t xfrm_sa_len(struct xfrm_state *x)
2314 {
2315 size_t l = 0;
2316 if (x->aead)
2317 l += nla_total_size(aead_len(x->aead));
2318 if (x->aalg) {
2319 l += nla_total_size(sizeof(struct xfrm_algo) +
2320 (x->aalg->alg_key_len + 7) / 8);
2321 l += nla_total_size(xfrm_alg_auth_len(x->aalg));
2322 }
2323 if (x->ealg)
2324 l += nla_total_size(xfrm_alg_len(x->ealg));
2325 if (x->calg)
2326 l += nla_total_size(sizeof(*x->calg));
2327 if (x->encap)
2328 l += nla_total_size(sizeof(*x->encap));
2329 if (x->tfcpad)
2330 l += nla_total_size(sizeof(x->tfcpad));
2331 if (x->security)
2332 l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
2333 x->security->ctx_len);
2334 if (x->coaddr)
2335 l += nla_total_size(sizeof(*x->coaddr));
2337 /* Must count x->lastused as it may become non-zero behind our back. */
2338 l += nla_total_size(sizeof(u64));
2340 return l;
2341 }
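/*
 * Editor's note (not part of the original file): xfrm_sa_len() is only an
 * upper-bound estimate used to size the notification skb in
 * xfrm_notify_sa().  Over-estimating merely wastes a little memory;
 * under-estimating makes copy_to_user_state_extra() run out of tailroom,
 * which is why that failure path WARN_ON()s -- it means this estimate and
 * the copy routine have drifted apart.  The unconditional u64 term covers
 * XFRMA_LASTUSED, which may have to be emitted even though x->lastused was
 * still zero when this estimate was taken.
 */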
2343 static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
2344 {
2345 struct net *net = xs_net(x);
2346 struct xfrm_usersa_info *p;
2347 struct xfrm_usersa_id *id;
2348 struct nlmsghdr *nlh;
2349 struct sk_buff *skb;
2350 int len = xfrm_sa_len(x);
2351 int headlen;
2353 headlen = sizeof(*p);
2354 if (c->event == XFRM_MSG_DELSA) {
2355 len += nla_total_size(headlen);
2356 headlen = sizeof(*id);
2357 len += nla_total_size(sizeof(struct xfrm_mark));
2358 }
2359 len += NLMSG_ALIGN(headlen);
2361 skb = nlmsg_new(len, GFP_ATOMIC);
2362 if (skb == NULL)
2363 return -ENOMEM;
2365 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2366 if (nlh == NULL)
2367 goto nla_put_failure;
2369 p = nlmsg_data(nlh);
2370 if (c->event == XFRM_MSG_DELSA) {
2371 struct nlattr *attr;
2373 id = nlmsg_data(nlh);
2374 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
2375 id->spi = x->id.spi;
2376 id->family = x->props.family;
2377 id->proto = x->id.proto;
2379 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
2380 if (attr == NULL)
2381 goto nla_put_failure;
2383 p = nla_data(attr);
2384 }
2386 if (copy_to_user_state_extra(x, p, skb))
2387 goto nla_put_failure;
2389 nlmsg_end(skb, nlh);
2391 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2393 nla_put_failure:
2394 /* Somebody screwed up with xfrm_sa_len! */
2395 WARN_ON(1);
2396 kfree_skb(skb);
2397 return -1;
2398 }
2400 static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
2401 {
2403 switch (c->event) {
2404 case XFRM_MSG_EXPIRE:
2405 return xfrm_exp_state_notify(x, c);
2406 case XFRM_MSG_NEWAE:
2407 return xfrm_aevent_state_notify(x, c);
2408 case XFRM_MSG_DELSA:
2409 case XFRM_MSG_UPDSA:
2410 case XFRM_MSG_NEWSA:
2411 return xfrm_notify_sa(x, c);
2412 case XFRM_MSG_FLUSHSA:
2413 return xfrm_notify_sa_flush(c);
2414 default:
2415 printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
2416 c->event);
2417 break;
2418 }
2420 return 0;
2421 }
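/*
 * Editor's note (not part of the original file): this is the km_event
 * fan-out for SA events.  EXPIRE, NEWAE, NEWSA/UPDSA/DELSA and FLUSHSA
 * become distinct netlink messages multicast to XFRMNLGRP_EXPIRE,
 * XFRMNLGRP_AEVENTS and XFRMNLGRP_SA respectively; unknown events are
 * logged and otherwise ignored.
 */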
2424 static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
2425 struct xfrm_policy *xp)
2426 {
2427 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
2428 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2429 + nla_total_size(sizeof(struct xfrm_mark))
2430 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
2431 + userpolicy_type_attrsize();
2432 }
2434 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2435 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
2436 int dir)
2437 {
2438 struct xfrm_user_acquire *ua;
2439 struct nlmsghdr *nlh;
2440 __u32 seq = xfrm_get_acqseq();
2442 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
2443 if (nlh == NULL)
2444 return -EMSGSIZE;
2446 ua = nlmsg_data(nlh);
2447 memcpy(&ua->id, &x->id, sizeof(ua->id));
2448 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
2449 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
2450 copy_to_user_policy(xp, &ua->policy, dir);
2451 ua->aalgos = xt->aalgos;
2452 ua->ealgos = xt->ealgos;
2453 ua->calgos = xt->calgos;
2454 ua->seq = x->km.seq = seq;
2456 if (copy_to_user_tmpl(xp, skb) < 0)
2457 goto nlmsg_failure;
2458 if (copy_to_user_state_sec_ctx(x, skb))
2459 goto nlmsg_failure;
2460 if (copy_to_user_policy_type(xp->type, skb) < 0)
2461 goto nlmsg_failure;
2462 if (xfrm_mark_put(skb, &xp->mark))
2463 goto nla_put_failure;
2465 return nlmsg_end(skb, nlh);
2467 nla_put_failure:
2468 nlmsg_failure:
2469 nlmsg_cancel(skb, nlh);
2470 return -EMSGSIZE;
2471 }
2473 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
2474 struct xfrm_policy *xp, int dir)
2475 {
2476 struct net *net = xs_net(x);
2477 struct sk_buff *skb;
2479 skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
2480 if (skb == NULL)
2481 return -ENOMEM;
2483 if (build_acquire(skb, x, xt, xp, dir) < 0)
2484 BUG();
2486 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
2487 }
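/*
 * Editor's note (not part of the original file): xfrm_send_acquire() is
 * reached via km_query() when traffic matches a policy for which no SA
 * exists yet.  The ACQUIRE message carries the SA id, the triggering
 * selector, the templates and the policy, and is multicast to
 * XFRMNLGRP_ACQUIRE so a subscribed IKE daemon can negotiate the missing
 * SA.  The BUG() on build failure is tolerable because
 * xfrm_acquire_msgsize() sized the skb for everything build_acquire()
 * emits.
 */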
2489 /* User gives us xfrm_user_policy_info followed by an array of 0
2490 * or more templates.
2491 */
2492 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
2493 u8 *data, int len, int *dir)
2494 {
2495 struct net *net = sock_net(sk);
2496 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
2497 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
2498 struct xfrm_policy *xp;
2499 int nr;
2501 switch (sk->sk_family) {
2502 case AF_INET:
2503 if (opt != IP_XFRM_POLICY) {
2504 *dir = -EOPNOTSUPP;
2505 return NULL;
2506 }
2507 break;
2508 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2509 case AF_INET6:
2510 if (opt != IPV6_XFRM_POLICY) {
2511 *dir = -EOPNOTSUPP;
2512 return NULL;
2513 }
2514 break;
2515 #endif
2516 default:
2517 *dir = -EINVAL;
2518 return NULL;
2519 }
2521 *dir = -EINVAL;
2523 if (len < sizeof(*p) ||
2524 verify_newpolicy_info(p))
2525 return NULL;
2527 nr = ((len - sizeof(*p)) / sizeof(*ut));
2528 if (validate_tmpl(nr, ut, p->sel.family))
2529 return NULL;
2531 if (p->dir > XFRM_POLICY_OUT)
2532 return NULL;
2534 xp = xfrm_policy_alloc(net, GFP_ATOMIC);
2535 if (xp == NULL) {
2536 *dir = -ENOBUFS;
2537 return NULL;
2538 }
2540 copy_from_user_policy(xp, p);
2541 xp->type = XFRM_POLICY_TYPE_MAIN;
2542 copy_templates(xp, ut, nr);
2544 *dir = p->dir;
2546 return xp;
2547 }
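/*
 * Editor's note (not part of the original file): xfrm_compile_policy() is
 * the one entry point in this file that is not reached over netlink -- it
 * backs the per-socket policy setsockopt()s.  A minimal, hypothetical
 * userspace sketch of the expected buffer layout (IPv4, no templates,
 * values illustrative; real callers also fill in the lifetime limits,
 * typically with XFRM_INF):
 *
 *	struct xfrm_userpolicy_info pol = {
 *		.sel.family	= AF_INET,
 *		.dir		= XFRM_POLICY_OUT,
 *		.action		= XFRM_POLICY_ALLOW,
 *	};
 *	setsockopt(sock_fd, IPPROTO_IP, IP_XFRM_POLICY, &pol, sizeof(pol));
 *
 * Zero or more struct xfrm_user_tmpl entries may follow the header in the
 * same buffer; nr above is derived purely from the residual length.
 */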
2549 static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
2550 {
2551 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
2552 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2553 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
2554 + nla_total_size(sizeof(struct xfrm_mark))
2555 + userpolicy_type_attrsize();
2556 }
2558 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2559 int dir, const struct km_event *c)
2560 {
2561 struct xfrm_user_polexpire *upe;
2562 struct nlmsghdr *nlh;
2563 int hard = c->data.hard;
2565 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
2566 if (nlh == NULL)
2567 return -EMSGSIZE;
2569 upe = nlmsg_data(nlh);
2570 copy_to_user_policy(xp, &upe->pol, dir);
2571 if (copy_to_user_tmpl(xp, skb) < 0)
2572 goto nlmsg_failure;
2573 if (copy_to_user_sec_ctx(xp, skb))
2574 goto nlmsg_failure;
2575 if (copy_to_user_policy_type(xp->type, skb) < 0)
2576 goto nlmsg_failure;
2577 if (xfrm_mark_put(skb, &xp->mark))
2578 goto nla_put_failure;
2579 upe->hard = !!hard;
2581 return nlmsg_end(skb, nlh);
2583 nla_put_failure:
2584 nlmsg_failure:
2585 nlmsg_cancel(skb, nlh);
2586 return -EMSGSIZE;
2587 }
2589 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
2590 {
2591 struct net *net = xp_net(xp);
2592 struct sk_buff *skb;
2594 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
2595 if (skb == NULL)
2596 return -ENOMEM;
2598 if (build_polexpire(skb, xp, dir, c) < 0)
2599 BUG();
2601 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2602 }
2604 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
2605 {
2606 struct net *net = xp_net(xp);
2607 struct xfrm_userpolicy_info *p;
2608 struct xfrm_userpolicy_id *id;
2609 struct nlmsghdr *nlh;
2610 struct sk_buff *skb;
2611 int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2612 int headlen;
2614 headlen = sizeof(*p);
2615 if (c->event == XFRM_MSG_DELPOLICY) {
2616 len += nla_total_size(headlen);
2617 headlen = sizeof(*id);
2618 }
2619 len += userpolicy_type_attrsize();
2620 len += nla_total_size(sizeof(struct xfrm_mark));
2621 len += NLMSG_ALIGN(headlen);
2623 skb = nlmsg_new(len, GFP_ATOMIC);
2624 if (skb == NULL)
2625 return -ENOMEM;
2627 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2628 if (nlh == NULL)
2629 goto nlmsg_failure;
2631 p = nlmsg_data(nlh);
2632 if (c->event == XFRM_MSG_DELPOLICY) {
2633 struct nlattr *attr;
2635 id = nlmsg_data(nlh);
2636 memset(id, 0, sizeof(*id));
2637 id->dir = dir;
2638 if (c->data.byid)
2639 id->index = xp->index;
2640 else
2641 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
2643 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
2644 if (attr == NULL)
2645 goto nlmsg_failure;
2647 p = nla_data(attr);
2648 }
2650 copy_to_user_policy(xp, p, dir);
2651 if (copy_to_user_tmpl(xp, skb) < 0)
2652 goto nlmsg_failure;
2653 if (copy_to_user_policy_type(xp->type, skb) < 0)
2654 goto nlmsg_failure;
2656 if (xfrm_mark_put(skb, &xp->mark))
2657 goto nla_put_failure;
2659 nlmsg_end(skb, nlh);
2661 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2663 nla_put_failure:
2664 nlmsg_failure:
2665 kfree_skb(skb);
2666 return -1;
2667 }
2669 static int xfrm_notify_policy_flush(const struct km_event *c)
2670 {
2671 struct net *net = c->net;
2672 struct nlmsghdr *nlh;
2673 struct sk_buff *skb;
2675 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
2676 if (skb == NULL)
2677 return -ENOMEM;
2679 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
2680 if (nlh == NULL)
2681 goto nlmsg_failure;
2682 if (copy_to_user_policy_type(c->data.type, skb) < 0)
2683 goto nlmsg_failure;
2685 nlmsg_end(skb, nlh);
2687 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2689 nlmsg_failure:
2690 kfree_skb(skb);
2691 return -1;
2692 }
2694 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
2695 {
2697 switch (c->event) {
2698 case XFRM_MSG_NEWPOLICY:
2699 case XFRM_MSG_UPDPOLICY:
2700 case XFRM_MSG_DELPOLICY:
2701 return xfrm_notify_policy(xp, dir, c);
2702 case XFRM_MSG_FLUSHPOLICY:
2703 return xfrm_notify_policy_flush(c);
2704 case XFRM_MSG_POLEXPIRE:
2705 return xfrm_exp_policy_notify(xp, dir, c);
2706 default:
2707 printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
2708 c->event);
2709 }
2711 return 0;
2712 }
2715 static inline size_t xfrm_report_msgsize(void)
2716 {
2717 return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
2718 }
2720 static int build_report(struct sk_buff *skb, u8 proto,
2721 struct xfrm_selector *sel, xfrm_address_t *addr)
2722 {
2723 struct xfrm_user_report *ur;
2724 struct nlmsghdr *nlh;
2726 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
2727 if (nlh == NULL)
2728 return -EMSGSIZE;
2730 ur = nlmsg_data(nlh);
2731 ur->proto = proto;
2732 memcpy(&ur->sel, sel, sizeof(ur->sel));
2734 if (addr)
2735 NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);
2737 return nlmsg_end(skb, nlh);
2739 nla_put_failure:
2740 nlmsg_cancel(skb, nlh);
2741 return -EMSGSIZE;
2742 }
2744 static int xfrm_send_report(struct net *net, u8 proto,
2745 struct xfrm_selector *sel, xfrm_address_t *addr)
2746 {
2747 struct sk_buff *skb;
2749 skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
2750 if (skb == NULL)
2751 return -ENOMEM;
2753 if (build_report(skb, proto, sel, addr) < 0)
2754 BUG();
2756 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
2757 }
2759 static inline size_t xfrm_mapping_msgsize(void)
2760 {
2761 return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
2762 }
2764 static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
2765 xfrm_address_t *new_saddr, __be16 new_sport)
2766 {
2767 struct xfrm_user_mapping *um;
2768 struct nlmsghdr *nlh;
2770 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
2771 if (nlh == NULL)
2772 return -EMSGSIZE;
2774 um = nlmsg_data(nlh);
2776 memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
2777 um->id.spi = x->id.spi;
2778 um->id.family = x->props.family;
2779 um->id.proto = x->id.proto;
2780 memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
2781 memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
2782 um->new_sport = new_sport;
2783 um->old_sport = x->encap->encap_sport;
2784 um->reqid = x->props.reqid;
2786 return nlmsg_end(skb, nlh);
2787 }
2789 static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
2790 __be16 sport)
2791 {
2792 struct net *net = xs_net(x);
2793 struct sk_buff *skb;
2795 if (x->id.proto != IPPROTO_ESP)
2796 return -EINVAL;
2798 if (!x->encap)
2799 return -EINVAL;
2801 skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
2802 if (skb == NULL)
2803 return -ENOMEM;
2805 if (build_mapping(skb, x, ipaddr, sport) < 0)
2806 BUG();
2808 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
2809 }
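/*
 * Editor's note (not part of the original file): MAPPING notifications
 * exist for UDP-encapsulated ESP (NAT traversal).  When the ESP input path
 * notices that the peer's address or source port changed behind a NAT,
 * km_new_mapping() lands here and the old/new address:port pair is
 * multicast to XFRMNLGRP_MAPPING so a keying daemon can update its view of
 * the peer -- hence the guards above requiring an ESP SA with encap state.
 */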
2811 static struct xfrm_mgr netlink_mgr = {
2812 .id = "netlink",
2813 .notify = xfrm_send_state_notify,
2814 .acquire = xfrm_send_acquire,
2815 .compile_policy = xfrm_compile_policy,
2816 .notify_policy = xfrm_send_policy_notify,
2817 .report = xfrm_send_report,
2818 .migrate = xfrm_send_migrate,
2819 .new_mapping = xfrm_send_mapping,
2820 };
2822 static int __net_init xfrm_user_net_init(struct net *net)
2823 {
2824 struct sock *nlsk;
2826 nlsk = netlink_kernel_create(net, NETLINK_XFRM, XFRMNLGRP_MAX,
2827 xfrm_netlink_rcv, NULL, THIS_MODULE);
2828 if (nlsk == NULL)
2829 return -ENOMEM;
2830 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
2831 rcu_assign_pointer(net->xfrm.nlsk, nlsk);
2832 return 0;
2833 }
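/*
 * Editor's note (not part of the original file): net->xfrm.nlsk is read
 * under RCU by the notification paths above, so teardown cannot simply
 * free it.  nlsk_stash keeps a second, never-cleared copy of the pointer:
 * the exit_batch handler below nulls the RCU-visible pointer, waits for
 * readers with synchronize_net(), and only then releases the socket via
 * the stashed pointer.
 */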
2835 static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
2836 {
2837 struct net *net;
2838 list_for_each_entry(net, net_exit_list, exit_list)
2839 rcu_assign_pointer(net->xfrm.nlsk, NULL);
2840 synchronize_net();
2841 list_for_each_entry(net, net_exit_list, exit_list)
2842 netlink_kernel_release(net->xfrm.nlsk_stash);
2843 }
2845 static struct pernet_operations xfrm_user_net_ops = {
2846 .init = xfrm_user_net_init,
2847 .exit_batch = xfrm_user_net_exit,
2848 };
2850 static int __init xfrm_user_init(void)
2851 {
2852 int rv;
2854 printk(KERN_INFO "Initializing XFRM netlink socket\n");
2856 rv = register_pernet_subsys(&xfrm_user_net_ops);
2857 if (rv < 0)
2858 return rv;
2859 rv = xfrm_register_km(&netlink_mgr);
2860 if (rv < 0)
2861 unregister_pernet_subsys(&xfrm_user_net_ops);
2862 return rv;
2863 }
2865 static void __exit xfrm_user_exit(void)
2866 {
2867 xfrm_unregister_km(&netlink_mgr);
2868 unregister_pernet_subsys(&xfrm_user_net_ops);
2869 }
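/*
 * Editor's note (not part of the original file): registration order
 * matters.  The per-netns sockets are created before the key manager is
 * registered, so by the time netlink_mgr can be invoked every namespace
 * already has a socket to multicast on; module exit unwinds in the
 * reverse order for the same reason.
 */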
2871 module_init(xfrm_user_init);
2872 module_exit(xfrm_user_exit);
2873 MODULE_LICENSE("GPL");
2874 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);