6877528 flushing tunnel policy doesn't bump tunnel link MTU
[illumos-gate.git] / usr / src / uts / common / inet / ip / spdsock.c
blob1ff0cff31d11a16a064185595aeb02dc83180bc0
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 #include <sys/param.h>
27 #include <sys/types.h>
28 #include <sys/stream.h>
29 #include <sys/strsubr.h>
30 #include <sys/strsun.h>
31 #include <sys/stropts.h>
32 #include <sys/zone.h>
33 #include <sys/vnode.h>
34 #include <sys/sysmacros.h>
35 #define _SUN_TPI_VERSION 2
36 #include <sys/tihdr.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/mkdev.h>
40 #include <sys/debug.h>
41 #include <sys/kmem.h>
42 #include <sys/cmn_err.h>
43 #include <sys/suntpi.h>
44 #include <sys/policy.h>
45 #include <sys/dls.h>
47 #include <sys/socket.h>
48 #include <netinet/in.h>
49 #include <net/pfkeyv2.h>
50 #include <net/pfpolicy.h>
52 #include <inet/common.h>
53 #include <netinet/ip6.h>
54 #include <inet/ip.h>
55 #include <inet/ip6.h>
56 #include <inet/mi.h>
57 #include <inet/proto_set.h>
58 #include <inet/nd.h>
59 #include <inet/ip_if.h>
60 #include <inet/optcom.h>
61 #include <inet/ipsec_impl.h>
62 #include <inet/spdsock.h>
63 #include <inet/sadb.h>
64 #include <inet/iptun.h>
65 #include <inet/iptun/iptun_impl.h>
67 #include <sys/isa_defs.h>
69 #include <c2/audit.h>
72 * This is a transport provider for the PF_POLICY IPsec policy
73 * management socket, which provides a management interface into the
74 * SPD, allowing policy rules to be added, deleted, and queried.
76 * This effectively replaces the old private SIOC*IPSECONFIG ioctls
77 * with an extensible interface which will hopefully be public some
78 * day.
80 * See <net/pfpolicy.h> for more details on the protocol.
82 * We link against drv/ip and call directly into it to manipulate the
83 * SPD; see ipsec_impl.h for the policy data structures and spd.c for
84 * the code which maintains them.
86 * The MT model of this is QPAIR with the addition of some explicit
87 * locking to protect system-wide policy data structures.
90 static vmem_t *spdsock_vmem; /* for minor numbers. */
92 #define ALIGNED64(x) IS_P2ALIGNED((x), sizeof (uint64_t))
94 /* Default structure copied into T_INFO_ACK messages (from rts.c...) */
95 static struct T_info_ack spdsock_g_t_info_ack = {
96 T_INFO_ACK,
97 T_INFINITE, /* TSDU_size. Maximum size messages. */
98 T_INVALID, /* ETSDU_size. No expedited data. */
99 T_INVALID, /* CDATA_size. No connect data. */
100 T_INVALID, /* DDATA_size. No disconnect data. */
101 0, /* ADDR_size. */
102 0, /* OPT_size. No user-settable options */
103 64 * 1024, /* TIDU_size. spdsock allows maximum size messages. */
104 T_COTS, /* SERV_type. spdsock supports connection oriented. */
105 TS_UNBND, /* CURRENT_state. This is set from spdsock_state. */
106 (XPG4_1) /* Provider flags */
109 /* Named Dispatch Parameter Management Structure */
110 typedef struct spdsockparam_s {
111 uint_t spdsock_param_min;
112 uint_t spdsock_param_max;
113 uint_t spdsock_param_value;
114 char *spdsock_param_name;
115 } spdsockparam_t;
118 * Table of NDD variables supported by spdsock. These are loaded into
119 * spdsock_g_nd in spdsock_init_nd.
120 * All of these are alterable, within the min/max values given, at run time.
122 static spdsockparam_t lcl_param_arr[] = {
123 /* min max value name */
124 { 4096, 65536, 8192, "spdsock_xmit_hiwat"},
125 { 0, 65536, 1024, "spdsock_xmit_lowat"},
126 { 4096, 65536, 8192, "spdsock_recv_hiwat"},
127 { 65536, 1024*1024*1024, 256*1024, "spdsock_max_buf"},
128 { 0, 3, 0, "spdsock_debug"},
/* Shorthand accessors for the per-stack tunables in spds_params[]. */
#define	spds_xmit_hiwat	spds_params[0].spdsock_param_value
#define	spds_xmit_lowat	spds_params[1].spdsock_param_value
#define	spds_recv_hiwat	spds_params[2].spdsock_param_value
#define	spds_max_buf	spds_params[3].spdsock_param_value
#define	spds_debug	spds_params[4].spdsock_param_value

/* Debug printfs, gated on the spdsock_debug tunable level. */
#define	ss0dbg(a)	printf a
/* NOTE: != 0 instead of > 0 so lint doesn't complain. */
#define	ss1dbg(spds, a)	if (spds->spds_debug != 0) printf a
#define	ss2dbg(spds, a)	if (spds->spds_debug > 1) printf a
#define	ss3dbg(spds, a)	if (spds->spds_debug > 2) printf a

/*
 * Rewind a socket's SPD-dump cursor to the start of polhead "iph".
 * Caller must hold iph->iph_lock as reader (asserted).
 */
#define	RESET_SPDSOCK_DUMP_POLHEAD(ss, iph) { \
	ASSERT(RW_READ_HELD(&(iph)->iph_lock)); \
	(ss)->spdsock_dump_head = (iph); \
	(ss)->spdsock_dump_gen = (iph)->iph_gen; \
	(ss)->spdsock_dump_cur_type = 0; \
	(ss)->spdsock_dump_cur_af = IPSEC_AF_V4; \
	(ss)->spdsock_dump_cur_rule = NULL; \
	(ss)->spdsock_dump_count = 0; \
	(ss)->spdsock_dump_cur_chain = 0; \
}
153 static int spdsock_close(queue_t *);
154 static int spdsock_open(queue_t *, dev_t *, int, int, cred_t *);
155 static void spdsock_wput(queue_t *, mblk_t *);
156 static void spdsock_wsrv(queue_t *);
157 static void spdsock_rsrv(queue_t *);
158 static void *spdsock_stack_init(netstackid_t stackid, netstack_t *ns);
159 static void spdsock_stack_fini(netstackid_t stackid, void *arg);
160 static void spdsock_loadcheck(void *);
161 static void spdsock_merge_algs(spd_stack_t *);
162 static void spdsock_flush_one(ipsec_policy_head_t *, netstack_t *);
163 static mblk_t *spdsock_dump_next_record(spdsock_t *);
164 static void update_iptun_policy(ipsec_tun_pol_t *);
166 static struct module_info info = {
167 5138, "spdsock", 1, INFPSZ, 512, 128
170 static struct qinit rinit = {
171 NULL, (pfi_t)spdsock_rsrv, spdsock_open, spdsock_close,
172 NULL, &info
175 static struct qinit winit = {
176 (pfi_t)spdsock_wput, (pfi_t)spdsock_wsrv, NULL, NULL, NULL, &info
179 struct streamtab spdsockinfo = {
180 &rinit, &winit
183 /* mapping from alg type to protocol number, as per RFC 2407 */
184 static const uint_t algproto[] = {
185 PROTO_IPSEC_AH,
186 PROTO_IPSEC_ESP,
189 #define NALGPROTOS (sizeof (algproto) / sizeof (algproto[0]))
191 /* mapping from kernel exec mode to spdsock exec mode */
192 static const uint_t execmodes[] = {
193 SPD_ALG_EXEC_MODE_SYNC,
194 SPD_ALG_EXEC_MODE_ASYNC
197 #define NEXECMODES (sizeof (execmodes) / sizeof (execmodes[0]))
199 #define ALL_ACTIVE_POLHEADS ((ipsec_policy_head_t *)-1)
200 #define ALL_INACTIVE_POLHEADS ((ipsec_policy_head_t *)-2)
202 #define ITP_NAME(itp) (itp != NULL ? itp->itp_name : NULL)
204 /* ARGSUSED */
205 static int
206 spdsock_param_get(q, mp, cp, cr)
207 queue_t *q;
208 mblk_t *mp;
209 caddr_t cp;
210 cred_t *cr;
212 spdsockparam_t *spdsockpa = (spdsockparam_t *)cp;
213 uint_t value;
214 spdsock_t *ss = (spdsock_t *)q->q_ptr;
215 spd_stack_t *spds = ss->spdsock_spds;
217 mutex_enter(&spds->spds_param_lock);
218 value = spdsockpa->spdsock_param_value;
219 mutex_exit(&spds->spds_param_lock);
221 (void) mi_mpprintf(mp, "%u", value);
222 return (0);
225 /* This routine sets an NDD variable in a spdsockparam_t structure. */
226 /* ARGSUSED */
227 static int
228 spdsock_param_set(q, mp, value, cp, cr)
229 queue_t *q;
230 mblk_t *mp;
231 char *value;
232 caddr_t cp;
233 cred_t *cr;
235 ulong_t new_value;
236 spdsockparam_t *spdsockpa = (spdsockparam_t *)cp;
237 spdsock_t *ss = (spdsock_t *)q->q_ptr;
238 spd_stack_t *spds = ss->spdsock_spds;
240 /* Convert the value from a string into a long integer. */
241 if (ddi_strtoul(value, NULL, 10, &new_value) != 0)
242 return (EINVAL);
244 mutex_enter(&spds->spds_param_lock);
246 * Fail the request if the new value does not lie within the
247 * required bounds.
249 if (new_value < spdsockpa->spdsock_param_min ||
250 new_value > spdsockpa->spdsock_param_max) {
251 mutex_exit(&spds->spds_param_lock);
252 return (EINVAL);
255 /* Set the new value */
256 spdsockpa->spdsock_param_value = new_value;
257 mutex_exit(&spds->spds_param_lock);
259 return (0);
263 * Initialize at module load time
265 boolean_t
266 spdsock_ddi_init(void)
268 spdsock_max_optsize = optcom_max_optsize(
269 spdsock_opt_obj.odb_opt_des_arr, spdsock_opt_obj.odb_opt_arr_cnt);
271 spdsock_vmem = vmem_create("spdsock", (void *)1, MAXMIN, 1,
272 NULL, NULL, NULL, 1, VM_SLEEP | VMC_IDENTIFIER);
275 * We want to be informed each time a stack is created or
276 * destroyed in the kernel, so we can maintain the
277 * set of spd_stack_t's.
279 netstack_register(NS_SPDSOCK, spdsock_stack_init, NULL,
280 spdsock_stack_fini);
282 return (B_TRUE);
286 * Walk through the param array specified registering each element with the
287 * named dispatch handler.
289 static boolean_t
290 spdsock_param_register(IDP *ndp, spdsockparam_t *ssp, int cnt)
292 for (; cnt-- > 0; ssp++) {
293 if (ssp->spdsock_param_name != NULL &&
294 ssp->spdsock_param_name[0]) {
295 if (!nd_load(ndp,
296 ssp->spdsock_param_name,
297 spdsock_param_get, spdsock_param_set,
298 (caddr_t)ssp)) {
299 nd_free(ndp);
300 return (B_FALSE);
304 return (B_TRUE);
308 * Initialize for each stack instance
310 /* ARGSUSED */
311 static void *
312 spdsock_stack_init(netstackid_t stackid, netstack_t *ns)
314 spd_stack_t *spds;
315 spdsockparam_t *ssp;
317 spds = (spd_stack_t *)kmem_zalloc(sizeof (*spds), KM_SLEEP);
318 spds->spds_netstack = ns;
320 ASSERT(spds->spds_g_nd == NULL);
322 ssp = (spdsockparam_t *)kmem_alloc(sizeof (lcl_param_arr), KM_SLEEP);
323 spds->spds_params = ssp;
324 bcopy(lcl_param_arr, ssp, sizeof (lcl_param_arr));
326 (void) spdsock_param_register(&spds->spds_g_nd, ssp,
327 A_CNT(lcl_param_arr));
329 mutex_init(&spds->spds_param_lock, NULL, MUTEX_DEFAULT, NULL);
330 mutex_init(&spds->spds_alg_lock, NULL, MUTEX_DEFAULT, NULL);
332 return (spds);
335 void
336 spdsock_ddi_destroy(void)
338 vmem_destroy(spdsock_vmem);
340 netstack_unregister(NS_SPDSOCK);
343 /* ARGSUSED */
344 static void
345 spdsock_stack_fini(netstackid_t stackid, void *arg)
347 spd_stack_t *spds = (spd_stack_t *)arg;
349 freemsg(spds->spds_mp_algs);
350 mutex_destroy(&spds->spds_param_lock);
351 mutex_destroy(&spds->spds_alg_lock);
352 nd_free(&spds->spds_g_nd);
353 kmem_free(spds->spds_params, sizeof (lcl_param_arr));
354 spds->spds_params = NULL;
356 kmem_free(spds, sizeof (*spds));
360 * NOTE: large quantities of this should be shared with keysock.
361 * Would be nice to combine some of this into a common module, but
362 * not possible given time pressures.
366 * High-level reality checking of extensions.
368 /* ARGSUSED */ /* XXX */
369 static boolean_t
370 ext_check(spd_ext_t *ext)
372 spd_if_t *tunname = (spd_if_t *)ext;
373 int i;
374 char *idstr;
376 if (ext->spd_ext_type == SPD_EXT_TUN_NAME) {
377 /* (NOTE: Modified from SADB_EXT_IDENTITY..) */
380 * Make sure the strings in these identities are
381 * null-terminated. Let's "proactively" null-terminate the
382 * string at the last byte if it's not terminated sooner.
384 i = SPD_64TO8(tunname->spd_if_len) - sizeof (spd_if_t);
385 idstr = (char *)(tunname + 1);
386 while (*idstr != '\0' && i > 0) {
387 i--;
388 idstr++;
390 if (i == 0) {
392 * I.e., if the bozo user didn't NULL-terminate the
393 * string...
395 idstr--;
396 *idstr = '\0';
399 return (B_TRUE); /* For now... */
404 /* Return values for spdsock_get_ext(). */
405 #define KGE_OK 0
406 #define KGE_DUP 1
407 #define KGE_UNK 2
408 #define KGE_LEN 3
409 #define KGE_CHK 4
412 * Parse basic extension headers and return in the passed-in pointer vector.
413 * Return values include:
415 * KGE_OK Everything's nice and parsed out.
416 * If there are no extensions, place NULL in extv[0].
417 * KGE_DUP There is a duplicate extension.
418 * First instance in appropriate bin. First duplicate in
419 * extv[0].
420 * KGE_UNK Unknown extension type encountered. extv[0] contains
421 * unknown header.
422 * KGE_LEN Extension length error.
423 * KGE_CHK High-level reality check failed on specific extension.
425 * My apologies for some of the pointer arithmetic in here. I'm thinking
426 * like an assembly programmer, yet trying to make the compiler happy.
428 static int
429 spdsock_get_ext(spd_ext_t *extv[], spd_msg_t *basehdr, uint_t msgsize)
431 bzero(extv, sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));
433 /* Use extv[0] as the "current working pointer". */
435 extv[0] = (spd_ext_t *)(basehdr + 1);
437 while (extv[0] < (spd_ext_t *)(((uint8_t *)basehdr) + msgsize)) {
438 /* Check for unknown headers. */
439 if (extv[0]->spd_ext_type == 0 ||
440 extv[0]->spd_ext_type > SPD_EXT_MAX)
441 return (KGE_UNK);
444 * Check length. Use uint64_t because extlen is in units
445 * of 64-bit words. If length goes beyond the msgsize,
446 * return an error. (Zero length also qualifies here.)
448 if (extv[0]->spd_ext_len == 0 ||
449 (void *)((uint64_t *)extv[0] + extv[0]->spd_ext_len) >
450 (void *)((uint8_t *)basehdr + msgsize))
451 return (KGE_LEN);
453 /* Check for redundant headers. */
454 if (extv[extv[0]->spd_ext_type] != NULL)
455 return (KGE_DUP);
458 * Reality check the extension if possible at the spdsock
459 * level.
461 if (!ext_check(extv[0]))
462 return (KGE_CHK);
464 /* If I make it here, assign the appropriate bin. */
465 extv[extv[0]->spd_ext_type] = extv[0];
467 /* Advance pointer (See above for uint64_t ptr reasoning.) */
468 extv[0] = (spd_ext_t *)
469 ((uint64_t *)extv[0] + extv[0]->spd_ext_len);
472 /* Everything's cool. */
475 * If extv[0] == NULL, then there are no extension headers in this
476 * message. Ensure that this is the case.
478 if (extv[0] == (spd_ext_t *)(basehdr + 1))
479 extv[0] = NULL;
481 return (KGE_OK);
484 static const int bad_ext_diag[] = {
485 SPD_DIAGNOSTIC_MALFORMED_LCLPORT,
486 SPD_DIAGNOSTIC_MALFORMED_REMPORT,
487 SPD_DIAGNOSTIC_MALFORMED_PROTO,
488 SPD_DIAGNOSTIC_MALFORMED_LCLADDR,
489 SPD_DIAGNOSTIC_MALFORMED_REMADDR,
490 SPD_DIAGNOSTIC_MALFORMED_ACTION,
491 SPD_DIAGNOSTIC_MALFORMED_RULE,
492 SPD_DIAGNOSTIC_MALFORMED_RULESET,
493 SPD_DIAGNOSTIC_MALFORMED_ICMP_TYPECODE
496 static const int dup_ext_diag[] = {
497 SPD_DIAGNOSTIC_DUPLICATE_LCLPORT,
498 SPD_DIAGNOSTIC_DUPLICATE_REMPORT,
499 SPD_DIAGNOSTIC_DUPLICATE_PROTO,
500 SPD_DIAGNOSTIC_DUPLICATE_LCLADDR,
501 SPD_DIAGNOSTIC_DUPLICATE_REMADDR,
502 SPD_DIAGNOSTIC_DUPLICATE_ACTION,
503 SPD_DIAGNOSTIC_DUPLICATE_RULE,
504 SPD_DIAGNOSTIC_DUPLICATE_RULESET,
505 SPD_DIAGNOSTIC_DUPLICATE_ICMP_TYPECODE
509 * Transmit a PF_POLICY error message to the instance either pointed to
510 * by ks, the instance with serial number serial, or more, depending.
512 * The faulty message (or a reasonable facsimile thereof) is in mp.
513 * This function will free mp or recycle it for delivery, thereby causing
514 * the stream head to free it.
516 static void
517 spdsock_error(queue_t *q, mblk_t *mp, int error, int diagnostic)
519 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
521 ASSERT(mp->b_datap->db_type == M_DATA);
523 if (spmsg->spd_msg_type < SPD_MIN ||
524 spmsg->spd_msg_type > SPD_MAX)
525 spmsg->spd_msg_type = SPD_RESERVED;
528 * Strip out extension headers.
530 ASSERT(mp->b_rptr + sizeof (*spmsg) <= mp->b_datap->db_lim);
531 mp->b_wptr = mp->b_rptr + sizeof (*spmsg);
532 spmsg->spd_msg_len = SPD_8TO64(sizeof (spd_msg_t));
533 spmsg->spd_msg_errno = (uint8_t)error;
534 spmsg->spd_msg_diagnostic = (uint16_t)diagnostic;
536 qreply(q, mp);
539 static void
540 spdsock_diag(queue_t *q, mblk_t *mp, int diagnostic)
542 spdsock_error(q, mp, EINVAL, diagnostic);
545 static void
546 spd_echo(queue_t *q, mblk_t *mp)
548 qreply(q, mp);
552 * Do NOT consume a reference to itp.
554 /*ARGSUSED*/
555 static void
556 spdsock_flush_node(ipsec_tun_pol_t *itp, void *cookie, netstack_t *ns)
558 boolean_t active = (boolean_t)cookie;
559 ipsec_policy_head_t *iph;
561 iph = active ? itp->itp_policy : itp->itp_inactive;
562 IPPH_REFHOLD(iph);
563 mutex_enter(&itp->itp_lock);
564 spdsock_flush_one(iph, ns); /* Releases iph refhold. */
565 if (active)
566 itp->itp_flags &= ~ITPF_PFLAGS;
567 else
568 itp->itp_flags &= ~ITPF_IFLAGS;
569 mutex_exit(&itp->itp_lock);
570 /* SPD_FLUSH is worth a tunnel MTU check. */
571 update_iptun_policy(itp);
575 * Clear out one polhead.
577 static void
578 spdsock_flush_one(ipsec_policy_head_t *iph, netstack_t *ns)
580 rw_enter(&iph->iph_lock, RW_WRITER);
581 ipsec_polhead_flush(iph, ns);
582 rw_exit(&iph->iph_lock);
583 IPPH_REFRELE(iph, ns);
586 static void
587 spdsock_flush(queue_t *q, ipsec_policy_head_t *iph, ipsec_tun_pol_t *itp,
588 mblk_t *mp)
590 boolean_t active;
591 spdsock_t *ss = (spdsock_t *)q->q_ptr;
592 netstack_t *ns = ss->spdsock_spds->spds_netstack;
594 if (iph != ALL_ACTIVE_POLHEADS && iph != ALL_INACTIVE_POLHEADS) {
595 spdsock_flush_one(iph, ns);
596 if (audit_active) {
597 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
598 cred_t *cr;
599 pid_t cpid;
601 cr = msg_getcred(mp, &cpid);
602 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
603 audit_pf_policy(SPD_FLUSH, cr, ns,
604 ITP_NAME(itp), active, 0, cpid);
606 } else {
607 active = (iph == ALL_ACTIVE_POLHEADS);
609 /* First flush the global policy. */
610 spdsock_flush_one(active ? ipsec_system_policy(ns) :
611 ipsec_inactive_policy(ns), ns);
612 if (audit_active) {
613 cred_t *cr;
614 pid_t cpid;
616 cr = msg_getcred(mp, &cpid);
617 audit_pf_policy(SPD_FLUSH, cr, ns, NULL,
618 active, 0, cpid);
620 /* Then flush every tunnel's appropriate one. */
621 itp_walk(spdsock_flush_node, (void *)active, ns);
622 if (audit_active) {
623 cred_t *cr;
624 pid_t cpid;
626 cr = msg_getcred(mp, &cpid);
627 audit_pf_policy(SPD_FLUSH, cr, ns,
628 "all tunnels", active, 0, cpid);
632 spd_echo(q, mp);
635 static boolean_t
636 spdsock_ext_to_sel(spd_ext_t **extv, ipsec_selkey_t *sel, int *diag)
638 bzero(sel, sizeof (*sel));
640 if (extv[SPD_EXT_PROTO] != NULL) {
641 struct spd_proto *pr =
642 (struct spd_proto *)extv[SPD_EXT_PROTO];
643 sel->ipsl_proto = pr->spd_proto_number;
644 sel->ipsl_valid |= IPSL_PROTOCOL;
646 if (extv[SPD_EXT_LCLPORT] != NULL) {
647 struct spd_portrange *pr =
648 (struct spd_portrange *)extv[SPD_EXT_LCLPORT];
649 sel->ipsl_lport = pr->spd_ports_minport;
650 sel->ipsl_valid |= IPSL_LOCAL_PORT;
652 if (extv[SPD_EXT_REMPORT] != NULL) {
653 struct spd_portrange *pr =
654 (struct spd_portrange *)extv[SPD_EXT_REMPORT];
655 sel->ipsl_rport = pr->spd_ports_minport;
656 sel->ipsl_valid |= IPSL_REMOTE_PORT;
659 if (extv[SPD_EXT_ICMP_TYPECODE] != NULL) {
660 struct spd_typecode *tc=
661 (struct spd_typecode *)extv[SPD_EXT_ICMP_TYPECODE];
663 sel->ipsl_valid |= IPSL_ICMP_TYPE;
664 sel->ipsl_icmp_type = tc->spd_typecode_type;
665 if (tc->spd_typecode_type_end < tc->spd_typecode_type)
666 sel->ipsl_icmp_type_end = tc->spd_typecode_type;
667 else
668 sel->ipsl_icmp_type_end = tc->spd_typecode_type_end;
670 if (tc->spd_typecode_code != 255) {
671 sel->ipsl_valid |= IPSL_ICMP_CODE;
672 sel->ipsl_icmp_code = tc->spd_typecode_code;
673 if (tc->spd_typecode_code_end < tc->spd_typecode_code)
674 sel->ipsl_icmp_code_end = tc->spd_typecode_code;
675 else
676 sel->ipsl_icmp_code_end =
677 tc->spd_typecode_code_end;
680 #define ADDR2SEL(sel, extv, field, pfield, extn, bit) \
681 if ((extv)[(extn)] != NULL) { \
682 uint_t addrlen; \
683 struct spd_address *ap = \
684 (struct spd_address *)((extv)[(extn)]); \
685 addrlen = (ap->spd_address_af == AF_INET6) ? \
686 IPV6_ADDR_LEN : IP_ADDR_LEN; \
687 if (SPD_64TO8(ap->spd_address_len) < \
688 (addrlen + sizeof (*ap))) { \
689 *diag = SPD_DIAGNOSTIC_BAD_ADDR_LEN; \
690 return (B_FALSE); \
692 bcopy((ap+1), &((sel)->field), addrlen); \
693 (sel)->pfield = ap->spd_address_prefixlen; \
694 (sel)->ipsl_valid |= (bit); \
695 (sel)->ipsl_valid |= (ap->spd_address_af == AF_INET6) ? \
696 IPSL_IPV6 : IPSL_IPV4; \
699 ADDR2SEL(sel, extv, ipsl_local, ipsl_local_pfxlen,
700 SPD_EXT_LCLADDR, IPSL_LOCAL_ADDR);
701 ADDR2SEL(sel, extv, ipsl_remote, ipsl_remote_pfxlen,
702 SPD_EXT_REMADDR, IPSL_REMOTE_ADDR);
704 if ((sel->ipsl_valid & (IPSL_IPV6|IPSL_IPV4)) ==
705 (IPSL_IPV6|IPSL_IPV4)) {
706 *diag = SPD_DIAGNOSTIC_MIXED_AF;
707 return (B_FALSE);
710 #undef ADDR2SEL
712 return (B_TRUE);
715 static boolean_t
716 spd_convert_type(uint32_t type, ipsec_act_t *act)
718 switch (type) {
719 case SPD_ACTTYPE_DROP:
720 act->ipa_type = IPSEC_ACT_DISCARD;
721 return (B_TRUE);
723 case SPD_ACTTYPE_PASS:
724 act->ipa_type = IPSEC_ACT_CLEAR;
725 return (B_TRUE);
727 case SPD_ACTTYPE_IPSEC:
728 act->ipa_type = IPSEC_ACT_APPLY;
729 return (B_TRUE);
731 return (B_FALSE);
734 static boolean_t
735 spd_convert_flags(uint32_t flags, ipsec_act_t *act)
738 * Note use of !! for boolean canonicalization.
740 act->ipa_apply.ipp_use_ah = !!(flags & SPD_APPLY_AH);
741 act->ipa_apply.ipp_use_esp = !!(flags & SPD_APPLY_ESP);
742 act->ipa_apply.ipp_use_espa = !!(flags & SPD_APPLY_ESPA);
743 act->ipa_apply.ipp_use_se = !!(flags & SPD_APPLY_SE);
744 act->ipa_apply.ipp_use_unique = !!(flags & SPD_APPLY_UNIQUE);
745 return (B_TRUE);
748 static void
749 spdsock_reset_act(ipsec_act_t *act)
751 bzero(act, sizeof (*act));
752 act->ipa_apply.ipp_espe_maxbits = IPSEC_MAX_KEYBITS;
753 act->ipa_apply.ipp_espa_maxbits = IPSEC_MAX_KEYBITS;
754 act->ipa_apply.ipp_ah_maxbits = IPSEC_MAX_KEYBITS;
758 * Sanity check action against reality, and shrink-wrap key sizes..
760 static boolean_t
761 spdsock_check_action(ipsec_act_t *act, boolean_t tunnel_polhead, int *diag,
762 spd_stack_t *spds)
764 if (tunnel_polhead && act->ipa_apply.ipp_use_unique) {
765 *diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
766 return (B_FALSE);
768 if ((act->ipa_type != IPSEC_ACT_APPLY) &&
769 (act->ipa_apply.ipp_use_ah ||
770 act->ipa_apply.ipp_use_esp ||
771 act->ipa_apply.ipp_use_espa ||
772 act->ipa_apply.ipp_use_se ||
773 act->ipa_apply.ipp_use_unique)) {
774 *diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
775 return (B_FALSE);
777 if ((act->ipa_type == IPSEC_ACT_APPLY) &&
778 !act->ipa_apply.ipp_use_ah &&
779 !act->ipa_apply.ipp_use_esp) {
780 *diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
781 return (B_FALSE);
783 return (ipsec_check_action(act, diag, spds->spds_netstack));
787 * We may be short a few error checks here..
789 static boolean_t
790 spdsock_ext_to_actvec(spd_ext_t **extv, ipsec_act_t **actpp, uint_t *nactp,
791 int *diag, spd_stack_t *spds)
793 struct spd_ext_actions *sactp =
794 (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
795 ipsec_act_t act, *actp, *endactp;
796 struct spd_attribute *attrp, *endattrp;
797 uint64_t *endp;
798 int nact;
799 boolean_t tunnel_polhead;
801 tunnel_polhead = (extv[SPD_EXT_TUN_NAME] != NULL &&
802 (((struct spd_rule *)extv[SPD_EXT_RULE])->spd_rule_flags &
803 SPD_RULE_FLAG_TUNNEL));
805 *actpp = NULL;
806 *nactp = 0;
808 if (sactp == NULL) {
809 *diag = SPD_DIAGNOSTIC_NO_ACTION_EXT;
810 return (B_FALSE);
814 * Parse the "action" extension and convert into an action chain.
817 nact = sactp->spd_actions_count;
819 endp = (uint64_t *)sactp;
820 endp += sactp->spd_actions_len;
821 endattrp = (struct spd_attribute *)endp;
823 actp = kmem_alloc(sizeof (*actp) * nact, KM_NOSLEEP);
824 if (actp == NULL) {
825 *diag = SPD_DIAGNOSTIC_ADD_NO_MEM;
826 return (B_FALSE);
828 *actpp = actp;
829 *nactp = nact;
830 endactp = actp + nact;
832 spdsock_reset_act(&act);
833 attrp = (struct spd_attribute *)(&sactp[1]);
835 for (; attrp < endattrp; attrp++) {
836 switch (attrp->spd_attr_tag) {
837 case SPD_ATTR_NOP:
838 break;
840 case SPD_ATTR_EMPTY:
841 spdsock_reset_act(&act);
842 break;
844 case SPD_ATTR_END:
845 attrp = endattrp;
846 /* FALLTHRU */
847 case SPD_ATTR_NEXT:
848 if (actp >= endactp) {
849 *diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
850 goto fail;
852 if (!spdsock_check_action(&act, tunnel_polhead,
853 diag, spds))
854 goto fail;
855 *actp++ = act;
856 spdsock_reset_act(&act);
857 break;
859 case SPD_ATTR_TYPE:
860 if (!spd_convert_type(attrp->spd_attr_value, &act)) {
861 *diag = SPD_DIAGNOSTIC_ADD_BAD_TYPE;
862 goto fail;
864 break;
866 case SPD_ATTR_FLAGS:
867 if (!tunnel_polhead && extv[SPD_EXT_TUN_NAME] != NULL) {
869 * Set "sa unique" for transport-mode
870 * tunnels whether we want to or not.
872 attrp->spd_attr_value |= SPD_APPLY_UNIQUE;
874 if (!spd_convert_flags(attrp->spd_attr_value, &act)) {
875 *diag = SPD_DIAGNOSTIC_ADD_BAD_FLAGS;
876 goto fail;
878 break;
880 case SPD_ATTR_AH_AUTH:
881 if (attrp->spd_attr_value == 0) {
882 *diag = SPD_DIAGNOSTIC_UNSUPP_AH_ALG;
883 goto fail;
885 act.ipa_apply.ipp_auth_alg = attrp->spd_attr_value;
886 break;
888 case SPD_ATTR_ESP_ENCR:
889 if (attrp->spd_attr_value == 0) {
890 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_ALG;
891 goto fail;
893 act.ipa_apply.ipp_encr_alg = attrp->spd_attr_value;
894 break;
896 case SPD_ATTR_ESP_AUTH:
897 if (attrp->spd_attr_value == 0) {
898 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_ALG;
899 goto fail;
901 act.ipa_apply.ipp_esp_auth_alg = attrp->spd_attr_value;
902 break;
904 case SPD_ATTR_ENCR_MINBITS:
905 act.ipa_apply.ipp_espe_minbits = attrp->spd_attr_value;
906 break;
908 case SPD_ATTR_ENCR_MAXBITS:
909 act.ipa_apply.ipp_espe_maxbits = attrp->spd_attr_value;
910 break;
912 case SPD_ATTR_AH_MINBITS:
913 act.ipa_apply.ipp_ah_minbits = attrp->spd_attr_value;
914 break;
916 case SPD_ATTR_AH_MAXBITS:
917 act.ipa_apply.ipp_ah_maxbits = attrp->spd_attr_value;
918 break;
920 case SPD_ATTR_ESPA_MINBITS:
921 act.ipa_apply.ipp_espa_minbits = attrp->spd_attr_value;
922 break;
924 case SPD_ATTR_ESPA_MAXBITS:
925 act.ipa_apply.ipp_espa_maxbits = attrp->spd_attr_value;
926 break;
928 case SPD_ATTR_LIFE_SOFT_TIME:
929 case SPD_ATTR_LIFE_HARD_TIME:
930 case SPD_ATTR_LIFE_SOFT_BYTES:
931 case SPD_ATTR_LIFE_HARD_BYTES:
932 break;
934 case SPD_ATTR_KM_PROTO:
935 act.ipa_apply.ipp_km_proto = attrp->spd_attr_value;
936 break;
938 case SPD_ATTR_KM_COOKIE:
939 act.ipa_apply.ipp_km_cookie = attrp->spd_attr_value;
940 break;
942 case SPD_ATTR_REPLAY_DEPTH:
943 act.ipa_apply.ipp_replay_depth = attrp->spd_attr_value;
944 break;
947 if (actp != endactp) {
948 *diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
949 goto fail;
952 return (B_TRUE);
953 fail:
954 ipsec_actvec_free(*actpp, nact);
955 *actpp = NULL;
956 return (B_FALSE);
959 typedef struct
961 ipsec_policy_t *pol;
962 int dir;
963 } tmprule_t;
965 static int
966 mkrule(ipsec_policy_head_t *iph, struct spd_rule *rule,
967 ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t af,
968 tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
970 ipsec_policy_t *pol;
972 sel->ipsl_valid &= ~(IPSL_IPV6|IPSL_IPV4);
973 sel->ipsl_valid |= af;
975 pol = ipsec_policy_create(sel, actp, nact, rule->spd_rule_priority,
976 index, spds->spds_netstack);
977 if (pol == NULL)
978 return (ENOMEM);
980 (*rp)->pol = pol;
981 (*rp)->dir = dir;
982 (*rp)++;
984 if (!ipsec_check_policy(iph, pol, dir))
985 return (EEXIST);
987 rule->spd_rule_index = pol->ipsp_index;
988 return (0);
991 static int
992 mkrulepair(ipsec_policy_head_t *iph, struct spd_rule *rule,
993 ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t afs,
994 tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
996 int error;
998 if (afs & IPSL_IPV4) {
999 error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV4, rp,
1000 index, spds);
1001 if (error != 0)
1002 return (error);
1004 if (afs & IPSL_IPV6) {
1005 error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV6, rp,
1006 index, spds);
1007 if (error != 0)
1008 return (error);
1010 return (0);
1014 static void
1015 spdsock_addrule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
1016 spd_ext_t **extv, ipsec_tun_pol_t *itp)
1018 ipsec_selkey_t sel;
1019 ipsec_act_t *actp;
1020 uint_t nact;
1021 int diag = 0, error, afs;
1022 struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
1023 tmprule_t rules[4], *rulep = &rules[0];
1024 boolean_t tunnel_mode, empty_itp, active;
1025 uint64_t *index = (itp == NULL) ? NULL : &itp->itp_next_policy_index;
1026 spdsock_t *ss = (spdsock_t *)q->q_ptr;
1027 spd_stack_t *spds = ss->spdsock_spds;
1029 if (rule == NULL) {
1030 spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
1031 if (audit_active) {
1032 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
1033 cred_t *cr;
1034 pid_t cpid;
1036 cr = msg_getcred(mp, &cpid);
1037 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
1038 audit_pf_policy(SPD_ADDRULE, cr,
1039 spds->spds_netstack, ITP_NAME(itp), active,
1040 SPD_DIAGNOSTIC_NO_RULE_EXT, cpid);
1042 return;
1045 tunnel_mode = (rule->spd_rule_flags & SPD_RULE_FLAG_TUNNEL);
1047 if (itp != NULL) {
1048 mutex_enter(&itp->itp_lock);
1049 ASSERT(itp->itp_policy == iph || itp->itp_inactive == iph);
1050 active = (itp->itp_policy == iph);
1051 if (ITP_P_ISACTIVE(itp, iph)) {
1052 /* Check for mix-and-match of tunnel/transport. */
1053 if ((tunnel_mode && !ITP_P_ISTUNNEL(itp, iph)) ||
1054 (!tunnel_mode && ITP_P_ISTUNNEL(itp, iph))) {
1055 mutex_exit(&itp->itp_lock);
1056 spdsock_error(q, mp, EBUSY, 0);
1057 return;
1059 empty_itp = B_FALSE;
1060 } else {
1061 empty_itp = B_TRUE;
1062 itp->itp_flags = active ? ITPF_P_ACTIVE : ITPF_I_ACTIVE;
1063 if (tunnel_mode)
1064 itp->itp_flags |= active ? ITPF_P_TUNNEL :
1065 ITPF_I_TUNNEL;
1067 } else {
1068 empty_itp = B_FALSE;
1071 if (rule->spd_rule_index != 0) {
1072 diag = SPD_DIAGNOSTIC_INVALID_RULE_INDEX;
1073 error = EINVAL;
1074 goto fail2;
1077 if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
1078 error = EINVAL;
1079 goto fail2;
1082 if (itp != NULL) {
1083 if (tunnel_mode) {
1084 if (sel.ipsl_valid &
1085 (IPSL_REMOTE_PORT | IPSL_LOCAL_PORT)) {
1086 itp->itp_flags |= active ?
1087 ITPF_P_PER_PORT_SECURITY :
1088 ITPF_I_PER_PORT_SECURITY;
1090 } else {
1092 * For now, we don't allow transport-mode on a tunnel
1093 * with ANY specific selectors. Bail if we have such
1094 * a request.
1096 if (sel.ipsl_valid & IPSL_WILDCARD) {
1097 diag = SPD_DIAGNOSTIC_NO_TUNNEL_SELECTORS;
1098 error = EINVAL;
1099 goto fail2;
1104 if (!spdsock_ext_to_actvec(extv, &actp, &nact, &diag, spds)) {
1105 error = EINVAL;
1106 goto fail2;
1109 * If no addresses were specified, add both.
1111 afs = sel.ipsl_valid & (IPSL_IPV6|IPSL_IPV4);
1112 if (afs == 0)
1113 afs = (IPSL_IPV6|IPSL_IPV4);
1115 rw_enter(&iph->iph_lock, RW_WRITER);
1117 if (rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) {
1118 error = mkrulepair(iph, rule, &sel, actp, nact,
1119 IPSEC_TYPE_OUTBOUND, afs, &rulep, index, spds);
1120 if (error != 0)
1121 goto fail;
1124 if (rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) {
1125 error = mkrulepair(iph, rule, &sel, actp, nact,
1126 IPSEC_TYPE_INBOUND, afs, &rulep, index, spds);
1127 if (error != 0)
1128 goto fail;
1131 while ((--rulep) >= &rules[0]) {
1132 ipsec_enter_policy(iph, rulep->pol, rulep->dir,
1133 spds->spds_netstack);
1135 rw_exit(&iph->iph_lock);
1136 if (itp != NULL)
1137 mutex_exit(&itp->itp_lock);
1139 ipsec_actvec_free(actp, nact);
1140 spd_echo(q, mp);
1141 if (audit_active) {
1142 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
1143 cred_t *cr;
1144 pid_t cpid;
1146 cr = msg_getcred(mp, &cpid);
1147 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
1148 audit_pf_policy(SPD_ADDRULE, cr, spds->spds_netstack,
1149 ITP_NAME(itp), active, 0, cpid);
1151 return;
1153 fail:
1154 rw_exit(&iph->iph_lock);
1155 while ((--rulep) >= &rules[0])
1156 IPPOL_REFRELE(rulep->pol);
1157 ipsec_actvec_free(actp, nact);
1158 fail2:
1159 if (itp != NULL) {
1160 if (empty_itp)
1161 itp->itp_flags = 0;
1162 mutex_exit(&itp->itp_lock);
1164 spdsock_error(q, mp, error, diag);
1165 if (audit_active) {
1166 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
1167 cred_t *cr;
1168 pid_t cpid;
1170 cr = msg_getcred(mp, &cpid);
1171 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
1172 audit_pf_policy(SPD_ADDRULE, cr, spds->spds_netstack,
1173 ITP_NAME(itp), active, error, cpid);
/*
 * Handle an SPD_DELETERULE request on policy head "iph": remove a rule
 * either by its kernel-assigned index (spd_rule_index != 0) or by exact
 * selector match built from the request extensions.  Replies with an echo
 * of the request on success, or an error message on failure.  "itp" is
 * non-NULL when the head belongs to a tunnel; its reference is NOT
 * consumed here.
 */
void
spdsock_deleterule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	ipsec_selkey_t sel;
	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
	int err, diag = 0;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	/* A delete request without a rule extension is malformed. */
	if (rule == NULL) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
		if (audit_active) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_DELETERULE, cr, ns,
			    ITP_NAME(itp), active, SPD_DIAGNOSTIC_NO_RULE_EXT,
			    cpid);
		}
		return;
	}

	/*
	 * Must enter itp_lock first to avoid deadlock.  See tun.c's
	 * set_sec_simple() for the other case of itp_lock and iph_lock.
	 */
	if (itp != NULL)
		mutex_enter(&itp->itp_lock);

	if (rule->spd_rule_index != 0) {
		/* Delete-by-index path. */
		if (ipsec_policy_delete_index(iph, rule->spd_rule_index, ns) !=
		    0) {
			err = ESRCH;
			goto fail;
		}
	} else {
		/* Delete-by-selector path; may remove one rule per direction. */
		if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
			err = EINVAL;	/* diag already set... */
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_INBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_OUTBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}
	}

	if (itp != NULL) {
		ASSERT(iph == itp->itp_policy || iph == itp->itp_inactive);
		rw_enter(&iph->iph_lock, RW_READER);
		/*
		 * If the last rule just went away, clear the corresponding
		 * "policy present" flags on the tunnel.
		 * NOTE(review): unlike spdsock_flip_node(), no
		 * update_iptun_policy() (tunnel MTU recheck) happens here —
		 * confirm whether one is needed after emptying a polhead.
		 */
		if (avl_numnodes(&iph->iph_rulebyid) == 0) {
			if (iph == itp->itp_policy)
				itp->itp_flags &= ~ITPF_PFLAGS;
			else
				itp->itp_flags &= ~ITPF_IFLAGS;
		}
		/* Can exit locks in any order. */
		rw_exit(&iph->iph_lock);
		mutex_exit(&itp->itp_lock);
	}

	spd_echo(q, mp);
	if (audit_active) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, cr, ns, ITP_NAME(itp),
		    active, 0, cpid);
	}
	return;
fail:
	if (itp != NULL)
		mutex_exit(&itp->itp_lock);
	spdsock_error(q, mp, err, diag);
	if (audit_active) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, cr, ns, ITP_NAME(itp),
		    active, err, cpid);
	}
}
/* Do NOT consume a reference to itp. */
/* ARGSUSED */
/*
 * itp_walk() callback: swap a tunnel's active and inactive policy heads
 * (and the matching ITPF flag bits) under itp_lock, then recheck the
 * tunnel link MTU since the effective policy just changed.
 */
static void
spdsock_flip_node(ipsec_tun_pol_t *itp, void *ignoreme, netstack_t *ns)
{
	mutex_enter(&itp->itp_lock);
	ITPF_SWAP(itp->itp_flags);
	ipsec_swap_policy(itp->itp_policy, itp->itp_inactive, ns);
	mutex_exit(&itp->itp_lock);
	/* SPD_FLIP is worth a tunnel MTU check. */
	update_iptun_policy(itp);
}
/*
 * Handle SPD_FLIP: exchange active and inactive policy.  The target is
 * selected by "tunname": NULL or an empty name means the global policy
 * (an empty name additionally flips every tunnel polhead via itp_walk);
 * a non-empty name flips just that tunnel's polhead.  Echoes the request
 * back on success.
 */
void
spdsock_flip(queue_t *q, mblk_t *mp, spd_if_t *tunname)
{
	char *tname;
	ipsec_tun_pol_t *itp;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	if (tunname != NULL) {
		tname = (char *)tunname->spd_if_name;
		if (*tname == '\0') {
			/* can't fail */
			ipsec_swap_global_policy(ns);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    NULL, active, 0, cpid);
			}
			/* Empty name == global + every tunnel polhead. */
			itp_walk(spdsock_flip_node, NULL, ns);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    "all tunnels", active, 0, cpid);
			}
		} else {
			itp = get_tunnel_policy(tname, ns);
			if (itp == NULL) {
				/* Better idea for "tunnel not found"? */
				spdsock_error(q, mp, ESRCH, 0);
				if (audit_active) {
					boolean_t active;
					spd_msg_t *spmsg =
					    (spd_msg_t *)mp->b_rptr;
					cred_t *cr;
					pid_t cpid;

					cr = msg_getcred(mp, &cpid);
					active = (spmsg->spd_msg_spdid ==
					    SPD_ACTIVE);
					audit_pf_policy(SPD_FLIP, cr, ns,
					    ITP_NAME(itp), active,
					    ESRCH, cpid);
				}
				return;
			}
			spdsock_flip_node(itp, NULL, ns);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    ITP_NAME(itp), active, 0, cpid);
			}
			/* get_tunnel_policy() returned a held reference. */
			ITP_REFRELE(itp, ns);
		}
	} else {
		ipsec_swap_global_policy(ns);	/* can't fail */
		if (audit_active) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_FLIP, cr,
			    ns, NULL, active, 0, cpid);
		}
	}
	spd_echo(q, mp);
}
/*
 * Unimplemented feature: SPD_LOOKUP is not supported; always reply
 * with EINVAL.
 */
/* ARGSUSED */
static void
spdsock_lookup(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	spdsock_error(q, mp, EINVAL, 0);
}
/*
 * Build the SPD_DUMP framing record: a spd_msg_t copied from the request
 * followed by a single spd_ruleset_ext_t carrying the rule "count", the
 * polhead generation number, and "error".  Caller must hold iph_lock as
 * reader (iph_gen is read).  Returns NULL on allocation failure.
 */
static mblk_t *
spdsock_dump_ruleset(mblk_t *req, ipsec_policy_head_t *iph,
    uint32_t count, uint16_t error)
{
	size_t len = sizeof (spd_ruleset_ext_t) + sizeof (spd_msg_t);
	spd_msg_t *msg;
	spd_ruleset_ext_t *ruleset;
	mblk_t *m = allocb(len, BPRI_HI);

	ASSERT(RW_READ_HELD(&iph->iph_lock));

	if (m == NULL) {
		return (NULL);
	}
	msg = (spd_msg_t *)m->b_rptr;
	ruleset = (spd_ruleset_ext_t *)(&msg[1]);
	m->b_wptr = (uint8_t *)&ruleset[1];

	/* Start from the request header so seq/pid/spdid echo back. */
	*msg = *(spd_msg_t *)(req->b_rptr);
	msg->spd_msg_len = SPD_8TO64(len);
	msg->spd_msg_errno = error;

	ruleset->spd_ruleset_len = SPD_8TO64(sizeof (*ruleset));
	ruleset->spd_ruleset_type = SPD_EXT_RULESET;
	ruleset->spd_ruleset_count = count;
	/* Generation lets the client detect a polhead change mid-dump. */
	ruleset->spd_ruleset_version = iph->iph_gen;
	return (m);
}
/*
 * Terminate an in-progress dump: emit the trailing ruleset record (with
 * "error" and the final rule count), then release the dump's references
 * (polhead, tunnel policy if any) and free the saved request.  Returns
 * the trailer mblk, or NULL if its allocation failed.
 */
static mblk_t *
spdsock_dump_finish(spdsock_t *ss, int error)
{
	mblk_t *m;
	ipsec_policy_head_t *iph = ss->spdsock_dump_head;
	mblk_t *req = ss->spdsock_dump_req;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	rw_enter(&iph->iph_lock, RW_READER);
	m = spdsock_dump_ruleset(req, iph, ss->spdsock_dump_count, error);
	rw_exit(&iph->iph_lock);
	IPPH_REFRELE(iph, ns);
	if (ss->spdsock_itp != NULL) {
		ITP_REFRELE(ss->spdsock_itp, ns);
		ss->spdsock_itp = NULL;
	}
	/* Clearing spdsock_dump_req is what marks the dump as finished. */
	ss->spdsock_dump_req = NULL;
	freemsg(req);

	return (m);
}
1445 * Rule encoding functions.
1446 * We do a two-pass encode.
1447 * If base != NULL, fill in encoded rule part starting at base+offset.
1448 * Always return "offset" plus length of to-be-encoded data.
1450 static uint_t
1451 spdsock_encode_typecode(uint8_t *base, uint_t offset, uint8_t type,
1452 uint8_t type_end, uint8_t code, uint8_t code_end)
1454 struct spd_typecode *tcp;
1456 ASSERT(ALIGNED64(offset));
1458 if (base != NULL) {
1459 tcp = (struct spd_typecode *)(base + offset);
1460 tcp->spd_typecode_len = SPD_8TO64(sizeof (*tcp));
1461 tcp->spd_typecode_exttype = SPD_EXT_ICMP_TYPECODE;
1462 tcp->spd_typecode_code = code;
1463 tcp->spd_typecode_type = type;
1464 tcp->spd_typecode_type_end = type_end;
1465 tcp->spd_typecode_code_end = code_end;
1467 offset += sizeof (*tcp);
1469 ASSERT(ALIGNED64(offset));
1471 return (offset);
1474 static uint_t
1475 spdsock_encode_proto(uint8_t *base, uint_t offset, uint8_t proto)
1477 struct spd_proto *spp;
1479 ASSERT(ALIGNED64(offset));
1481 if (base != NULL) {
1482 spp = (struct spd_proto *)(base + offset);
1483 spp->spd_proto_len = SPD_8TO64(sizeof (*spp));
1484 spp->spd_proto_exttype = SPD_EXT_PROTO;
1485 spp->spd_proto_number = proto;
1486 spp->spd_proto_reserved1 = 0;
1487 spp->spd_proto_reserved2 = 0;
1489 offset += sizeof (*spp);
1491 ASSERT(ALIGNED64(offset));
1493 return (offset);
1496 static uint_t
1497 spdsock_encode_port(uint8_t *base, uint_t offset, uint16_t ext, uint16_t port)
1499 struct spd_portrange *spp;
1501 ASSERT(ALIGNED64(offset));
1503 if (base != NULL) {
1504 spp = (struct spd_portrange *)(base + offset);
1505 spp->spd_ports_len = SPD_8TO64(sizeof (*spp));
1506 spp->spd_ports_exttype = ext;
1507 spp->spd_ports_minport = port;
1508 spp->spd_ports_maxport = port;
1510 offset += sizeof (*spp);
1512 ASSERT(ALIGNED64(offset));
1514 return (offset);
/*
 * Emit an address extension ("ext" selects SPD_EXT_LCLADDR/SPD_EXT_REMADDR)
 * for "addr"/"pfxlen".  Address family (and hence length) is taken from
 * sel->ipsl_valid.  Two-pass like the other encoders; the extension length
 * is written last, after the 64-bit-rounded address has been accounted for.
 */
static uint_t
spdsock_encode_addr(uint8_t *base, uint_t offset, uint16_t ext,
    const ipsec_selkey_t *sel, const ipsec_addr_t *addr, uint_t pfxlen)
{
	struct spd_address *sae;
	ipsec_addr_t *spdaddr;
	uint_t start = offset;
	uint_t addrlen;
	uint_t af;

	if (sel->ipsl_valid & IPSL_IPV4) {
		af = AF_INET;
		addrlen = IP_ADDR_LEN;
	} else {
		af = AF_INET6;
		addrlen = IPV6_ADDR_LEN;
	}

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		sae = (struct spd_address *)(base + offset);
		sae->spd_address_exttype = ext;
		sae->spd_address_af = af;
		sae->spd_address_prefixlen = pfxlen;
		sae->spd_address_reserved2 = 0;

		/* Raw address bytes follow the fixed header immediately. */
		spdaddr = (ipsec_addr_t *)(&sae[1]);
		bcopy(addr, spdaddr, addrlen);
	}
	offset += sizeof (*sae);
	/* Pad the address out to the PF_POLICY 64-bit alignment unit. */
	addrlen = roundup(addrlen, sizeof (uint64_t));
	offset += addrlen;

	ASSERT(ALIGNED64(offset));

	/* Length covers header + padded address, known only now. */
	if (base != NULL)
		sae->spd_address_len = SPD_8TO64(offset - start);
	return (offset);
}
/*
 * Encode every valid selector field of "sel" (protocol, ports, addresses,
 * ICMP type/code) as PF_POLICY extensions.  Two-pass like the helpers it
 * calls.  When IPSL_ICMP_TYPE is set without IPSL_ICMP_CODE, the code
 * range is emitted as the wildcard 255..255.
 */
static uint_t
spdsock_encode_sel(uint8_t *base, uint_t offset, const ipsec_sel_t *sel)
{
	const ipsec_selkey_t *selkey = &sel->ipsl_key;

	if (selkey->ipsl_valid & IPSL_PROTOCOL)
		offset = spdsock_encode_proto(base, offset, selkey->ipsl_proto);
	if (selkey->ipsl_valid & IPSL_LOCAL_PORT)
		offset = spdsock_encode_port(base, offset, SPD_EXT_LCLPORT,
		    selkey->ipsl_lport);
	if (selkey->ipsl_valid & IPSL_REMOTE_PORT)
		offset = spdsock_encode_port(base, offset, SPD_EXT_REMPORT,
		    selkey->ipsl_rport);
	if (selkey->ipsl_valid & IPSL_REMOTE_ADDR)
		offset = spdsock_encode_addr(base, offset, SPD_EXT_REMADDR,
		    selkey, &selkey->ipsl_remote, selkey->ipsl_remote_pfxlen);
	if (selkey->ipsl_valid & IPSL_LOCAL_ADDR)
		offset = spdsock_encode_addr(base, offset, SPD_EXT_LCLADDR,
		    selkey, &selkey->ipsl_local, selkey->ipsl_local_pfxlen);
	if (selkey->ipsl_valid & IPSL_ICMP_TYPE) {
		offset = spdsock_encode_typecode(base, offset,
		    selkey->ipsl_icmp_type, selkey->ipsl_icmp_type_end,
		    (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
		    selkey->ipsl_icmp_code : 255,
		    (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
		    selkey->ipsl_icmp_code_end : 255);
	}
	return (offset);
}
1588 static uint_t
1589 spdsock_encode_actattr(uint8_t *base, uint_t offset, uint32_t tag,
1590 uint32_t value)
1592 struct spd_attribute *attr;
1594 ASSERT(ALIGNED64(offset));
1596 if (base != NULL) {
1597 attr = (struct spd_attribute *)(base + offset);
1598 attr->spd_attr_tag = tag;
1599 attr->spd_attr_value = value;
1601 offset += sizeof (struct spd_attribute);
1603 ASSERT(ALIGNED64(offset));
1605 return (offset);
1609 #define EMIT(t, v) offset = spdsock_encode_actattr(base, offset, (t), (v))
1611 static uint_t
1612 spdsock_encode_action(uint8_t *base, uint_t offset, const ipsec_action_t *ap)
1614 const struct ipsec_act *act = &(ap->ipa_act);
1615 uint_t flags;
1617 EMIT(SPD_ATTR_EMPTY, 0);
1618 switch (act->ipa_type) {
1619 case IPSEC_ACT_DISCARD:
1620 case IPSEC_ACT_REJECT:
1621 EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_DROP);
1622 break;
1623 case IPSEC_ACT_BYPASS:
1624 case IPSEC_ACT_CLEAR:
1625 EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_PASS);
1626 break;
1628 case IPSEC_ACT_APPLY:
1629 EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_IPSEC);
1630 flags = 0;
1631 if (act->ipa_apply.ipp_use_ah)
1632 flags |= SPD_APPLY_AH;
1633 if (act->ipa_apply.ipp_use_esp)
1634 flags |= SPD_APPLY_ESP;
1635 if (act->ipa_apply.ipp_use_espa)
1636 flags |= SPD_APPLY_ESPA;
1637 if (act->ipa_apply.ipp_use_se)
1638 flags |= SPD_APPLY_SE;
1639 if (act->ipa_apply.ipp_use_unique)
1640 flags |= SPD_APPLY_UNIQUE;
1641 EMIT(SPD_ATTR_FLAGS, flags);
1642 if (flags & SPD_APPLY_AH) {
1643 EMIT(SPD_ATTR_AH_AUTH, act->ipa_apply.ipp_auth_alg);
1644 EMIT(SPD_ATTR_AH_MINBITS,
1645 act->ipa_apply.ipp_ah_minbits);
1646 EMIT(SPD_ATTR_AH_MAXBITS,
1647 act->ipa_apply.ipp_ah_maxbits);
1649 if (flags & SPD_APPLY_ESP) {
1650 EMIT(SPD_ATTR_ESP_ENCR, act->ipa_apply.ipp_encr_alg);
1651 EMIT(SPD_ATTR_ENCR_MINBITS,
1652 act->ipa_apply.ipp_espe_minbits);
1653 EMIT(SPD_ATTR_ENCR_MAXBITS,
1654 act->ipa_apply.ipp_espe_maxbits);
1655 if (flags & SPD_APPLY_ESPA) {
1656 EMIT(SPD_ATTR_ESP_AUTH,
1657 act->ipa_apply.ipp_esp_auth_alg);
1658 EMIT(SPD_ATTR_ESPA_MINBITS,
1659 act->ipa_apply.ipp_espa_minbits);
1660 EMIT(SPD_ATTR_ESPA_MAXBITS,
1661 act->ipa_apply.ipp_espa_maxbits);
1664 if (act->ipa_apply.ipp_km_proto != 0)
1665 EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_proto);
1666 if (act->ipa_apply.ipp_km_cookie != 0)
1667 EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_cookie);
1668 if (act->ipa_apply.ipp_replay_depth != 0)
1669 EMIT(SPD_ATTR_REPLAY_DEPTH,
1670 act->ipa_apply.ipp_replay_depth);
1671 /* Add more here */
1672 break;
1675 return (offset);
/*
 * Encode a chain of ipsec_action_t's as one SPD_EXT_ACTION extension:
 * header, then each action's attributes separated by SPD_ATTR_NEXT and
 * terminated by SPD_ATTR_END.  The header's count and length are patched
 * in after the loop, once both are known.  Two-pass via EMIT.
 */
static uint_t
spdsock_encode_action_list(uint8_t *base, uint_t offset,
    const ipsec_action_t *ap)
{
	struct spd_ext_actions *act;
	uint_t nact = 0;
	uint_t start = offset;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		act = (struct spd_ext_actions *)(base + offset);
		/* Placeholder values; finalized after the walk below. */
		act->spd_actions_len = 0;
		act->spd_actions_exttype = SPD_EXT_ACTION;
		act->spd_actions_count = 0;
		act->spd_actions_reserved = 0;
	}

	offset += sizeof (*act);

	ASSERT(ALIGNED64(offset));

	while (ap != NULL) {
		offset = spdsock_encode_action(base, offset, ap);
		ap = ap->ipa_next;
		nact++;
		if (ap != NULL) {
			EMIT(SPD_ATTR_NEXT, 0);
		}
	}
	EMIT(SPD_ATTR_END, 0);

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		act->spd_actions_count = nact;
		act->spd_actions_len = SPD_8TO64(offset - start);
	}

	return (offset);
}

#undef EMIT
1722 /* ARGSUSED */
1723 static uint_t
1724 spdsock_rule_flags(uint_t dir, uint_t af)
1726 uint_t flags = 0;
1728 if (dir == IPSEC_TYPE_INBOUND)
1729 flags |= SPD_RULE_FLAG_INBOUND;
1730 if (dir == IPSEC_TYPE_OUTBOUND)
1731 flags |= SPD_RULE_FLAG_OUTBOUND;
1733 return (flags);
/*
 * Encode a complete SPD_DUMP record for one rule: spd_msg header (seq/pid
 * echoed from the request), spd_rule extension, optional SPD_EXT_TUN_NAME
 * (for tunnel polheads), then selector and action-list extensions.  The
 * message length is patched in last.  Two-pass like the other encoders.
 */
static uint_t
spdsock_encode_rule_head(uint8_t *base, uint_t offset, spd_msg_t *req,
    const ipsec_policy_t *rule, uint_t dir, uint_t af, char *name,
    boolean_t tunnel)
{
	struct spd_msg *spmsg;
	struct spd_rule *spr;
	spd_if_t *sid;

	uint_t start = offset;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spmsg = (struct spd_msg *)(base + offset);
		bzero(spmsg, sizeof (*spmsg));
		spmsg->spd_msg_version = PF_POLICY_V1;
		spmsg->spd_msg_type = SPD_DUMP;
		spmsg->spd_msg_seq = req->spd_msg_seq;
		spmsg->spd_msg_pid = req->spd_msg_pid;
	}
	offset += sizeof (struct spd_msg);

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spr = (struct spd_rule *)(base + offset);
		spr->spd_rule_type = SPD_EXT_RULE;
		spr->spd_rule_priority = rule->ipsp_prio;
		spr->spd_rule_flags = spdsock_rule_flags(dir, af);
		if (tunnel)
			spr->spd_rule_flags |= SPD_RULE_FLAG_TUNNEL;
		spr->spd_rule_unused = 0;
		spr->spd_rule_len = SPD_8TO64(sizeof (*spr));
		spr->spd_rule_index = rule->ipsp_index;
	}
	offset += sizeof (struct spd_rule);

	/*
	 * If we have an interface name (i.e. if this policy head came from
	 * a tunnel), add the SPD_EXT_TUN_NAME extension.
	 */
	if (name != NULL) {

		ASSERT(ALIGNED64(offset));

		if (base != NULL) {
			sid = (spd_if_t *)(base + offset);
			sid->spd_if_exttype = SPD_EXT_TUN_NAME;
			/*
			 * NOTE(review): "strlen(name) - 4" presumably
			 * accounts for name bytes embedded in spd_if_t
			 * itself before rounding up to 8 — confirm against
			 * spd_if_t's definition in net/pfpolicy.h, and that
			 * very short names cannot underflow this size.
			 */
			sid->spd_if_len = SPD_8TO64(sizeof (spd_if_t) +
			    roundup((strlen(name) - 4), 8));
			(void) strlcpy((char *)sid->spd_if_name, name,
			    LIFNAMSIZ);
		}

		offset += sizeof (spd_if_t) + roundup((strlen(name) - 4), 8);
	}

	offset = spdsock_encode_sel(base, offset, rule->ipsp_sel);
	offset = spdsock_encode_action_list(base, offset, rule->ipsp_act);

	ASSERT(ALIGNED64(offset));

	/* Total length is known only after all extensions are counted. */
	if (base != NULL) {
		spmsg->spd_msg_len = SPD_8TO64(offset - start);
	}
	return (offset);
}
1806 /* ARGSUSED */
1807 static mblk_t *
1808 spdsock_encode_rule(mblk_t *req, const ipsec_policy_t *rule,
1809 uint_t dir, uint_t af, char *name, boolean_t tunnel)
1811 mblk_t *m;
1812 uint_t len;
1813 spd_msg_t *mreq = (spd_msg_t *)req->b_rptr;
1816 * Figure out how much space we'll need.
1818 len = spdsock_encode_rule_head(NULL, 0, mreq, rule, dir, af, name,
1819 tunnel);
1822 * Allocate mblk.
1824 m = allocb(len, BPRI_HI);
1825 if (m == NULL)
1826 return (NULL);
1829 * Fill it in..
1831 m->b_wptr = m->b_rptr + len;
1832 bzero(m->b_rptr, len);
1833 (void) spdsock_encode_rule_head(m->b_rptr, 0, mreq, rule, dir, af,
1834 name, tunnel);
1835 return (m);
/*
 * Account for one dumped rule: bump the per-dump count, remember the
 * next rule in this hash chain as the cursor, and hand back "cur".
 * Caller must hold iph_lock as reader.
 */
static ipsec_policy_t *
spdsock_dump_next_in_chain(spdsock_t *ss, ipsec_policy_head_t *iph,
    ipsec_policy_t *cur)
{
	ASSERT(RW_READ_HELD(&iph->iph_lock));

	ss->spdsock_dump_count++;
	ss->spdsock_dump_cur_rule = cur->ipsp_hash.hash_next;
	return (cur);
}
/*
 * Resumable iterator over every rule in a policy head.  The cursor lives
 * in the spdsock_t (current rule, chain, address family, and direction
 * type) so the dump can be continued across calls.  Iteration order per
 * type: all hash chains, then the per-AF non-hashed lists, then the next
 * type.  Returns NULL when the head is exhausted.  Caller must hold
 * iph_lock as reader.
 */
static ipsec_policy_t *
spdsock_dump_next_rule(spdsock_t *ss, ipsec_policy_head_t *iph)
{
	ipsec_policy_t *cur;
	ipsec_policy_root_t *ipr;
	int chain, nchains, type, af;

	ASSERT(RW_READ_HELD(&iph->iph_lock));

	/* Fast path: still in the middle of a hash chain. */
	cur = ss->spdsock_dump_cur_rule;

	if (cur != NULL)
		return (spdsock_dump_next_in_chain(ss, iph, cur));

	type = ss->spdsock_dump_cur_type;

next:
	chain = ss->spdsock_dump_cur_chain;
	ipr = &iph->iph_root[type];
	nchains = ipr->ipr_nchains;

	while (chain < nchains) {
		cur = ipr->ipr_hash[chain].hash_head;
		chain++;
		if (cur != NULL) {
			ss->spdsock_dump_cur_chain = chain;
			return (spdsock_dump_next_in_chain(ss, iph, cur));
		}
	}
	ss->spdsock_dump_cur_chain = nchains;

	/* Hash chains done for this type; walk the non-hashed lists. */
	af = ss->spdsock_dump_cur_af;
	while (af < IPSEC_NAF) {
		cur = ipr->ipr_nonhash[af];
		af++;
		if (cur != NULL) {
			ss->spdsock_dump_cur_af = af;
			return (spdsock_dump_next_in_chain(ss, iph, cur));
		}
	}

	/* This type exhausted; reset the cursor and try the next type. */
	type++;
	if (type >= IPSEC_NTYPES)
		return (NULL);

	ss->spdsock_dump_cur_chain = 0;
	ss->spdsock_dump_cur_type = type;
	ss->spdsock_dump_cur_af = IPSEC_AF_V4;
	goto next;
}
/*
 * If we're done with one policy head, but have more to go, we iterate through
 * another IPsec tunnel policy head (itp).  Return NULL if it is an error
 * worthy of returning EAGAIN via PF_POLICY (i.e. the set of tunnel polheads
 * changed mid-dump), or if the AVL walk is simply finished.  On success the
 * returned itp is held and stashed as ss->spdsock_itp (replacing, and
 * releasing, any previous one).
 */
static ipsec_tun_pol_t *
spdsock_dump_iterate_next_tunnel(spdsock_t *ss, ipsec_stack_t *ipss)
{
	ipsec_tun_pol_t *itp;

	ASSERT(RW_READ_HELD(&ipss->ipsec_tunnel_policy_lock));
	if (ipss->ipsec_tunnel_policy_gen > ss->spdsock_dump_tun_gen) {
		/* Oops, state of the tunnel polheads changed. */
		itp = NULL;
	} else if (ss->spdsock_itp == NULL) {
		/* Just finished global, find first node. */
		itp = avl_first(&ipss->ipsec_tunnel_policies);
	} else {
		/* We just finished current polhead, find the next one. */
		itp = AVL_NEXT(&ipss->ipsec_tunnel_policies, ss->spdsock_itp);
	}
	if (itp != NULL) {
		ITP_REFHOLD(itp);
	}
	if (ss->spdsock_itp != NULL) {
		ITP_REFRELE(ss->spdsock_itp, ipss->ipsec_netstack);
	}
	ss->spdsock_itp = itp;
	return (itp);
}
/*
 * Produce the next encoded rule of the dump, advancing through tunnel
 * policy heads when the current head is exhausted.  Returns the encoded
 * rule, or a dump-terminating trailer (via spdsock_dump_finish) carrying
 * 0 on completion, EAGAIN if a polhead changed under us, or ENOMEM on
 * allocation failure.
 */
static mblk_t *
spdsock_dump_next_record(spdsock_t *ss)
{
	ipsec_policy_head_t *iph;
	ipsec_policy_t *rule;
	mblk_t *m;
	ipsec_tun_pol_t *itp;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	iph = ss->spdsock_dump_head;

	ASSERT(iph != NULL);

	rw_enter(&iph->iph_lock, RW_READER);

	/* Generation bump means the head changed mid-dump: bail with EAGAIN. */
	if (iph->iph_gen != ss->spdsock_dump_gen) {
		rw_exit(&iph->iph_lock);
		return (spdsock_dump_finish(ss, EAGAIN));
	}

	while ((rule = spdsock_dump_next_rule(ss, iph)) == NULL) {
		rw_exit(&iph->iph_lock);
		if (--(ss->spdsock_dump_remaining_polheads) == 0)
			return (spdsock_dump_finish(ss, 0));

		/*
		 * If we reach here, we have more policy heads (tunnel
		 * entries) to dump.  Let's reset to a new policy head
		 * and get some more rules.
		 *
		 * An empty policy head will have spdsock_dump_next_rule()
		 * return NULL, and we loop (while dropping the number of
		 * remaining polheads).  If we loop to 0, we finish.  We
		 * keep looping until we hit 0 or until we have a rule to
		 * encode.
		 *
		 * NOTE: No need for ITP_REF*() macros here as we're only
		 * going after and refholding the policy head itself.
		 */
		rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
		itp = spdsock_dump_iterate_next_tunnel(ss, ipss);
		if (itp == NULL) {
			rw_exit(&ipss->ipsec_tunnel_policy_lock);
			return (spdsock_dump_finish(ss, EAGAIN));
		}

		/* Reset other spdsock_dump thingies. */
		IPPH_REFRELE(ss->spdsock_dump_head, ns);
		if (ss->spdsock_dump_active) {
			ss->spdsock_dump_tunnel =
			    itp->itp_flags & ITPF_P_TUNNEL;
			iph = itp->itp_policy;
		} else {
			ss->spdsock_dump_tunnel =
			    itp->itp_flags & ITPF_I_TUNNEL;
			iph = itp->itp_inactive;
		}
		IPPH_REFHOLD(iph);
		rw_exit(&ipss->ipsec_tunnel_policy_lock);

		rw_enter(&iph->iph_lock, RW_READER);
		RESET_SPDSOCK_DUMP_POLHEAD(ss, iph);
	}

	m = spdsock_encode_rule(ss->spdsock_dump_req, rule,
	    ss->spdsock_dump_cur_type, ss->spdsock_dump_cur_af,
	    (ss->spdsock_itp == NULL) ? NULL : ss->spdsock_itp->itp_name,
	    ss->spdsock_dump_tunnel);
	rw_exit(&iph->iph_lock);

	if (m == NULL)
		return (spdsock_dump_finish(ss, ENOMEM));
	return (m);
}
2010 * Dump records until we run into flow-control back-pressure.
2012 static void
2013 spdsock_dump_some(queue_t *q, spdsock_t *ss)
2015 mblk_t *m, *dataind;
2017 while ((ss->spdsock_dump_req != NULL) && canputnext(q)) {
2018 m = spdsock_dump_next_record(ss);
2019 if (m == NULL)
2020 return;
2021 dataind = allocb(sizeof (struct T_data_req), BPRI_HI);
2022 if (dataind == NULL) {
2023 freemsg(m);
2024 return;
2026 dataind->b_cont = m;
2027 dataind->b_wptr += sizeof (struct T_data_req);
2028 ((struct T_data_ind *)dataind->b_rptr)->PRIM_type = T_DATA_IND;
2029 ((struct T_data_ind *)dataind->b_rptr)->MORE_flag = 0;
2030 dataind->b_datap->db_type = M_PROTO;
2031 putnext(q, dataind);
/*
 * Start dumping.
 * Format a start-of-dump record, and set up the stream and kick the rsrv
 * procedure to continue the job..
 *
 * "iph" may be a real policy head or one of the ALL_*_POLHEADS sentinels;
 * the sentinels mean "global head plus every tunnel polhead" and seed the
 * remaining-polheads count and tunnel generation snapshot accordingly.
 */
/* ARGSUSED */
static void
spdsock_dump(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp)
{
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;
	mblk_t *mr;

	/* spdsock_open() already set spdsock_itp to NULL. */
	if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
		rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
		/* 1 for the global head + one per tunnel polhead. */
		ss->spdsock_dump_remaining_polheads = 1 +
		    avl_numnodes(&ipss->ipsec_tunnel_policies);
		/* Snapshot the generation to detect mid-dump changes. */
		ss->spdsock_dump_tun_gen = ipss->ipsec_tunnel_policy_gen;
		rw_exit(&ipss->ipsec_tunnel_policy_lock);
		if (iph == ALL_ACTIVE_POLHEADS) {
			iph = ipsec_system_policy(ns);
			ss->spdsock_dump_active = B_TRUE;
		} else {
			iph = ipsec_inactive_policy(ns);
			ss->spdsock_dump_active = B_FALSE;
		}
		ASSERT(ss->spdsock_itp == NULL);
	} else {
		ss->spdsock_dump_remaining_polheads = 1;
	}

	rw_enter(&iph->iph_lock, RW_READER);

	/* Start-of-dump record: zero rules seen, no error. */
	mr = spdsock_dump_ruleset(mp, iph, 0, 0);

	if (!mr) {
		rw_exit(&iph->iph_lock);
		spdsock_error(q, mp, ENOMEM, 0);
		return;
	}

	ss->spdsock_dump_req = mp;
	RESET_SPDSOCK_DUMP_POLHEAD(ss, iph);

	rw_exit(&iph->iph_lock);

	qreply(q, mr);
	/* Kick the read-side service routine to stream out the rules. */
	qenable(OTHERQ(q));
}
/* Do NOT consume a reference to ITP. */
/*
 * itp_walk() callback for SPD_CLONE: copy this tunnel's active polhead
 * over its inactive one under itp_lock.  "ep" points at a cumulative
 * error; once any node fails, subsequent nodes are skipped.
 */
void
spdsock_clone_node(ipsec_tun_pol_t *itp, void *ep, netstack_t *ns)
{
	int *errptr = (int *)ep;

	if (*errptr != 0)
		return;	/* We've failed already for some reason. */
	mutex_enter(&itp->itp_lock);
	ITPF_CLONE(itp->itp_flags);
	*errptr = ipsec_copy_polhead(itp->itp_policy, itp->itp_inactive, ns);
	mutex_exit(&itp->itp_lock);
}
2101 void
2102 spdsock_clone(queue_t *q, mblk_t *mp, spd_if_t *tunname)
2104 int error;
2105 char *tname;
2106 ipsec_tun_pol_t *itp;
2107 spdsock_t *ss = (spdsock_t *)q->q_ptr;
2108 netstack_t *ns = ss->spdsock_spds->spds_netstack;
2110 if (tunname != NULL) {
2111 tname = (char *)tunname->spd_if_name;
2112 if (*tname == '\0') {
2113 error = ipsec_clone_system_policy(ns);
2114 if (audit_active) {
2115 boolean_t active;
2116 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2117 cred_t *cr;
2118 pid_t cpid;
2120 cr = msg_getcred(mp, &cpid);
2121 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2122 audit_pf_policy(SPD_CLONE, cr, ns,
2123 NULL, active, error, cpid);
2125 if (error == 0) {
2126 itp_walk(spdsock_clone_node, &error, ns);
2127 if (audit_active) {
2128 boolean_t active;
2129 spd_msg_t *spmsg =
2130 (spd_msg_t *)mp->b_rptr;
2131 cred_t *cr;
2132 pid_t cpid;
2134 cr = msg_getcred(mp, &cpid);
2135 active = (spmsg->spd_msg_spdid ==
2136 SPD_ACTIVE);
2137 audit_pf_policy(SPD_CLONE, cr,
2138 ns, "all tunnels", active, 0,
2139 cpid);
2142 } else {
2143 itp = get_tunnel_policy(tname, ns);
2144 if (itp == NULL) {
2145 spdsock_error(q, mp, ENOENT, 0);
2146 if (audit_active) {
2147 boolean_t active;
2148 spd_msg_t *spmsg =
2149 (spd_msg_t *)mp->b_rptr;
2150 cred_t *cr;
2151 pid_t cpid;
2153 cr = msg_getcred(mp, &cpid);
2154 active = (spmsg->spd_msg_spdid ==
2155 SPD_ACTIVE);
2156 audit_pf_policy(SPD_CLONE, cr,
2157 ns, NULL, active, ENOENT, cpid);
2159 return;
2161 spdsock_clone_node(itp, &error, NULL);
2162 if (audit_active) {
2163 boolean_t active;
2164 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2165 cred_t *cr;
2166 pid_t cpid;
2168 cr = msg_getcred(mp, &cpid);
2169 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2170 audit_pf_policy(SPD_CLONE, cr, ns,
2171 ITP_NAME(itp), active, error, cpid);
2173 ITP_REFRELE(itp, ns);
2175 } else {
2176 error = ipsec_clone_system_policy(ns);
2177 if (audit_active) {
2178 boolean_t active;
2179 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2180 cred_t *cr;
2181 pid_t cpid;
2183 cr = msg_getcred(mp, &cpid);
2184 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2185 audit_pf_policy(SPD_CLONE, cr, ns, NULL,
2186 active, error, cpid);
2190 if (error != 0)
2191 spdsock_error(q, mp, error, 0);
2192 else
2193 spd_echo(q, mp);
/*
 * Process a SPD_ALGLIST request. The caller expects separate alg entries
 * for AH authentication, ESP authentication, and ESP encryption.
 * The same distinction is then used when setting the min and max key
 * sizes when defining policies.
 */

/* Indices into the per-use-type attribute tag tables below. */
#define	SPDSOCK_AH_AUTH		0
#define	SPDSOCK_ESP_AUTH	1
#define	SPDSOCK_ESP_ENCR	2
#define	SPDSOCK_NTYPES		3

/* Algorithm-identifier attribute tag, per algorithm use type. */
static const uint_t algattr[SPDSOCK_NTYPES] = {
	SPD_ATTR_AH_AUTH,
	SPD_ATTR_ESP_AUTH,
	SPD_ATTR_ESP_ENCR
};
/* Minimum key-size attribute tag, per algorithm use type. */
static const uint_t minbitsattr[SPDSOCK_NTYPES] = {
	SPD_ATTR_AH_MINBITS,
	SPD_ATTR_ESPA_MINBITS,
	SPD_ATTR_ENCR_MINBITS
};
/* Maximum key-size attribute tag, per algorithm use type. */
static const uint_t maxbitsattr[SPDSOCK_NTYPES] = {
	SPD_ATTR_AH_MAXBITS,
	SPD_ATTR_ESPA_MAXBITS,
	SPD_ATTR_ENCR_MAXBITS
};
/* Default key-size attribute tag, per algorithm use type. */
static const uint_t defbitsattr[SPDSOCK_NTYPES] = {
	SPD_ATTR_AH_DEFBITS,
	SPD_ATTR_ESPA_DEFBITS,
	SPD_ATTR_ENCR_DEFBITS
};
/* Key-size increment attribute tag, per algorithm use type. */
static const uint_t incrbitsattr[SPDSOCK_NTYPES] = {
	SPD_ATTR_AH_INCRBITS,
	SPD_ATTR_ESPA_INCRBITS,
	SPD_ATTR_ENCR_INCRBITS
};

#define	ATTRPERALG	6	/* fixed attributes per algs */
/*
 * Handle SPD_ALGLIST: reply with one SPD_EXT_ACTION extension listing,
 * per registered algorithm, its id and key-size min/max/default/increment
 * (six attributes each, see ATTRPERALG).  AUTH algorithms are emitted
 * twice — once as AH auth, once as ESP auth — per the client's
 * expectation; the "NONE" placeholder algorithms are skipped.  Consumes
 * "mp" and sends the reply upstream.
 */
void
spdsock_alglist(queue_t *q, mblk_t *mp)
{
	uint_t algtype;
	uint_t algidx;
	uint_t algcount;
	uint_t size;
	mblk_t *m;
	uint8_t *cur;
	spd_msg_t *msg;
	struct spd_ext_actions *act;
	struct spd_attribute *attr;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;

	mutex_enter(&ipss->ipsec_alg_lock);
	/*
	 * The SPD client expects to receive separate entries for
	 * AH authentication and ESP authentication supported algorithms.
	 *
	 * Don't return the "any" algorithms, if defined, as no
	 * kernel policies can be set for these algorithms.
	 */
	algcount = 2 * ipss->ipsec_nalgs[IPSEC_ALG_AUTH] +
	    ipss->ipsec_nalgs[IPSEC_ALG_ENCR];

	if (ipss->ipsec_alglists[IPSEC_ALG_AUTH][SADB_AALG_NONE] != NULL)
		algcount--;
	if (ipss->ipsec_alglists[IPSEC_ALG_ENCR][SADB_EALG_NONE] != NULL)
		algcount--;

	/*
	 * For each algorithm, we encode:
	 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
	 */

	size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions) +
	    ATTRPERALG * sizeof (struct spd_attribute) * algcount;

	ASSERT(ALIGNED64(size));

	m = allocb(size, BPRI_HI);
	if (m == NULL) {
		mutex_exit(&ipss->ipsec_alg_lock);
		spdsock_error(q, mp, ENOMEM, 0);
		return;
	}

	m->b_wptr = m->b_rptr + size;
	cur = m->b_rptr;

	/* Echo the request header, then overwrite length/status fields. */
	msg = (spd_msg_t *)cur;
	bcopy(mp->b_rptr, cur, sizeof (*msg));

	msg->spd_msg_len = SPD_8TO64(size);
	msg->spd_msg_errno = 0;
	msg->spd_msg_diagnostic = 0;

	cur += sizeof (*msg);

	act = (struct spd_ext_actions *)cur;
	cur += sizeof (*act);

	act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
	act->spd_actions_exttype = SPD_EXT_ACTION;
	act->spd_actions_count = algcount;
	act->spd_actions_reserved = 0;

	attr = (struct spd_attribute *)cur;

#define	EMIT(tag, value) { \
		attr->spd_attr_tag = (tag); \
		attr->spd_attr_value = (value); \
		attr++; \
	}

	/*
	 * If you change the number of EMIT's here, change
	 * ATTRPERALG above to match
	 */
#define	EMITALGATTRS(_type) { \
		EMIT(algattr[_type], algid); 		/* 1 */ \
		EMIT(minbitsattr[_type], minbits);	/* 2 */ \
		EMIT(maxbitsattr[_type], maxbits);	/* 3 */ \
		EMIT(defbitsattr[_type], defbits);	/* 4 */ \
		EMIT(incrbitsattr[_type], incr);	/* 5 */ \
		EMIT(SPD_ATTR_NEXT, 0);			/* 6 */ \
	}

	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
		    algidx++) {
			int algid = ipss->ipsec_sortlist[algtype][algidx];
			ipsec_alginfo_t *alg =
			    ipss->ipsec_alglists[algtype][algid];
			uint_t minbits = alg->alg_minbits;
			uint_t maxbits = alg->alg_maxbits;
			uint_t defbits = alg->alg_default_bits;
			uint_t incr = alg->alg_increment;

			if (algtype == IPSEC_ALG_AUTH) {
				if (algid == SADB_AALG_NONE)
					continue;
				EMITALGATTRS(SPDSOCK_AH_AUTH);
				EMITALGATTRS(SPDSOCK_ESP_AUTH);
			} else {
				if (algid == SADB_EALG_NONE)
					continue;
				ASSERT(algtype == IPSEC_ALG_ENCR);
				EMITALGATTRS(SPDSOCK_ESP_ENCR);
			}
		}
	}

	mutex_exit(&ipss->ipsec_alg_lock);

#undef EMITALGATTRS
#undef EMIT
#undef ATTRPERALG

	/* Rewrite the final SPD_ATTR_NEXT as the list terminator. */
	attr--;
	attr->spd_attr_tag = SPD_ATTR_END;

	freemsg(mp);
	qreply(q, m);
}
/*
 * Process a SPD_DUMPALGS request: reply with one message describing every
 * registered IPsec algorithm (auth and encr), including key sizes, block
 * sizes, parameters, flags, and the KCF mechanism name.
 *
 * The reply consumes the request mblk's header (bcopy'd below) and is sent
 * with qreply(); the request mblk itself is freed on all paths.
 */

#define	ATTRPERALG	9	/* fixed attributes per algs */

void
spdsock_dumpalgs(queue_t *q, mblk_t *mp)
{
	uint_t algtype;
	uint_t algidx;
	uint_t size;
	mblk_t *m;
	uint8_t *cur;
	spd_msg_t *msg;
	struct spd_ext_actions *act;
	struct spd_attribute *attr;
	ipsec_alginfo_t *alg;
	uint_t algid;
	uint_t i;
	uint_t alg_size;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;

	/* Hold the alg lock across both the sizing pass and the emit pass. */
	mutex_enter(&ipss->ipsec_alg_lock);

	/*
	 * For each algorithm, we encode:
	 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
	 *
	 * ALG_ID / ALG_PROTO / ALG_INCRBITS / ALG_NKEYSIZES / ALG_KEYSIZE*
	 * ALG_NBLOCKSIZES / ALG_BLOCKSIZE* / ALG_NPARAMS / ALG_PARAMS* /
	 * ALG_MECHNAME / ALG_FLAGS / {END, NEXT}
	 */

	/*
	 * Compute the size of the SPD message.
	 */
	size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions);

	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
		    algidx++) {
			algid = ipss->ipsec_sortlist[algtype][algidx];
			alg = ipss->ipsec_alglists[algtype][algid];
			/*
			 * Fixed attrs plus one attribute per key size,
			 * block size, and parameter, plus the raw
			 * mechanism-name bytes appended after its attr.
			 */
			alg_size = sizeof (struct spd_attribute) *
			    (ATTRPERALG + alg->alg_nkey_sizes +
			    alg->alg_nblock_sizes + alg->alg_nparams) +
			    CRYPTO_MAX_MECH_NAME;
			size += alg_size;
		}
	}

	ASSERT(ALIGNED64(size));

	m = allocb(size, BPRI_HI);
	if (m == NULL) {
		mutex_exit(&ipss->ipsec_alg_lock);
		spdsock_error(q, mp, ENOMEM, 0);
		return;
	}
	m->b_wptr = m->b_rptr + size;
	cur = m->b_rptr;

	/* Echo the request header, then overwrite the reply-specific fields. */
	msg = (spd_msg_t *)cur;
	bcopy(mp->b_rptr, cur, sizeof (*msg));

	msg->spd_msg_len = SPD_8TO64(size);
	msg->spd_msg_errno = 0;
	/*
	 * NOTE(review): the reply type is set to SPD_ALGLIST even though this
	 * services SPD_DUMPALGS — confirm consumers expect this.
	 */
	msg->spd_msg_type = SPD_ALGLIST;
	msg->spd_msg_diagnostic = 0;

	cur += sizeof (*msg);

	act = (struct spd_ext_actions *)cur;
	cur += sizeof (*act);

	act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
	act->spd_actions_exttype = SPD_EXT_ACTION;
	act->spd_actions_count = ipss->ipsec_nalgs[IPSEC_ALG_AUTH] +
	    ipss->ipsec_nalgs[IPSEC_ALG_ENCR];
	act->spd_actions_reserved = 0;

	/*
	 * If there aren't any algorithms registered, return an empty message.
	 * spdsock_get_ext() knows how to deal with this.
	 */
	if (act->spd_actions_count == 0) {
		act->spd_actions_len = 0;
		mutex_exit(&ipss->ipsec_alg_lock);
		goto error;
	}

	attr = (struct spd_attribute *)cur;

/* Append one attribute record and advance the cursor. */
#define	EMIT(tag, value) {					\
		attr->spd_attr_tag = (tag);			\
		attr->spd_attr_value = (value);			\
		attr++;						\
	}

	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
		    algidx++) {

			algid = ipss->ipsec_sortlist[algtype][algidx];
			alg = ipss->ipsec_alglists[algtype][algid];

			/*
			 * If you change the number of EMIT's here, change
			 * ATTRPERALG above to match
			 */
			EMIT(SPD_ATTR_ALG_ID, algid);		/* 1 */
			EMIT(SPD_ATTR_ALG_PROTO, algproto[algtype]); /* 2 */
			EMIT(SPD_ATTR_ALG_INCRBITS, alg->alg_increment); /* 3 */
			EMIT(SPD_ATTR_ALG_NKEYSIZES, alg->alg_nkey_sizes);
			for (i = 0; i < alg->alg_nkey_sizes; i++)
				EMIT(SPD_ATTR_ALG_KEYSIZE,
				    alg->alg_key_sizes[i]);

			EMIT(SPD_ATTR_ALG_NBLOCKSIZES, alg->alg_nblock_sizes);
			for (i = 0; i < alg->alg_nblock_sizes; i++)
				EMIT(SPD_ATTR_ALG_BLOCKSIZE,
				    alg->alg_block_sizes[i]);

			EMIT(SPD_ATTR_ALG_NPARAMS, alg->alg_nparams);
			for (i = 0; i < alg->alg_nparams; i++)
				EMIT(SPD_ATTR_ALG_PARAMS,
				    alg->alg_params[i]);

			EMIT(SPD_ATTR_ALG_FLAGS, alg->alg_flags);

			/*
			 * The mechanism name is emitted as an attribute whose
			 * value is its length, followed by the raw name bytes;
			 * step the cursor past them by hand.
			 */
			EMIT(SPD_ATTR_ALG_MECHNAME, CRYPTO_MAX_MECH_NAME);
			bcopy(alg->alg_mech_name, attr, CRYPTO_MAX_MECH_NAME);
			attr = (struct spd_attribute *)((char *)attr +
			    CRYPTO_MAX_MECH_NAME);

			EMIT(SPD_ATTR_NEXT, 0);
		}
	}

	mutex_exit(&ipss->ipsec_alg_lock);

#undef	EMITALGATTRS
#undef	EMIT
#undef	ATTRPERALG

	/* Rewrite the trailing NEXT marker as the END marker. */
	attr--;
	attr->spd_attr_tag = SPD_ATTR_END;

error:
	freemsg(mp);
	qreply(q, m);
}
/*
 * Do the actual work of processing an SPD_UPDATEALGS request. Can
 * be invoked either once IPsec is loaded on a cached request, or
 * when a request is received while IPsec is loaded.
 *
 * Walks the SPD_EXT_ACTION attribute list, building a staging table of
 * ipsec_alginfo_t in spds->spds_algs, then merges it into the live tables
 * via spdsock_merge_algs().  Returns -1 on success or an SPD_DIAGNOSTIC_*
 * code on parse failure.  Caller must hold spds->spds_alg_lock.
 */
static int
spdsock_do_updatealg(spd_ext_t *extv[], spd_stack_t *spds)
{
	struct spd_ext_actions *actp;
	struct spd_attribute *attr, *endattr;
	uint64_t *start, *end;
	ipsec_alginfo_t *alg = NULL;	/* algorithm currently being built */
	ipsec_algtype_t alg_type = 0;
	boolean_t skip_alg = B_TRUE, doing_proto = B_FALSE;
	uint_t i, cur_key, cur_block, algid;
	int diag = -1;

	ASSERT(MUTEX_HELD(&spds->spds_alg_lock));

	/* parse the message, building the list of algorithms */

	actp = (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
	if (actp == NULL)
		return (SPD_DIAGNOSTIC_NO_ACTION_EXT);

	start = (uint64_t *)actp;
	end = (start + actp->spd_actions_len);
	endattr = (struct spd_attribute *)end;
	attr = (struct spd_attribute *)&actp[1];

	bzero(spds->spds_algs, IPSEC_NALGTYPES * IPSEC_MAX_ALGS *
	    sizeof (ipsec_alginfo_t *));

	alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);

/* Allocation sizes include one trailing zero entry as a terminator. */
#define	ALG_KEY_SIZES(a)	(((a)->alg_nkey_sizes + 1) * sizeof (uint16_t))
#define	ALG_BLOCK_SIZES(a)	(((a)->alg_nblock_sizes + 1) * sizeof (uint16_t))
#define	ALG_PARAM_SIZES(a)	(((a)->alg_nparams + 1) * sizeof (uint16_t))

	while (attr < endattr) {
		switch (attr->spd_attr_tag) {
		case SPD_ATTR_NOP:
		case SPD_ATTR_EMPTY:
			break;
		case SPD_ATTR_END:
			/* Force loop exit after handling as a NEXT. */
			attr = endattr;
			/* FALLTHRU */
		case SPD_ATTR_NEXT:
			if (doing_proto) {
				doing_proto = B_FALSE;
				break;
			}
			/*
			 * Commit the algorithm built so far (ownership moves
			 * into spds_algs) unless its protocol was unknown,
			 * then start a fresh one.
			 */
			if (skip_alg) {
				ipsec_alg_free(alg);
			} else {
				ipsec_alg_free(
				    spds->spds_algs[alg_type][alg->alg_id]);
				spds->spds_algs[alg_type][alg->alg_id] =
				    alg;
			}
			alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);
			break;

		case SPD_ATTR_ALG_ID:
			if (attr->spd_attr_value >= IPSEC_MAX_ALGS) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "invalid alg id %d\n",
				    attr->spd_attr_value));
				diag = SPD_DIAGNOSTIC_ALG_ID_RANGE;
				goto bail;
			}
			alg->alg_id = attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_PROTO:
			/* find the alg type */
			for (i = 0; i < NALGPROTOS; i++)
				if (algproto[i] == attr->spd_attr_value)
					break;
			skip_alg = (i == NALGPROTOS);
			if (!skip_alg)
				alg_type = i;
			break;

		case SPD_ATTR_ALG_INCRBITS:
			alg->alg_increment = attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_NKEYSIZES:
			if (alg->alg_key_sizes != NULL) {
				kmem_free(alg->alg_key_sizes,
				    ALG_KEY_SIZES(alg));
			}
			alg->alg_nkey_sizes = attr->spd_attr_value;
			/*
			 * Allocate room for the trailing zero key size
			 * value as well.
			 */
			alg->alg_key_sizes = kmem_zalloc(ALG_KEY_SIZES(alg),
			    KM_SLEEP);
			cur_key = 0;
			break;

		case SPD_ATTR_ALG_KEYSIZE:
			if (alg->alg_key_sizes == NULL ||
			    cur_key >= alg->alg_nkey_sizes) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "too many key sizes\n"));
				diag = SPD_DIAGNOSTIC_ALG_NUM_KEY_SIZES;
				goto bail;
			}
			alg->alg_key_sizes[cur_key++] = attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_FLAGS:
			/*
			 * Flags (bit mask). The alg_flags element of
			 * ipsecalg_flags_t is only 8 bits wide. The
			 * user can set the VALID bit, but we will ignore it
			 * and make the decision is the algorithm is valid.
			 */
			alg->alg_flags |= (uint8_t)attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_NBLOCKSIZES:
			if (alg->alg_block_sizes != NULL) {
				kmem_free(alg->alg_block_sizes,
				    ALG_BLOCK_SIZES(alg));
			}
			alg->alg_nblock_sizes = attr->spd_attr_value;
			/*
			 * Allocate room for the trailing zero block size
			 * value as well.
			 */
			alg->alg_block_sizes = kmem_zalloc(ALG_BLOCK_SIZES(alg),
			    KM_SLEEP);
			cur_block = 0;
			break;

		case SPD_ATTR_ALG_BLOCKSIZE:
			if (alg->alg_block_sizes == NULL ||
			    cur_block >= alg->alg_nblock_sizes) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "too many block sizes\n"));
				diag = SPD_DIAGNOSTIC_ALG_NUM_BLOCK_SIZES;
				goto bail;
			}
			alg->alg_block_sizes[cur_block++] =
			    attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_NPARAMS:
			if (alg->alg_params != NULL) {
				kmem_free(alg->alg_params,
				    ALG_PARAM_SIZES(alg));
			}
			alg->alg_nparams = attr->spd_attr_value;
			/*
			 * Allocate room for the trailing zero block size
			 * value as well.
			 */
			alg->alg_params = kmem_zalloc(ALG_PARAM_SIZES(alg),
			    KM_SLEEP);
			/* NOTE: cur_block is reused as the params cursor. */
			cur_block = 0;
			break;

		case SPD_ATTR_ALG_PARAMS:
			if (alg->alg_params == NULL ||
			    cur_block >= alg->alg_nparams) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "too many params\n"));
				/*
				 * NOTE(review): reuses the BLOCK_SIZES
				 * diagnostic for a params overflow — confirm
				 * no ALG_NUM_PARAMS diagnostic exists.
				 */
				diag = SPD_DIAGNOSTIC_ALG_NUM_BLOCK_SIZES;
				goto bail;
			}
			/*
			 * Array contains: iv_len, icv_len, salt_len
			 * Any additional parameters are currently ignored.
			 */
			alg->alg_params[cur_block++] =
			    attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_MECHNAME: {
			char *mech_name;

			if (attr->spd_attr_value > CRYPTO_MAX_MECH_NAME) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "mech name too long\n"));
				diag = SPD_DIAGNOSTIC_ALG_MECH_NAME_LEN;
				goto bail;
			}
			/* Name bytes follow the attribute in-line. */
			mech_name = (char *)(attr + 1);
			bcopy(mech_name, alg->alg_mech_name,
			    attr->spd_attr_value);
			alg->alg_mech_name[CRYPTO_MAX_MECH_NAME-1] = '\0';
			/* Skip over the in-line name bytes. */
			attr = (struct spd_attribute *)((char *)attr +
			    attr->spd_attr_value);
			break;
		}

		case SPD_ATTR_PROTO_ID:
			doing_proto = B_TRUE;
			for (i = 0; i < NALGPROTOS; i++) {
				if (algproto[i] == attr->spd_attr_value) {
					alg_type = i;
					break;
				}
			}
			break;

		case SPD_ATTR_PROTO_EXEC_MODE:
			if (!doing_proto)
				break;
			for (i = 0; i < NEXECMODES; i++) {
				if (execmodes[i] == attr->spd_attr_value) {
					spds->spds_algs_exec_mode[alg_type] = i;
					break;
				}
			}
			break;
		}
		attr++;
	}

#undef	ALG_KEY_SIZES
#undef	ALG_BLOCK_SIZES
#undef	ALG_PARAM_SIZES

	/* update the algorithm tables */
	spdsock_merge_algs(spds);
bail:
	/* cleanup: free the in-progress alg and any staged-but-unmerged ones */
	ipsec_alg_free(alg);
	for (alg_type = 0; alg_type < IPSEC_NALGTYPES; alg_type++)
		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++)
			if (spds->spds_algs[alg_type][algid] != NULL)
				ipsec_alg_free(spds->spds_algs[alg_type][algid]);
	return (diag);
}
/*
 * Process an SPD_UPDATEALGS request. If IPsec is not loaded, queue
 * the request until IPsec loads. If IPsec is loaded, act on it
 * immediately.
 */
static void
spdsock_updatealg(queue_t *q, mblk_t *mp, spd_ext_t *extv[])
{
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;
	ipsec_stack_t *ipss = spds->spds_netstack->netstack_ipsec;

	if (!ipsec_loaded(ipss)) {
		/*
		 * IPsec is not loaded, save request and return nicely,
		 * the message will be processed once IPsec loads.
		 */
		mblk_t *new_mp;

		/* last update message wins */
		if ((new_mp = copymsg(mp)) == NULL) {
			spdsock_error(q, mp, ENOMEM, 0);
			return;
		}
		mutex_enter(&spds->spds_alg_lock);
		/* Stash the parsed extension vector and the message itself. */
		bcopy(extv, spds->spds_extv_algs,
		    sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));
		if (spds->spds_mp_algs != NULL)
			freemsg(spds->spds_mp_algs);
		spds->spds_mp_algs = mp;
		spds->spds_algs_pending = B_TRUE;
		mutex_exit(&spds->spds_alg_lock);
		if (audit_active) {
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			audit_pf_policy(SPD_UPDATEALGS, cr,
			    spds->spds_netstack, NULL, B_TRUE, EAGAIN,
			    cpid);
		}
		/* Ack with the copy; the original is now queued above. */
		spd_echo(q, new_mp);
	} else {
		/*
		 * IPsec is loaded, act on the message immediately.
		 */
		int diag;

		mutex_enter(&spds->spds_alg_lock);
		diag = spdsock_do_updatealg(extv, spds);
		if (diag == -1) {
			/* Keep the lock held while we walk the SA tables. */
			sadb_alg_update(IPSEC_ALG_ALL, 0, 0,
			    spds->spds_netstack);
			mutex_exit(&spds->spds_alg_lock);
			spd_echo(q, mp);
			if (audit_active) {
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				audit_pf_policy(SPD_UPDATEALGS, cr,
				    spds->spds_netstack, NULL, B_TRUE, 0,
				    cpid);
			}
		} else {
			/* Parse failure: report the diagnostic code. */
			mutex_exit(&spds->spds_alg_lock);
			spdsock_diag(q, mp, diag);
			if (audit_active) {
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				audit_pf_policy(SPD_UPDATEALGS, cr,
				    spds->spds_netstack, NULL, B_TRUE, diag,
				    cpid);
			}
		}
	}
}
2843 * Find a tunnel instance (using the name to link ID mapping), and
2844 * update it after an IPsec change. We need to do this always in case
2845 * we add policy AFTER plumbing a tunnel. We also need to do this
2846 * because, as a side-effect, the tunnel's MTU is updated to reflect
2847 * any IPsec overhead in the itp's policy.
2849 static void
2850 update_iptun_policy(ipsec_tun_pol_t *itp)
2852 datalink_id_t linkid;
2854 if (dls_mgmt_get_linkid(itp->itp_name, &linkid) == 0)
2855 iptun_set_policy(linkid, itp);
/*
 * Sort through the mess of polhead options to retrieve an appropriate one.
 * Returns NULL if we send an spdsock error. Returns a valid pointer if we
 * found a valid polhead. Returns ALL_ACTIVE_POLHEADS (aka. -1) or
 * ALL_INACTIVE_POLHEADS (aka. -2) if the operation calls for the operation to
 * act on ALL policy heads.
 *
 * On success with a tunnel, *itpp holds the tunnel policy (with a DUMP-only
 * extra refhold) and the returned polhead carries a reference the caller
 * must IPPH_REFRELE.
 */
static ipsec_policy_head_t *
get_appropriate_polhead(queue_t *q, mblk_t *mp, spd_if_t *tunname, int spdid,
    int msgtype, ipsec_tun_pol_t **itpp)
{
	ipsec_tun_pol_t *itp;
	ipsec_policy_head_t *iph;
	int errno;
	char *tname;
	boolean_t active;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	uint64_t gen;	/* Placeholder */

	active = (spdid == SPD_ACTIVE);
	*itpp = NULL;
	/* Only the active and standby (inactive) SPDs are addressable. */
	if (!active && spdid != SPD_STANDBY) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_SPDID);
		return (NULL);
	}

	if (tunname != NULL) {
		/* Acting on a tunnel's SPD. */
		tname = (char *)tunname->spd_if_name;
		if (*tname == '\0') {
			/* Handle all-polhead cases here. */
			if (msgtype != SPD_FLUSH && msgtype != SPD_DUMP) {
				spdsock_diag(q, mp,
				    SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
				return (NULL);
			}
			/* Sentinel pointer values, not real polheads. */
			return (active ? ALL_ACTIVE_POLHEADS :
			    ALL_INACTIVE_POLHEADS);
		}

		itp = get_tunnel_policy(tname, ns);
		if (itp == NULL) {
			if (msgtype != SPD_ADDRULE) {
				/* "Tunnel not found" */
				spdsock_error(q, mp, ENOENT, 0);
				return (NULL);
			}
			/* ADDRULE may create the tunnel policy on the fly. */
			errno = 0;
			itp = create_tunnel_policy(tname, &errno, &gen, ns);
			if (itp == NULL) {
				/*
				 * Something very bad happened, most likely
				 * ENOMEM. Return an indicator.
				 */
				spdsock_error(q, mp, errno, 0);
				return (NULL);
			}
		}

		/* Match up the itp to an iptun instance. */
		update_iptun_policy(itp);

		*itpp = itp;
		/* For spdsock dump state, set the polhead's name. */
		if (msgtype == SPD_DUMP) {
			ITP_REFHOLD(itp);
			ss->spdsock_itp = itp;
			ss->spdsock_dump_tunnel = itp->itp_flags &
			    (active ? ITPF_P_TUNNEL : ITPF_I_TUNNEL);
		}
	} else {
		itp = NULL;
		/* For spdsock dump state, indicate it's global policy. */
		if (msgtype == SPD_DUMP)
			ss->spdsock_itp = NULL;
	}

	/* Select the global or per-tunnel polhead of the requested flavor. */
	if (active)
		iph = (itp == NULL) ? ipsec_system_policy(ns) : itp->itp_policy;
	else
		iph = (itp == NULL) ? ipsec_inactive_policy(ns) :
		    itp->itp_inactive;

	ASSERT(iph != NULL);
	if (itp != NULL) {
		/* Tunnel polheads need an explicit hold; global ones don't. */
		IPPH_REFHOLD(iph);
	}

	return (iph);
}
/*
 * Central PF_POLICY message parser/dispatcher.  Validates message framing,
 * splits it into extensions, loads IPsec on demand, resolves the policy
 * head(s) the request addresses, and hands off to the per-message-type
 * handlers.  Consumes mp on every path (directly or via the handlers).
 */
static void
spdsock_parse(queue_t *q, mblk_t *mp)
{
	spd_msg_t *spmsg;
	spd_ext_t *extv[SPD_EXT_MAX + 1];
	uint_t msgsize;
	ipsec_policy_head_t *iph;
	ipsec_tun_pol_t *itp;
	spd_if_t *tunname;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;
	netstack_t *ns = spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	/* Make sure nothing's below me. */
	ASSERT(WR(q)->q_next == NULL);

	spmsg = (spd_msg_t *)mp->b_rptr;

	msgsize = SPD_64TO8(spmsg->spd_msg_len);

	if (msgdsize(mp) != msgsize) {
		/*
		 * Message len incorrect w.r.t. actual size. Send an error
		 * (EMSGSIZE). It may be necessary to massage things a
		 * bit. For example, if the spd_msg_type is hosed,
		 * I need to set it to SPD_RESERVED to get delivery to
		 * do the right thing. Then again, maybe just letting
		 * the error delivery do the right thing.
		 */
		ss2dbg(spds,
		    ("mblk (%lu) and base (%d) message sizes don't jibe.\n",
		    msgdsize(mp), msgsize));
		spdsock_error(q, mp, EMSGSIZE, SPD_DIAGNOSTIC_NONE);
		return;
	}

	if (msgsize > (uint_t)(mp->b_wptr - mp->b_rptr)) {
		/* Get all message into one mblk. */
		if (pullupmsg(mp, -1) == 0) {
			/*
			 * Something screwy happened.
			 */
			ss3dbg(spds, ("spdsock_parse: pullupmsg() failed.\n"));
			return;
		} else {
			spmsg = (spd_msg_t *)mp->b_rptr;
		}
	}

	/* Split the message into its extensions, diagnosing any breakage. */
	switch (spdsock_get_ext(extv, spmsg, msgsize)) {
	case KGE_DUP:
		/* Handle duplicate extension. */
		ss1dbg(spds, ("Got duplicate extension of type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, dup_ext_diag[extv[0]->spd_ext_type]);
		return;
	case KGE_UNK:
		/* Handle unknown extension. */
		ss1dbg(spds, ("Got unknown extension of type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_UNKNOWN_EXT);
		return;
	case KGE_LEN:
		/* Length error. */
		ss1dbg(spds, ("Length %d on extension type %d overrun or 0.\n",
		    extv[0]->spd_ext_len, extv[0]->spd_ext_type));
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_EXTLEN);
		return;
	case KGE_CHK:
		/* Reality check failed. */
		ss1dbg(spds, ("Reality check failed on extension type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, bad_ext_diag[extv[0]->spd_ext_type]);
		return;
	default:
		/* Default case is no errors. */
		break;
	}

	/*
	 * Special-case SPD_UPDATEALGS so as not to load IPsec.
	 */
	if (!ipsec_loaded(ipss) && spmsg->spd_msg_type != SPD_UPDATEALGS) {
		spdsock_t *ss = (spdsock_t *)q->q_ptr;

		ASSERT(ss != NULL);
		/* Kick off the load and park this message until it's done. */
		ipsec_loader_loadnow(ipss);
		ss->spdsock_timeout_arg = mp;
		ss->spdsock_timeout = qtimeout(q, spdsock_loadcheck,
		    q, LOADCHECK_INTERVAL);
		return;
	}

	/* First check for messages that need no polheads at all. */
	switch (spmsg->spd_msg_type) {
	case SPD_UPDATEALGS:
		spdsock_updatealg(q, mp, extv);
		return;
	case SPD_ALGLIST:
		spdsock_alglist(q, mp);
		return;
	case SPD_DUMPALGS:
		spdsock_dumpalgs(q, mp);
		return;
	}

	/*
	 * Then check for ones that need both primary/secondary polheads,
	 * finding the appropriate tunnel policy if need be.
	 */
	tunname = (spd_if_t *)extv[SPD_EXT_TUN_NAME];
	switch (spmsg->spd_msg_type) {
	case SPD_FLIP:
		spdsock_flip(q, mp, tunname);
		return;
	case SPD_CLONE:
		spdsock_clone(q, mp, tunname);
		return;
	}

	/*
	 * Finally, find ones that operate on exactly one polhead, or
	 * "all polheads" of a given type (active/inactive).
	 */
	iph = get_appropriate_polhead(q, mp, tunname, spmsg->spd_msg_spdid,
	    spmsg->spd_msg_type, &itp);
	if (iph == NULL)
		return;

	/* All-polheads-ready operations. */
	switch (spmsg->spd_msg_type) {
	case SPD_FLUSH:
		if (itp != NULL) {
			/* Clear the flushed side's tunnel-policy flags. */
			mutex_enter(&itp->itp_lock);
			if (spmsg->spd_msg_spdid == SPD_ACTIVE)
				itp->itp_flags &= ~ITPF_PFLAGS;
			else
				itp->itp_flags &= ~ITPF_IFLAGS;
			mutex_exit(&itp->itp_lock);
		}

		spdsock_flush(q, iph, itp, mp);

		if (itp != NULL) {
			/* SPD_FLUSH is worth a tunnel MTU check. */
			update_iptun_policy(itp);
			ITP_REFRELE(itp, ns);
		}
		return;
	case SPD_DUMP:
		if (itp != NULL)
			ITP_REFRELE(itp, ns);
		spdsock_dump(q, iph, mp);
		return;
	}

	/* The sentinel values are invalid for single-polhead operations. */
	if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
		return;
	}

	/* Single-polhead-only operations. */
	switch (spmsg->spd_msg_type) {
	case SPD_ADDRULE:
		spdsock_addrule(q, iph, mp, extv, itp);
		break;
	case SPD_DELETERULE:
		spdsock_deleterule(q, iph, mp, extv, itp);
		break;
	case SPD_LOOKUP:
		spdsock_lookup(q, iph, mp, extv, itp);
		break;
	default:
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_MSG_TYPE);
		break;
	}

	IPPH_REFRELE(iph, ns);
	if (itp != NULL) {
		/* SPD_{ADD,DELETE}RULE are worth a tunnel MTU check. */
		if (spmsg->spd_msg_type == SPD_ADDRULE ||
		    spmsg->spd_msg_type == SPD_DELETERULE)
			update_iptun_policy(itp);
		ITP_REFRELE(itp, ns);
	}
}
/*
 * If an algorithm mapping was received before IPsec was loaded, process it.
 * Called from the IPsec loader.
 *
 * The pending request (extension vector stashed by spdsock_updatealg())
 * is applied at most once; spds_algs_pending gates re-entry.
 */
void
spdsock_update_pending_algs(netstack_t *ns)
{
	spd_stack_t *spds = ns->netstack_spdsock;

	mutex_enter(&spds->spds_alg_lock);
	if (spds->spds_algs_pending) {
		/* Diagnostic result is discarded; the requester was already
		 * acked with EAGAIN when the request was queued. */
		(void) spdsock_do_updatealg(spds->spds_extv_algs, spds);
		spds->spds_algs_pending = B_FALSE;
	}
	mutex_exit(&spds->spds_alg_lock);
}
3156 static void
3157 spdsock_loadcheck(void *arg)
3159 queue_t *q = (queue_t *)arg;
3160 spdsock_t *ss = (spdsock_t *)q->q_ptr;
3161 mblk_t *mp;
3162 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;
3164 ASSERT(ss != NULL);
3166 ss->spdsock_timeout = 0;
3167 mp = ss->spdsock_timeout_arg;
3168 ASSERT(mp != NULL);
3169 ss->spdsock_timeout_arg = NULL;
3170 if (ipsec_failed(ipss))
3171 spdsock_error(q, mp, EPROTONOSUPPORT, 0);
3172 else
3173 spdsock_parse(q, mp);
3177 * Copy relevant state bits.
3179 static void
3180 spdsock_copy_info(struct T_info_ack *tap, spdsock_t *ss)
3182 *tap = spdsock_g_t_info_ack;
3183 tap->CURRENT_state = ss->spdsock_state;
3184 tap->OPT_size = spdsock_max_optsize;
3188 * This routine responds to T_CAPABILITY_REQ messages. It is called by
3189 * spdsock_wput. Much of the T_CAPABILITY_ACK information is copied from
3190 * spdsock_g_t_info_ack. The current state of the stream is copied from
3191 * spdsock_state.
3193 static void
3194 spdsock_capability_req(queue_t *q, mblk_t *mp)
3196 spdsock_t *ss = (spdsock_t *)q->q_ptr;
3197 t_uscalar_t cap_bits1;
3198 struct T_capability_ack *tcap;
3200 cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1;
3202 mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
3203 mp->b_datap->db_type, T_CAPABILITY_ACK);
3204 if (mp == NULL)
3205 return;
3207 tcap = (struct T_capability_ack *)mp->b_rptr;
3208 tcap->CAP_bits1 = 0;
3210 if (cap_bits1 & TC1_INFO) {
3211 spdsock_copy_info(&tcap->INFO_ack, ss);
3212 tcap->CAP_bits1 |= TC1_INFO;
3215 qreply(q, mp);
3219 * This routine responds to T_INFO_REQ messages. It is called by
3220 * spdsock_wput_other.
3221 * Most of the T_INFO_ACK information is copied from spdsock_g_t_info_ack.
3222 * The current state of the stream is copied from spdsock_state.
3224 static void
3225 spdsock_info_req(q, mp)
3226 queue_t *q;
3227 mblk_t *mp;
3229 mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO,
3230 T_INFO_ACK);
3231 if (mp == NULL)
3232 return;
3233 spdsock_copy_info((struct T_info_ack *)mp->b_rptr,
3234 (spdsock_t *)q->q_ptr);
3235 qreply(q, mp);
3239 * spdsock_err_ack. This routine creates a
3240 * T_ERROR_ACK message and passes it
3241 * upstream.
3243 static void
3244 spdsock_err_ack(q, mp, t_error, sys_error)
3245 queue_t *q;
3246 mblk_t *mp;
3247 int t_error;
3248 int sys_error;
3250 if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL)
3251 qreply(q, mp);
3255 * This routine retrieves the current status of socket options.
3256 * It returns the size of the option retrieved.
3258 /* ARGSUSED */
3260 spdsock_opt_get(queue_t *q, int level, int name, uchar_t *ptr)
3262 int *i1 = (int *)ptr;
3264 switch (level) {
3265 case SOL_SOCKET:
3266 switch (name) {
3267 case SO_TYPE:
3268 *i1 = SOCK_RAW;
3269 break;
3271 * The following two items can be manipulated,
3272 * but changing them should do nothing.
3274 case SO_SNDBUF:
3275 *i1 = (int)q->q_hiwat;
3276 break;
3277 case SO_RCVBUF:
3278 *i1 = (int)(RD(q)->q_hiwat);
3279 break;
3281 break;
3282 default:
3283 return (0);
3285 return (sizeof (int));
3289 * This routine sets socket options.
3291 /* ARGSUSED */
3293 spdsock_opt_set(queue_t *q, uint_t mgmt_flags, int level, int name,
3294 uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp,
3295 void *thisdg_attrs, cred_t *cr)
3297 int *i1 = (int *)invalp;
3298 spdsock_t *ss = (spdsock_t *)q->q_ptr;
3299 spd_stack_t *spds = ss->spdsock_spds;
3301 switch (level) {
3302 case SOL_SOCKET:
3303 switch (name) {
3304 case SO_SNDBUF:
3305 if (*i1 > spds->spds_max_buf)
3306 return (ENOBUFS);
3307 q->q_hiwat = *i1;
3308 break;
3309 case SO_RCVBUF:
3310 if (*i1 > spds->spds_max_buf)
3311 return (ENOBUFS);
3312 RD(q)->q_hiwat = *i1;
3313 (void) proto_set_rx_hiwat(RD(q), NULL, *i1);
3314 break;
3316 break;
3318 return (0);
/*
 * Handle STREAMS messages.
 *
 * Non-PF_POLICY traffic lands here from spdsock_wput(): TPI primitives
 * (M_PROTO/M_PCPROTO), ndd ioctls (M_IOCTL), and flushes (M_FLUSH).
 * Anything unrecognized is freed.
 */
static void
spdsock_wput_other(queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	int error;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;
	cred_t *cr;

	switch (mp->b_datap->db_type) {
	case M_PROTO:
	case M_PCPROTO:
		/* Need at least the TPI primitive type word. */
		if ((mp->b_wptr - mp->b_rptr) < sizeof (long)) {
			ss3dbg(spds, (
			    "spdsock_wput_other: Not big enough M_PROTO\n"));
			freemsg(mp);
			return;
		}
		switch (((union T_primitives *)mp->b_rptr)->type) {
		case T_CAPABILITY_REQ:
			spdsock_capability_req(q, mp);
			break;
		case T_INFO_REQ:
			spdsock_info_req(q, mp);
			break;
		case T_SVR4_OPTMGMT_REQ:
		case T_OPTMGMT_REQ:
			/*
			 * All Solaris components should pass a db_credp
			 * for this TPI message, hence we ASSERT.
			 * But in case there is some other M_PROTO that looks
			 * like a TPI message sent by some other kernel
			 * component, we check and return an error.
			 */
			cr = msg_getcred(mp, NULL);
			ASSERT(cr != NULL);
			if (cr == NULL) {
				spdsock_err_ack(q, mp, TSYSERR, EINVAL);
				return;
			}
			if (((union T_primitives *)mp->b_rptr)->type ==
			    T_SVR4_OPTMGMT_REQ) {
				svr4_optcom_req(q, mp, cr, &spdsock_opt_obj);
			} else {
				tpi_optcom_req(q, mp, cr, &spdsock_opt_obj);
			}
			break;
		case T_DATA_REQ:
		case T_EXDATA_REQ:
		case T_ORDREL_REQ:
			/* Illegal for spdsock. */
			freemsg(mp);
			(void) putnextctl1(RD(q), M_ERROR, EPROTO);
			break;
		default:
			/* Not supported by spdsock. */
			spdsock_err_ack(q, mp, TNOTSUPPORT, 0);
			break;
		}
		return;
	case M_IOCTL:
		iocp = (struct iocblk *)mp->b_rptr;
		error = EINVAL;

		switch (iocp->ioc_cmd) {
		case ND_SET:
		case ND_GET:
			/* nd_getset() handled the reply if it returns true. */
			if (nd_getset(q, spds->spds_g_nd, mp)) {
				qreply(q, mp);
				return;
			} else
				error = ENOENT;
			/* FALLTHRU */
		default:
			miocnak(q, mp, 0, error);
			return;
		}
	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(q, FLUSHALL);
			*mp->b_rptr &= ~FLUSHW;
		}
		if (*mp->b_rptr & FLUSHR) {
			/* Bounce the flush back for the read side. */
			qreply(q, mp);
			return;
		}
		/* Else FALLTHRU */
	}

	/* If fell through, just black-hole the message. */
	freemsg(mp);
}
/*
 * Write-side put procedure.  Routes PF_POLICY payload (M_DATA, or the data
 * continuation of a T_DATA_REQ) into spdsock_parse(); all other message
 * types go to spdsock_wput_other().  While a policy dump is in progress,
 * everything is queued for spdsock_wsrv() instead.
 */
static void
spdsock_wput(queue_t *q, mblk_t *mp)
{
	uint8_t *rptr = mp->b_rptr;
	mblk_t *mp1;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;

	/*
	 * If we're dumping, defer processing other messages until the
	 * dump completes.
	 */
	if (ss->spdsock_dump_req != NULL) {
		if (!putq(q, mp))
			freemsg(mp);
		return;
	}

	switch (mp->b_datap->db_type) {
	case M_DATA:
		/*
		 * Silently discard.
		 */
		ss2dbg(spds, ("raw M_DATA in spdsock.\n"));
		freemsg(mp);
		return;
	case M_PROTO:
	case M_PCPROTO:
		if ((mp->b_wptr - rptr) >= sizeof (struct T_data_req)) {
			if (((union T_primitives *)rptr)->type == T_DATA_REQ) {
				if ((mp1 = mp->b_cont) == NULL) {
					/* No data after T_DATA_REQ. */
					ss2dbg(spds,
					    ("No data after DATA_REQ.\n"));
					freemsg(mp);
					return;
				}
				/* Strip the TPI header, keep the payload. */
				freeb(mp);
				mp = mp1;
				ss2dbg(spds, ("T_DATA_REQ\n"));
				break;	/* Out of switch. */
			}
		}
		/* FALLTHRU */
	default:
		ss3dbg(spds, ("In default wput case (%d %d).\n",
		    mp->b_datap->db_type, ((union T_primitives *)rptr)->type));
		spdsock_wput_other(q, mp);
		return;
	}

	/* I now have a PF_POLICY message in an M_DATA block. */
	spdsock_parse(q, mp);
}
/*
 * Device open procedure, called when new queue pair created.
 * We are passed the read-side queue.
 *
 * Requires PRIV_SYS_IP_CONFIG; allocates a minor number and a per-stream
 * spdsock_t, and holds the netstack for the lifetime of the stream
 * (released in spdsock_close()).
 */
/* ARGSUSED */
static int
spdsock_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	spdsock_t *ss;
	queue_t *oq = OTHERQ(q);
	minor_t ssminor;
	netstack_t *ns;
	spd_stack_t *spds;

	if (secpolicy_ip_config(credp, B_FALSE) != 0)
		return (EPERM);

	if (q->q_ptr != NULL)
		return (0);  /* Re-open of an already open instance. */

	if (sflag & MODOPEN)
		return (EINVAL);

	/* Hold acquired here is released in spdsock_close(). */
	ns = netstack_find_by_cred(credp);
	ASSERT(ns != NULL);
	spds = ns->netstack_spdsock;
	ASSERT(spds != NULL);

	ss2dbg(spds, ("Made it into PF_POLICY socket open.\n"));

	/* Minor 0 is reserved, so a 0 return means the arena is exhausted. */
	ssminor = (minor_t)(uintptr_t)vmem_alloc(spdsock_vmem, 1, VM_NOSLEEP);
	if (ssminor == 0) {
		netstack_rele(spds->spds_netstack);
		return (ENOMEM);
	}
	ss = kmem_zalloc(sizeof (spdsock_t), KM_NOSLEEP);
	if (ss == NULL) {
		vmem_free(spdsock_vmem, (void *)(uintptr_t)ssminor, 1);
		netstack_rele(spds->spds_netstack);
		return (ENOMEM);
	}

	ss->spdsock_minor = ssminor;
	ss->spdsock_state = TS_UNBND;
	ss->spdsock_dump_req = NULL;

	ss->spdsock_spds = spds;

	/* Both halves of the queue pair share the same spdsock_t. */
	q->q_ptr = ss;
	oq->q_ptr = ss;

	q->q_hiwat = spds->spds_recv_hiwat;

	oq->q_hiwat = spds->spds_xmit_hiwat;
	oq->q_lowat = spds->spds_xmit_lowat;

	qprocson(q);
	(void) proto_set_rx_hiwat(q, NULL, spds->spds_recv_hiwat);

	*devp = makedevice(getmajor(*devp), ss->spdsock_minor);
	return (0);
}
3537 * Read-side service procedure, invoked when we get back-enabled
3538 * when buffer space becomes available.
3540 * Dump another chunk if we were dumping before; when we finish, kick
3541 * the write-side queue in case it's waiting for read queue space.
3543 void
3544 spdsock_rsrv(queue_t *q)
3546 spdsock_t *ss = q->q_ptr;
3548 if (ss->spdsock_dump_req != NULL)
3549 spdsock_dump_some(q, ss);
3551 if (ss->spdsock_dump_req == NULL)
3552 qenable(OTHERQ(q));
/*
 * Write-side service procedure, invoked when we defer processing
 * if another message is received while a dump is in progress.
 */
void
spdsock_wsrv(queue_t *q)
{
	spdsock_t *ss = q->q_ptr;
	mblk_t *mp;
	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;

	/* Still dumping: give the read side a chance to drain first. */
	if (ss->spdsock_dump_req != NULL) {
		qenable(OTHERQ(q));
		return;
	}

	while ((mp = getq(q)) != NULL) {
		if (ipsec_loaded(ipss)) {
			spdsock_wput(q, mp);
			/* wput may have started a new dump; stop if so. */
			if (ss->spdsock_dump_req != NULL)
				return;
		} else if (!ipsec_failed(ipss)) {
			/*
			 * IPsec still loading: park the message back on the
			 * queue.  NOTE(review): relies on STREAMS scheduling
			 * to avoid re-draining it in this same pass — confirm.
			 */
			(void) putq(q, mp);
		} else {
			/* IPsec load failed permanently; fail the request. */
			spdsock_error(q, mp, EPFNOSUPPORT, 0);
		}
	}
}
/*
 * Device close procedure: tear down a PF_POLICY stream.  Cancels any
 * pending load-check timeout, returns the minor number to the arena,
 * drops the netstack hold taken at open, and frees the per-stream state.
 */
static int
spdsock_close(queue_t *q)
{
	spdsock_t *ss = q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;

	/* Stop put/service routines before dismantling state. */
	qprocsoff(q);

	/* Safe assumption. */
	ASSERT(ss != NULL);

	/* Cancel a pending spdsock_loadcheck() timeout, if armed. */
	if (ss->spdsock_timeout != 0)
		(void) quntimeout(q, ss->spdsock_timeout);

	ss3dbg(spds, ("Driver close, PF_POLICY socket is going away.\n"));

	vmem_free(spdsock_vmem, (void *)(uintptr_t)ss->spdsock_minor, 1);
	/* Release the hold taken by netstack_find_by_cred() in open. */
	netstack_rele(ss->spdsock_spds->spds_netstack);

	kmem_free(ss, sizeof (spdsock_t));
	return (0);
}
3608 * Merge the IPsec algorithms tables with the received algorithm information.
3610 void
3611 spdsock_merge_algs(spd_stack_t *spds)
3613 ipsec_alginfo_t *alg, *oalg;
3614 ipsec_algtype_t algtype;
3615 uint_t algidx, algid, nalgs;
3616 crypto_mech_name_t *mechs;
3617 uint_t mech_count, mech_idx;
3618 netstack_t *ns = spds->spds_netstack;
3619 ipsec_stack_t *ipss = ns->netstack_ipsec;
3621 ASSERT(MUTEX_HELD(&spds->spds_alg_lock));
3624 * Get the list of supported mechanisms from the crypto framework.
3625 * If a mechanism is supported by KCF, resolve its mechanism
3626 * id and mark it as being valid. This operation must be done
3627 * without holding alg_lock, since it can cause a provider
3628 * module to be loaded and the provider notification callback to
3629 * be invoked.
3631 mechs = crypto_get_mech_list(&mech_count, KM_SLEEP);
3632 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
3633 for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
3634 int algflags = 0;
3635 crypto_mech_type_t mt = CRYPTO_MECHANISM_INVALID;
3637 alg = spds->spds_algs[algtype][algid];
3638 if (alg == NULL)
3639 continue;
3642 * The NULL encryption algorithm is a special
3643 * case because there are no mechanisms, yet
3644 * the algorithm is still valid.
3646 if (alg->alg_id == SADB_EALG_NULL) {
3647 alg->alg_mech_type = CRYPTO_MECHANISM_INVALID;
3648 alg->alg_flags |= ALG_FLAG_VALID;
3649 continue;
3652 for (mech_idx = 0; mech_idx < mech_count; mech_idx++) {
3653 if (strncmp(alg->alg_mech_name, mechs[mech_idx],
3654 CRYPTO_MAX_MECH_NAME) == 0) {
3655 mt = crypto_mech2id(alg->alg_mech_name);
3656 ASSERT(mt != CRYPTO_MECHANISM_INVALID);
3657 algflags = ALG_FLAG_VALID;
3658 break;
3661 alg->alg_mech_type = mt;
3662 alg->alg_flags |= algflags;
3666 mutex_enter(&ipss->ipsec_alg_lock);
3669 * For each algorithm currently defined, check if it is
3670 * present in the new tables created from the SPD_UPDATEALGS
3671 * message received from user-space.
3672 * Delete the algorithm entries that are currently defined
3673 * but not part of the new tables.
3675 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
3676 nalgs = ipss->ipsec_nalgs[algtype];
3677 for (algidx = 0; algidx < nalgs; algidx++) {
3678 algid = ipss->ipsec_sortlist[algtype][algidx];
3679 if (spds->spds_algs[algtype][algid] == NULL)
3680 ipsec_alg_unreg(algtype, algid, ns);
3685 * For each algorithm we just received, check if it is
3686 * present in the currently defined tables. If it is, swap
3687 * the entry with the one we just allocated.
3688 * If the new algorithm is not in the current tables,
3689 * add it.
3691 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
3692 for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
3693 alg = spds->spds_algs[algtype][algid];
3694 if (alg == NULL)
3695 continue;
3697 if ((oalg = ipss->ipsec_alglists[algtype][algid]) ==
3698 NULL) {
3700 * New algorithm, add it to the algorithm
3701 * table.
3703 ipsec_alg_reg(algtype, alg, ns);
3704 } else {
3706 * Algorithm is already in the table. Swap
3707 * the existing entry with the new one.
3709 ipsec_alg_fix_min_max(alg, algtype, ns);
3710 ipss->ipsec_alglists[algtype][algid] = alg;
3711 ipsec_alg_free(oalg);
3713 spds->spds_algs[algtype][algid] = NULL;
3717 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
3718 ipss->ipsec_algs_exec_mode[algtype] =
3719 spds->spds_algs_exec_mode[algtype];
3722 mutex_exit(&ipss->ipsec_alg_lock);
3724 crypto_free_mech_list(mechs, mech_count);
3726 ipsecah_algs_changed(ns);
3727 ipsecesp_algs_changed(ns);