Unleashed v1.4
[unleashed.git] / usr / src / cmd / svc / configd / rc_node.c
blobbd6febc0ea5b235e22da2e37b10a96708d9c3e35
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
25 * Copyright (c) 2016 by Delphix. All rights reserved.
29 * rc_node.c - In-memory SCF object management
31 * This layer manages the in-memory cache (the Repository Cache) of SCF
32 * data. Read requests are usually satisfied from here, but may require
33 * load calls to the "object" layer. Modify requests always write-through
34 * to the object layer.
36 * SCF data comprises scopes, services, instances, snapshots, snaplevels,
37 * property groups, properties, and property values. All but the last are
38 * known here as "entities" and are represented by rc_node_t data
39 * structures. (Property values are kept in the rn_values member of the
40 * respective property, not as separate objects.) All entities besides
41 * the "localhost" scope have some entity as a parent, and therefore form
42 * a tree.
44 * The entity tree is rooted at rc_scope, which rc_node_init() initializes to
45 * the "localhost" scope. The tree is filled in from the database on-demand
46 * by rc_node_fill_children().
48 * rc_node_t's are also placed in the cache_hash[] hash table, for rapid
49 * lookup.
51 * Multiple threads may service client requests, so access to each
52 * rc_node_t is synchronized by its rn_lock member. Some fields are
53 * protected by bits in the rn_flags field instead, to support operations
54 * which need to drop rn_lock, for example to respect locking order. Such
55 * flags should be manipulated with the rc_node_{hold,rele}_flag()
56 * functions.
58 * We track references to nodes to tell when they can be free()d. rn_refs
59 * should be incremented with rc_node_hold() on the creation of client
60 * references (rc_node_ptr_t's and rc_iter_t's). rn_erefs ("ephemeral
61 * references") should be incremented when a pointer is read into a local
62 * variable of a thread, with rc_node_hold_ephemeral_locked(). This
63 * hasn't been fully implemented, however, so rc_node_rele() tolerates
64 * rn_erefs being 0. Some code which predates rn_erefs counts ephemeral
65 * references in rn_refs. Other references are tracked by the
66 * rn_other_refs field and the RC_NODE_DEAD, RC_NODE_IN_PARENT,
67 * RC_NODE_OLD, and RC_NODE_ON_FORMER flags.
69 * Locking rules: To dereference an rc_node_t * (usually to lock it), you must
70 * have a hold (rc_node_hold()) on it or otherwise be sure that it hasn't been
71 * rc_node_destroy()ed (hold a lock on its parent or child, hold a flag,
72 * etc.). Once you have locked an rc_node_t you must check its rn_flags for
73 * RC_NODE_DEAD before you can use it. This is usually done with the
74 * rc_node_{wait,hold}_flag() functions (often via the rc_node_check_*()
75 * functions & RC_NODE_*() macros), which fail if the object has died.
77 * When a transactional node (property group or snapshot) is updated,
78 * a new node takes the place of the old node in the global hash and the
79 * old node is hung off of the rn_former list of the new node. At the
80 * same time, all of its children have their rn_parent_ref pointer set,
81 * and any holds they have are reflected in the old node's rn_other_refs
82 * count. This is automatically kept up to date until the final reference
83 * to the subgraph is dropped, at which point the node is unrefed and
84 * destroyed, along with all of its children.
86 * Because name service lookups may take a long time and, more importantly
87 * may trigger additional accesses to the repository, perm_granted() must be
88 * called without holding any locks.
90 * An ITER_START for a non-ENTITY_VALUE induces an rc_node_fill_children()
91 * call via rc_node_setup_iter() to populate the rn_children uu_list of the
92 * rc_node_t * in question and a call to uu_list_walk_start() on that list. For
93 * ITER_READ, rc_iter_next() uses uu_list_walk_next() to find the next
94 * appropriate child.
96 * An ITER_START for an ENTITY_VALUE makes sure the node has its values
97 * filled, and sets up the iterator. An ITER_READ_VALUE just copies out
98 * the proper values and updates the offset information.
100 * To allow aliases, snapshots are implemented with a level of indirection.
101 * A snapshot rc_node_t has a snapid which refers to an rc_snapshot_t in
102 * snapshot.c which contains the authoritative snaplevel information. The
103 * snapid is "assigned" by rc_attach_snapshot().
105 * We provide the client layer with rc_node_ptr_t's to reference objects.
106 * Objects referred to by them are automatically held & released by
107 * rc_node_assign() & rc_node_clear(). The RC_NODE_PTR_*() macros are used at
108 * client.c entry points to read the pointers. They fetch the pointer to the
109 * object, return (from the function) if it is dead, and lock, hold, or hold
110 * a flag of the object.
114 * Permission checking is authorization-based: some operations may only
115 * proceed if the user has been assigned at least one of a set of
116 * authorization strings. The set of enabling authorizations depends on the
117 * operation and the target object. The set of authorizations assigned to
118 * a user is determined by an algorithm defined in libsecdb.
120 * The fastest way to decide whether the two sets intersect is by entering the
121 * strings into a hash table and detecting collisions, which takes linear time
122 * in the total size of the sets. Except for the authorization patterns which
123 * may be assigned to users, which without advanced pattern-matching
124 * algorithms will take O(n) in the number of enabling authorizations, per
125 * pattern.
127 * We can achieve some practical speed-ups by noting that if we enter all of
128 * the authorizations from one of the sets into the hash table we can merely
129 * check the elements of the second set for existence without adding them.
130 * This reduces memory requirements and hash table clutter. The enabling set
131 * is well suited for this because it is internal to configd (for now, at
132 * least). Combine this with short-circuiting and we can even minimize the
133 * number of queries to the security databases (user_attr & prof_attr).
135 * To force this usage onto clients we provide functions for adding
136 * authorizations to the enabling set of a permission context structure
137 * (perm_add_*()) and one to decide whether the user associated with the
138 * current door call client possesses any of them (perm_granted()).
140 * At some point, a generic version of this should move to libsecdb.
144 * Composition is the combination of sets of properties. The sets are ordered
145 * and properties in higher sets obscure properties of the same name in lower
146 * sets. Here we present a composed view of an instance's properties as the
147 * union of its properties and its service's properties. Similarly the
148 * properties of snaplevels are combined to form a composed view of the
149 * properties of a snapshot (which should match the composed view of the
150 * properties of the instance when the snapshot was taken).
152 * In terms of the client interface, the client may request that a property
153 * group iterator for an instance or snapshot be composed. Property groups
154 * traversed by such an iterator may not have the target entity as a parent.
155 * Similarly, the properties traversed by a property iterator for those
156 * property groups may not have the property groups iterated as parents.
158 * Implementation requires that iterators for instances and snapshots be
159 * composition-savvy, and that we have a "composed property group" entity
160 * which represents the composition of a number of property groups. Iteration
161 * over "composed property groups" yields properties which may have different
162 * parents, but for all other operations a composed property group behaves
163 * like the top-most property group it represents.
165 * The implementation is based on the rn_cchain[] array of rc_node_t pointers
166 * in rc_node_t. For instances, the pointers point to the instance and its
167 * parent service. For snapshots they point to the child snaplevels, and for
168 * composed property groups they point to property groups. A composed
169 * iterator carries an index into rn_cchain[]. Thus most of the magic ends up
170 * in the rc_iter_*() code.
173 #include <assert.h>
174 #include <atomic.h>
175 #include <errno.h>
176 #include <libuutil.h>
177 #include <libscf.h>
178 #include <libscf_priv.h>
179 #include <pthread.h>
180 #include <pwd.h>
181 #include <stdio.h>
182 #include <stdlib.h>
183 #include <strings.h>
184 #include <sys/types.h>
185 #include <syslog.h>
186 #include <unistd.h>
187 #include <secdb.h>
189 #include "configd.h"
/*
 * Authorization strings, property group names, and property names used
 * by the permission-checking code below.
 */
#define	AUTH_PREFIX		"solaris.smf."
#define	AUTH_MANAGE		AUTH_PREFIX "manage"
#define	AUTH_MODIFY		AUTH_PREFIX "modify"
#define	AUTH_MODIFY_PREFIX	AUTH_MODIFY "."
#define	AUTH_PG_ACTIONS		SCF_PG_RESTARTER_ACTIONS
#define	AUTH_PG_ACTIONS_TYPE	SCF_PG_RESTARTER_ACTIONS_TYPE
#define	AUTH_PG_GENERAL		SCF_PG_GENERAL
#define	AUTH_PG_GENERAL_TYPE	SCF_PG_GENERAL_TYPE
#define	AUTH_PG_GENERAL_OVR	SCF_PG_GENERAL_OVR
#define	AUTH_PG_GENERAL_OVR_TYPE SCF_PG_GENERAL_OVR_TYPE
#define	AUTH_PROP_ACTION	"action_authorization"
#define	AUTH_PROP_ENABLED	"enabled"
#define	AUTH_PROP_MODIFY	"modify_authorization"
#define	AUTH_PROP_VALUE		"value_authorization"
#define	AUTH_PROP_READ		"read_authorization"

/* Maximum number of distinct child entity types any parent type has. */
#define	MAX_VALID_CHILDREN 3
/*
 * Per-entity-type metadata: how many auxiliary backend ids the type's
 * lookup key uses, the uu_check_name() flags describing valid names
 * (RT_NO_NAME for unnamed types), and which entity types may appear as
 * its children.
 */
typedef struct rc_type_info {
	uint32_t	rt_type;	/* matches array index */
	uint32_t	rt_num_ids;
	uint32_t	rt_name_flags;
	uint32_t	rt_valid_children[MAX_VALID_CHILDREN];
} rc_type_info_t;

/* rt_name_flags sentinel for entity types that have no name. */
#define	RT_NO_NAME	-1U
/* Type metadata table, indexed by REP_PROTOCOL_ENTITY_* value. */
static rc_type_info_t rc_types[] = {
	{REP_PROTOCOL_ENTITY_NONE, 0, RT_NO_NAME},
	{REP_PROTOCOL_ENTITY_SCOPE, 0, 0,
	    {REP_PROTOCOL_ENTITY_SERVICE, REP_PROTOCOL_ENTITY_SCOPE}},
	{REP_PROTOCOL_ENTITY_SERVICE, 0, UU_NAME_DOMAIN | UU_NAME_PATH,
	    {REP_PROTOCOL_ENTITY_INSTANCE, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
	{REP_PROTOCOL_ENTITY_INSTANCE, 1, UU_NAME_DOMAIN,
	    {REP_PROTOCOL_ENTITY_SNAPSHOT, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
	{REP_PROTOCOL_ENTITY_SNAPSHOT, 2, UU_NAME_DOMAIN,
	    {REP_PROTOCOL_ENTITY_SNAPLEVEL, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
	{REP_PROTOCOL_ENTITY_SNAPLEVEL, 4, RT_NO_NAME,
	    {REP_PROTOCOL_ENTITY_PROPERTYGRP}},
	{REP_PROTOCOL_ENTITY_PROPERTYGRP, 5, UU_NAME_DOMAIN,
	    {REP_PROTOCOL_ENTITY_PROPERTY}},
	{REP_PROTOCOL_ENTITY_CPROPERTYGRP, 0, UU_NAME_DOMAIN,
	    {REP_PROTOCOL_ENTITY_PROPERTY}},
	{REP_PROTOCOL_ENTITY_PROPERTY, 7, UU_NAME_DOMAIN},
	{-1U}			/* terminator */
};

#define	NUM_TYPES	((sizeof (rc_types) / sizeof (*rc_types)))
/* Element of a permcheck_t hash table. */
struct pc_elt {
	struct pc_elt	*pce_next;
	/*
	 * Old-style variable-length trailing array: elements are
	 * allocated large enough to hold the whole auth string.
	 */
	char		pce_auth[1];
};
/*
 * The pc_auth_type specifies the types (sources) of authorization
 * strings.  The enum is ordered in increasing specificity.
 */
typedef enum pc_auth_type {
	PC_AUTH_NONE = 0,	/* no auth string available. */
	PC_AUTH_SMF,		/* strings coded into SMF. */
	PC_AUTH_SVC,		/* strings specified in PG of a service. */
	PC_AUTH_INST		/* strings specified in PG of an instance. */
} pc_auth_type_t;
/*
 * The following enum is used to represent the results of the checks to see
 * if the client has the appropriate permissions to perform an action.
 */
typedef enum perm_status {
	PERM_DENIED = 0,	/* Permission denied. */
	PERM_GRANTED,		/* Client has authorizations. */
	PERM_GONE,		/* Door client went away. */
	PERM_FAIL		/* Generic failure. e.g. resources */
} perm_status_t;
/* An authorization set hash table. */
typedef struct {
	struct pc_elt	**pc_buckets;
	uint_t		pc_bnum;	/* number of buckets */
	uint_t		pc_enum;	/* number of elements */
	struct pc_elt	*pc_specific;	/* most specific element */
	pc_auth_type_t	pc_specific_type; /* type of pc_specific */
} permcheck_t;
/* uu_list pools for node children and the notification machinery. */
static uu_list_pool_t *rc_children_pool;
static uu_list_pool_t *rc_pg_notify_pool;
static uu_list_pool_t *rc_notify_pool;
static uu_list_pool_t *rc_notify_info_pool;

/* Root of the entity tree: the "localhost" scope. */
static rc_node_t *rc_scope;

/* Global notification state; see the design comment below. */
static pthread_mutex_t rc_pg_notify_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t rc_pg_notify_cv = PTHREAD_COND_INITIALIZER;
static uint_t rc_notify_in_use;	/* blocks removals */
/*
 * We support an arbitrary number of clients interested in events for certain
 * types of changes.  Each client is represented by an rc_notify_info_t, and
 * all clients are chained onto the rc_notify_info_list.
 *
 * The rc_notify_list is the global notification list.  Each entry is of
 * type rc_notify_t, which is embedded in one of three other structures:
 *
 *	rc_node_t		property group update notification
 *	rc_notify_delete_t	object deletion notification
 *	rc_notify_info_t	notification clients
 *
 * Which type of object is determined by which pointer in the rc_notify_t is
 * non-NULL.
 *
 * New notifications and clients are added to the end of the list.
 * Notifications no-one is interested in are never added to the list.
 *
 * Clients use their position in the list to track which notifications they
 * have not yet reported.  As they process notifications, they move forward
 * in the list past them.  There is always a client at the beginning of the
 * list -- as it moves past notifications, it removes them from the list and
 * cleans them up.
 *
 * The rc_pg_notify_lock protects all notification state.  The rc_pg_notify_cv
 * is used for global signalling, and each client has a cv which it waits for
 * events of interest on.
 *
 * rc_notify_in_use is used to protect rc_notify_list from deletions when
 * the rc_pg_notify_lock is dropped.  Specifically, rc_notify_info_wait()
 * must drop the lock to call rc_node_assign(), and then it reacquires the
 * lock.  Deletions from rc_notify_list during this period are not
 * allowed.  Insertions do not matter, because they are always done at the
 * end of the list.
 */
static uu_list_t *rc_notify_info_list;
static uu_list_t *rc_notify_list;
/* Global rc_node_t cache; HASH_SIZE must be a power of two. */
#define	HASH_SIZE	512
#define	HASH_MASK	(HASH_SIZE - 1)

#pragma align 64(cache_hash)
static cache_bucket_t cache_hash[HASH_SIZE];

#define	CACHE_BUCKET(h)		(&cache_hash[(h) & HASH_MASK])

static void rc_node_no_client_refs(rc_node_t *np);
337 static uint32_t
338 rc_node_hash(rc_node_lookup_t *lp)
340 uint32_t type = lp->rl_type;
341 uint32_t backend = lp->rl_backend;
342 uint32_t mainid = lp->rl_main_id;
343 uint32_t *ids = lp->rl_ids;
345 rc_type_info_t *tp = &rc_types[type];
346 uint32_t num_ids;
347 uint32_t left;
348 uint32_t hash;
350 assert(backend == BACKEND_TYPE_NORMAL ||
351 backend == BACKEND_TYPE_NONPERSIST);
353 assert(type > 0 && type < NUM_TYPES);
354 num_ids = tp->rt_num_ids;
356 left = MAX_IDS - num_ids;
357 assert(num_ids <= MAX_IDS);
359 hash = type * 7 + mainid * 5 + backend;
361 while (num_ids-- > 0)
362 hash = hash * 11 + *ids++ * 7;
365 * the rest should be zeroed
367 while (left-- > 0)
368 assert(*ids++ == 0);
370 return (hash);
373 static int
374 rc_node_match(rc_node_t *np, rc_node_lookup_t *l)
376 rc_node_lookup_t *r = &np->rn_id;
377 rc_type_info_t *tp;
378 uint32_t type;
379 uint32_t num_ids;
381 if (r->rl_main_id != l->rl_main_id)
382 return (0);
384 type = r->rl_type;
385 if (type != l->rl_type)
386 return (0);
388 assert(type > 0 && type < NUM_TYPES);
390 tp = &rc_types[r->rl_type];
391 num_ids = tp->rt_num_ids;
393 assert(num_ids <= MAX_IDS);
394 while (num_ids-- > 0)
395 if (r->rl_ids[num_ids] != l->rl_ids[num_ids])
396 return (0);
398 return (1);
402 * Register an ephemeral reference to np. This should be done while both
403 * the persistent reference from which the np pointer was read is locked
404 * and np itself is locked. This guarantees that another thread which
405 * thinks it has the last reference will yield without destroying the
406 * node.
408 static void
409 rc_node_hold_ephemeral_locked(rc_node_t *np)
411 assert(MUTEX_HELD(&np->rn_lock));
413 ++np->rn_erefs;
/*
 * "Other" references are kept in the atomically maintained count
 * rn_other_refs, which may be bumped from arbitrary context and tracks
 * references to a possibly out-of-date node's children.
 *
 * To keep the node from disappearing between the final drop of
 * rn_other_refs and the unref handling, rn_other_refs_held is bumped on
 * 0->1 transitions and decremented (with the node lock held) on 1->0
 * transitions.
 */
static void
rc_node_hold_other(rc_node_t *np)
{
	uint32_t newcnt = atomic_add_32_nv(&np->rn_other_refs, 1);

	if (newcnt == 1) {
		atomic_add_32(&np->rn_other_refs_held, 1);
		assert(np->rn_other_refs_held > 0);
	}
	assert(np->rn_other_refs > 0);
}
/*
 * Drop an "other" reference on np.  No node locks may be held on entry.
 *
 * On the 1->0 transition of rn_other_refs we take rn_lock and drop
 * rn_other_refs_held; if that was also the last hold and the node is an
 * RC_NODE_OLD node with no client refs, rc_node_no_client_refs() tears
 * the node down.  Note there is no unlock after that call -- it is
 * responsible for releasing rn_lock itself.
 */
static void
rc_node_rele_other(rc_node_t *np)
{
	assert(np->rn_other_refs > 0);
	if (atomic_add_32_nv(&np->rn_other_refs, -1) == 0) {
		(void) pthread_mutex_lock(&np->rn_lock);
		assert(np->rn_other_refs_held > 0);
		if (atomic_add_32_nv(&np->rn_other_refs_held, -1) == 0 &&
		    np->rn_refs == 0 && (np->rn_flags & RC_NODE_OLD)) {
			/*
			 * This was the last client reference.  Destroy
			 * any other references and free() the node.
			 */
			rc_node_no_client_refs(np);
		} else {
			(void) pthread_mutex_unlock(&np->rn_lock);
		}
	}
}
459 static void
460 rc_node_hold_locked(rc_node_t *np)
462 assert(MUTEX_HELD(&np->rn_lock));
464 if (np->rn_refs == 0 && (np->rn_flags & RC_NODE_PARENT_REF))
465 rc_node_hold_other(np->rn_parent_ref);
466 np->rn_refs++;
467 assert(np->rn_refs > 0);
470 static void
471 rc_node_hold(rc_node_t *np)
473 (void) pthread_mutex_lock(&np->rn_lock);
474 rc_node_hold_locked(np);
475 (void) pthread_mutex_unlock(&np->rn_lock);
/*
 * Release a client reference on np; entered with rn_lock held.
 *
 * When the last client reference drops: composed property groups are
 * marked RC_NODE_DEAD (they are only as good as their references), and
 * a DEAD or OLD node with no "other" refs is handed to
 * rc_node_no_client_refs(), which tears it down and releases rn_lock.
 * Otherwise rn_lock is released here.  Any RC_NODE_PARENT_REF hold on
 * the former parent is dropped last, after rn_lock is released, to
 * respect locking order.
 */
static void
rc_node_rele_locked(rc_node_t *np)
{
	int unref = 0;
	rc_node_t *par_ref = NULL;

	assert(MUTEX_HELD(&np->rn_lock));
	assert(np->rn_refs > 0);

	if (--np->rn_refs == 0) {
		if (np->rn_flags & RC_NODE_PARENT_REF)
			par_ref = np->rn_parent_ref;

		/*
		 * Composed property groups are only as good as their
		 * references.
		 */
		if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
			np->rn_flags |= RC_NODE_DEAD;

		if ((np->rn_flags & (RC_NODE_DEAD|RC_NODE_OLD)) &&
		    np->rn_other_refs == 0 && np->rn_other_refs_held == 0)
			unref = 1;
	}

	if (unref) {
		/*
		 * This was the last client reference.  Destroy any other
		 * references and free() the node.
		 */
		rc_node_no_client_refs(np);
	} else {
		/*
		 * rn_erefs can be 0 if we acquired the reference in
		 * a path which hasn't been updated to increment rn_erefs.
		 * When all paths which end here are updated, we should
		 * assert rn_erefs > 0 and always decrement it.
		 */
		if (np->rn_erefs > 0)
			--np->rn_erefs;
		(void) pthread_mutex_unlock(&np->rn_lock);
	}

	if (par_ref != NULL)
		rc_node_rele_other(par_ref);
}
525 void
526 rc_node_rele(rc_node_t *np)
528 (void) pthread_mutex_lock(&np->rn_lock);
529 rc_node_rele_locked(np);
532 static cache_bucket_t *
533 cache_hold(uint32_t h)
535 cache_bucket_t *bp = CACHE_BUCKET(h);
536 (void) pthread_mutex_lock(&bp->cb_lock);
537 return (bp);
540 static void
541 cache_release(cache_bucket_t *bp)
543 (void) pthread_mutex_unlock(&bp->cb_lock);
546 static rc_node_t *
547 cache_lookup_unlocked(cache_bucket_t *bp, rc_node_lookup_t *lp)
549 uint32_t h = rc_node_hash(lp);
550 rc_node_t *np;
552 assert(MUTEX_HELD(&bp->cb_lock));
553 assert(bp == CACHE_BUCKET(h));
555 for (np = bp->cb_head; np != NULL; np = np->rn_hash_next) {
556 if (np->rn_hash == h && rc_node_match(np, lp)) {
557 rc_node_hold(np);
558 return (np);
562 return (NULL);
565 static rc_node_t *
566 cache_lookup(rc_node_lookup_t *lp)
568 uint32_t h;
569 cache_bucket_t *bp;
570 rc_node_t *np;
572 h = rc_node_hash(lp);
573 bp = cache_hold(h);
575 np = cache_lookup_unlocked(bp, lp);
577 cache_release(bp);
579 return (np);
582 static void
583 cache_insert_unlocked(cache_bucket_t *bp, rc_node_t *np)
585 assert(MUTEX_HELD(&bp->cb_lock));
586 assert(np->rn_hash == rc_node_hash(&np->rn_id));
587 assert(bp == CACHE_BUCKET(np->rn_hash));
589 assert(np->rn_hash_next == NULL);
591 np->rn_hash_next = bp->cb_head;
592 bp->cb_head = np;
595 static void
596 cache_remove_unlocked(cache_bucket_t *bp, rc_node_t *np)
598 rc_node_t **npp;
600 assert(MUTEX_HELD(&bp->cb_lock));
601 assert(np->rn_hash == rc_node_hash(&np->rn_id));
602 assert(bp == CACHE_BUCKET(np->rn_hash));
604 for (npp = &bp->cb_head; *npp != NULL; npp = &(*npp)->rn_hash_next)
605 if (*npp == np)
606 break;
608 assert(*npp == np);
609 *npp = np->rn_hash_next;
610 np->rn_hash_next = NULL;
614 * verify that the 'parent' type can have a child typed 'child'
615 * Fails with
616 * _INVALID_TYPE - argument is invalid
617 * _TYPE_MISMATCH - parent type cannot have children of type child
619 static int
620 rc_check_parent_child(uint32_t parent, uint32_t child)
622 int idx;
623 uint32_t type;
625 if (parent == 0 || parent >= NUM_TYPES ||
626 child == 0 || child >= NUM_TYPES)
627 return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
629 for (idx = 0; idx < MAX_VALID_CHILDREN; idx++) {
630 type = rc_types[parent].rt_valid_children[idx];
631 if (type == child)
632 return (REP_PROTOCOL_SUCCESS);
635 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
639 * Fails with
640 * _INVALID_TYPE - type is invalid
641 * _BAD_REQUEST - name is an invalid name for a node of type type
644 rc_check_type_name(uint32_t type, const char *name)
646 if (type == 0 || type >= NUM_TYPES)
647 return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
649 if (uu_check_name(name, rc_types[type].rt_name_flags) == -1)
650 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
652 return (REP_PROTOCOL_SUCCESS);
655 static int
656 rc_check_pgtype_name(const char *name)
658 if (uu_check_name(name, UU_NAME_DOMAIN) == -1)
659 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
661 return (REP_PROTOCOL_SUCCESS);
665 * rc_node_free_fmri should be called whenever a node loses its parent.
666 * The reason is that the node's fmri string is built up by concatenating
667 * its name to the parent's fmri. Thus, when the node no longer has a
668 * parent, its fmri is no longer valid.
670 static void
671 rc_node_free_fmri(rc_node_t *np)
673 if (np->rn_fmri != NULL) {
674 free((void *)np->rn_fmri);
675 np->rn_fmri = NULL;
/*
 * Concatenate the appropriate separator and the FMRI element to the base
 * FMRI string at fmri.  The separator is chosen from the entity type;
 * values, snapshots, and snaplevels have no FMRI component of their own
 * and succeed without touching the buffer.
 *
 * Fails with
 *	_TRUNCATED	Not enough room in buffer at fmri.  *sz_out is
 *			still set to the size that would have been needed.
 */
static int
rc_concat_fmri_element(
    char *fmri,			/* base fmri */
    size_t bufsize,		/* size of buf at fmri */
    size_t *sz_out,		/* receives result size. */
    const char *element,	/* element name to concat */
    rep_protocol_entity_t type)	/* type of element */
{
	size_t actual;
	const char *name = element;
	int rc;
	const char *separator;

	if (bufsize > 0)
		*sz_out = strlen(fmri);
	else
		*sz_out = 0;

	switch (type) {
	case REP_PROTOCOL_ENTITY_SCOPE:
		if (strcmp(element, SCF_FMRI_LOCAL_SCOPE) == 0) {
			/*
			 * No need to display scope information if we are
			 * in the local scope.
			 */
			separator = SCF_FMRI_SVC_PREFIX;
			name = NULL;
		} else {
			/*
			 * Need to display scope information, because it is
			 * not the local scope.
			 */
			separator = SCF_FMRI_SVC_PREFIX SCF_FMRI_SCOPE_PREFIX;
		}
		break;
	case REP_PROTOCOL_ENTITY_SERVICE:
		separator = SCF_FMRI_SERVICE_PREFIX;
		break;
	case REP_PROTOCOL_ENTITY_INSTANCE:
		separator = SCF_FMRI_INSTANCE_PREFIX;
		break;
	case REP_PROTOCOL_ENTITY_PROPERTYGRP:
	case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
		separator = SCF_FMRI_PROPERTYGRP_PREFIX;
		break;
	case REP_PROTOCOL_ENTITY_PROPERTY:
		separator = SCF_FMRI_PROPERTY_PREFIX;
		break;
	case REP_PROTOCOL_ENTITY_VALUE:
		/*
		 * A value does not have a separate FMRI from its property,
		 * so there is nothing to concat.
		 */
		return (REP_PROTOCOL_SUCCESS);
	case REP_PROTOCOL_ENTITY_SNAPSHOT:
	case REP_PROTOCOL_ENTITY_SNAPLEVEL:
		/* Snapshots do not have FMRIs, so there is nothing to do. */
		return (REP_PROTOCOL_SUCCESS);
	default:
		(void) fprintf(stderr, "%s:%d: Unknown protocol type %d.\n",
		    __FILE__, __LINE__, type);
		abort();	/* Missing a case in switch if we get here. */
	}

	/* Concatenate separator and element to the fmri buffer. */

	actual = strlcat(fmri, separator, bufsize);
	if (name != NULL) {
		if (actual < bufsize) {
			actual = strlcat(fmri, name, bufsize);
		} else {
			/*
			 * The separator already overflowed the buffer;
			 * keep counting so the caller learns the size
			 * that would have been required.
			 */
			actual += strlen(name);
		}
	}
	if (actual < bufsize) {
		rc = REP_PROTOCOL_SUCCESS;
	} else {
		rc = REP_PROTOCOL_FAIL_TRUNCATED;
	}
	*sz_out = actual;
	return (rc);
}
/*
 * Get the FMRI for the node at np.  The fmri will be placed in buf.  On
 * success sz_out will be set to the size of the fmri in buf.  If
 * REP_PROTOCOL_FAIL_TRUNCATED is returned, sz_out will be set to the size
 * of the buffer that would be required to avoid truncation.
 *
 * Fails with
 *	_TRUNCATED	not enough room in buf for the FMRI.
 */
static int
rc_node_get_fmri_or_fragment(rc_node_t *np, char *buf, size_t bufsize,
    size_t *sz_out)
{
	size_t fmri_len = 0;
	int r;

	if (bufsize > 0)
		*buf = 0;
	*sz_out = 0;

	if (np->rn_fmri == NULL) {
		/*
		 * A NULL rn_fmri implies that this is a top level scope.
		 * Child nodes will always have an rn_fmri established
		 * because both rc_node_link_child() and
		 * rc_node_relink_child() call rc_node_build_fmri().  In
		 * this case, we'll just return our name preceded by the
		 * appropriate FMRI decorations.
		 */
		assert(np->rn_parent == NULL);
		r = rc_concat_fmri_element(buf, bufsize, &fmri_len, np->rn_name,
		    np->rn_id.rl_type);
		if (r != REP_PROTOCOL_SUCCESS)
			return (r);
	} else {
		/* We have an fmri, so return it. */
		fmri_len = strlcpy(buf, np->rn_fmri, bufsize);
	}

	*sz_out = fmri_len;

	if (fmri_len >= bufsize)
		return (REP_PROTOCOL_FAIL_TRUNCATED);

	return (REP_PROTOCOL_SUCCESS);
}
817 * Build an FMRI string for this node and save it in rn_fmri.
819 * The basic strategy here is to get the fmri of our parent and then
820 * concatenate the appropriate separator followed by our name. If our name
821 * is null, the resulting fmri will just be a copy of the parent fmri.
822 * rc_node_build_fmri() should be called with the RC_NODE_USING_PARENT flag
823 * set. Also the rn_lock for this node should be held.
825 * Fails with
826 * _NO_RESOURCES Could not allocate memory.
828 static int
829 rc_node_build_fmri(rc_node_t *np)
831 size_t actual;
832 char fmri[REP_PROTOCOL_FMRI_LEN];
833 int rc;
834 size_t sz = REP_PROTOCOL_FMRI_LEN;
836 assert(MUTEX_HELD(&np->rn_lock));
837 assert(np->rn_flags & RC_NODE_USING_PARENT);
839 rc_node_free_fmri(np);
841 rc = rc_node_get_fmri_or_fragment(np->rn_parent, fmri, sz, &actual);
842 assert(rc == REP_PROTOCOL_SUCCESS);
844 if (np->rn_name != NULL) {
845 rc = rc_concat_fmri_element(fmri, sz, &actual, np->rn_name,
846 np->rn_id.rl_type);
847 assert(rc == REP_PROTOCOL_SUCCESS);
848 np->rn_fmri = strdup(fmri);
849 } else {
850 np->rn_fmri = strdup(fmri);
852 if (np->rn_fmri == NULL) {
853 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
854 } else {
855 rc = REP_PROTOCOL_SUCCESS;
858 return (rc);
862 * Get the FMRI of the node at np placing the result in fmri. Then
863 * concatenate the additional element to fmri. The type variable indicates
864 * the type of element, so that the appropriate separator can be
865 * generated. size is the number of bytes in the buffer at fmri, and
866 * sz_out receives the size of the generated string. If the result is
867 * truncated, sz_out will receive the size of the buffer that would be
868 * required to avoid truncation.
870 * Fails with
871 * _TRUNCATED Not enough room in buffer at fmri.
873 static int
874 rc_get_fmri_and_concat(rc_node_t *np, char *fmri, size_t size, size_t *sz_out,
875 const char *element, rep_protocol_entity_t type)
877 int rc;
879 if ((rc = rc_node_get_fmri_or_fragment(np, fmri, size, sz_out)) !=
880 REP_PROTOCOL_SUCCESS) {
881 return (rc);
883 if ((rc = rc_concat_fmri_element(fmri, size, sz_out, element, type)) !=
884 REP_PROTOCOL_SUCCESS) {
885 return (rc);
888 return (REP_PROTOCOL_SUCCESS);
/*
 * Decide whether notification client rnip cares about event np.
 * Deletion events interest every client; a property group update
 * interests a client iff the pg's name or type appears in the client's
 * watch lists.  Returns 1 when interested, 0 otherwise.  Caller must
 * hold rc_pg_notify_lock.
 */
static int
rc_notify_info_interested(rc_notify_info_t *rnip, rc_notify_t *np)
{
	rc_node_t *nnp = np->rcn_node;
	int i;

	assert(MUTEX_HELD(&rc_pg_notify_lock));

	if (np->rcn_delete != NULL) {
		assert(np->rcn_info == NULL && np->rcn_node == NULL);
		return (1);		/* everyone likes deletes */
	}
	if (np->rcn_node == NULL) {
		/* a client marker entry, not an event */
		assert(np->rcn_info != NULL || np->rcn_delete != NULL);
		return (0);
	}
	assert(np->rcn_info == NULL);

	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
		if (rnip->rni_namelist[i] != NULL) {
			if (strcmp(nnp->rn_name, rnip->rni_namelist[i]) == 0)
				return (1);
		}
		if (rnip->rni_typelist[i] != NULL) {
			if (strcmp(nnp->rn_type, rnip->rni_typelist[i]) == 0)
				return (1);
		}
	}
	return (0);
}
/*
 * If any notification client is interested in property group nnp,
 * attach its embedded rc_notify_t to the end of the global notification
 * list and wake the interested clients.  Nodes that are not property
 * groups are ignored.
 */
static void
rc_notify_insert_node(rc_node_t *nnp)
{
	rc_notify_t *np = &nnp->rn_notify;
	rc_notify_info_t *nip;
	int found = 0;

	assert(np->rcn_info == NULL);

	if (nnp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
		return;

	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	np->rcn_node = nnp;
	for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
	    nip = uu_list_next(rc_notify_info_list, nip)) {
		if (rc_notify_info_interested(nip, np)) {
			(void) pthread_cond_broadcast(&nip->rni_cv);
			found++;
		}
	}
	/*
	 * Notifications no-one is interested in are never added to
	 * the list.
	 */
	if (found)
		(void) uu_list_insert_before(rc_notify_list, NULL, np);
	else
		np->rcn_node = NULL;
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
}
/*
 * Build the FMRI of the deleted entity (service, optional instance,
 * optional pg) into ndp, wake all notification clients, and queue the
 * deletion record at the end of rc_notify_list.  Deletions interest
 * every client (see rc_notify_info_interested()).
 */
static void
rc_notify_deletion(rc_notify_delete_t *ndp, const char *service,
    const char *instance, const char *pg)
{
	rc_notify_info_t *nip;

	uu_list_node_init(&ndp->rnd_notify, &ndp->rnd_notify.rcn_list_node,
	    rc_notify_pool);
	ndp->rnd_notify.rcn_delete = ndp;

	(void) snprintf(ndp->rnd_fmri, sizeof (ndp->rnd_fmri),
	    "svc:/%s%s%s%s%s", service,
	    (instance != NULL)? ":" : "", (instance != NULL)? instance : "",
	    (pg != NULL)? "/:properties/" : "", (pg != NULL)? pg : "");

	/*
	 * add to notification list, notify watchers
	 */
	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
	    nip = uu_list_next(rc_notify_info_list, nip))
		(void) pthread_cond_broadcast(&nip->rni_cv);
	(void) uu_list_insert_before(rc_notify_list, NULL, ndp);
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
}
/*
 * Remove nnp's embedded notification from the global list, first
 * waiting out any client currently walking the list (rc_notify_in_use
 * blocks removals; see the comment above rc_notify_info_list).
 */
static void
rc_notify_remove_node(rc_node_t *nnp)
{
	rc_notify_t *np = &nnp->rn_notify;

	assert(np->rcn_info == NULL);
	assert(!MUTEX_HELD(&nnp->rn_lock));

	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	while (np->rcn_node != NULL) {
		if (rc_notify_in_use) {
			/* a client holds the list; wait and re-check */
			(void) pthread_cond_wait(&rc_pg_notify_cv,
			    &rc_pg_notify_lock);
			continue;
		}
		(void) uu_list_remove(rc_notify_list, np);
		np->rcn_node = NULL;
		break;
	}
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
}
999 static void
1000 rc_notify_remove_locked(rc_notify_t *np)
1002 assert(MUTEX_HELD(&rc_pg_notify_lock));
1003 assert(rc_notify_in_use == 0);
1005 (void) uu_list_remove(rc_notify_list, np);
1006 if (np->rcn_node) {
1007 np->rcn_node = NULL;
1008 } else if (np->rcn_delete) {
1009 uu_free(np->rcn_delete);
1010 } else {
1011 assert(0); /* CAN'T HAPPEN */
1016 * Permission checking functions. See comment atop this file.
1018 #ifndef NATIVE_BUILD
1019 static permcheck_t *
1020 pc_create()
1022 permcheck_t *p;
1024 p = uu_zalloc(sizeof (*p));
1025 if (p == NULL)
1026 return (NULL);
1027 p->pc_bnum = 8; /* Normal case will only have 2 elts. */
1028 p->pc_buckets = uu_zalloc(sizeof (*p->pc_buckets) * p->pc_bnum);
1029 if (p->pc_buckets == NULL) {
1030 uu_free(p);
1031 return (NULL);
1034 p->pc_enum = 0;
1035 return (p);
1038 static void
1039 pc_free(permcheck_t *pcp)
1041 uint_t i;
1042 struct pc_elt *ep, *next;
1044 for (i = 0; i < pcp->pc_bnum; ++i) {
1045 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
1046 next = ep->pce_next;
1047 free(ep);
1051 free(pcp->pc_buckets);
1052 free(pcp);
/*
 * Hash an authorization string.  This is the generic string hash
 * borrowed from kernel/os/modhash.c.
 */
static uint32_t
pc_hash(const char *auth)
{
	uint32_t hash = 0;
	const char *cp;

	for (cp = auth; *cp != '\0'; ++cp) {
		uint32_t hibits;

		hash = (hash << 4) + *cp;
		hibits = hash & 0xf0000000;
		if (hibits != 0)
			hash ^= (hibits >> 24) ^ hibits;
	}

	return (hash);
}
1076 static perm_status_t
1077 pc_exists(permcheck_t *pcp, const char *auth)
1079 uint32_t h;
1080 struct pc_elt *ep;
1082 h = pc_hash(auth);
1083 for (ep = pcp->pc_buckets[h & (pcp->pc_bnum - 1)];
1084 ep != NULL;
1085 ep = ep->pce_next) {
1086 if (strcmp(auth, ep->pce_auth) == 0) {
1087 return (PERM_GRANTED);
1091 return (PERM_DENIED);
1094 static perm_status_t
1095 pc_match(permcheck_t *pcp, const char *pattern)
1097 uint_t i;
1098 struct pc_elt *ep;
1100 for (i = 0; i < pcp->pc_bnum; ++i) {
1101 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = ep->pce_next) {
1102 if (_auth_match(pattern, ep->pce_auth)) {
1103 return (PERM_GRANTED);
1108 return (PERM_DENIED);
1111 static int
1112 pc_grow(permcheck_t *pcp)
1114 uint_t new_bnum, i, j;
1115 struct pc_elt **new_buckets;
1116 struct pc_elt *ep, *next;
1118 new_bnum = pcp->pc_bnum * 2;
1119 if (new_bnum < pcp->pc_bnum)
1120 /* Homey don't play that. */
1121 return (-1);
1123 new_buckets = uu_zalloc(sizeof (*new_buckets) * new_bnum);
1124 if (new_buckets == NULL)
1125 return (-1);
1127 for (i = 0; i < pcp->pc_bnum; ++i) {
1128 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
1129 next = ep->pce_next;
1130 j = pc_hash(ep->pce_auth) & (new_bnum - 1);
1131 ep->pce_next = new_buckets[j];
1132 new_buckets[j] = ep;
1136 uu_free(pcp->pc_buckets);
1137 pcp->pc_buckets = new_buckets;
1138 pcp->pc_bnum = new_bnum;
1140 return (0);
1143 static int
1144 pc_add(permcheck_t *pcp, const char *auth, pc_auth_type_t auth_type)
1146 struct pc_elt *ep;
1147 uint_t i;
1149 ep = uu_zalloc(offsetof(struct pc_elt, pce_auth) + strlen(auth) + 1);
1150 if (ep == NULL)
1151 return (-1);
1153 /* Grow if pc_enum / pc_bnum > 3/4. */
1154 if (pcp->pc_enum * 4 > 3 * pcp->pc_bnum)
1155 /* Failure is not a stopper; we'll try again next time. */
1156 (void) pc_grow(pcp);
1158 (void) strcpy(ep->pce_auth, auth);
1160 i = pc_hash(auth) & (pcp->pc_bnum - 1);
1161 ep->pce_next = pcp->pc_buckets[i];
1162 pcp->pc_buckets[i] = ep;
1164 if (auth_type > pcp->pc_specific_type) {
1165 pcp->pc_specific_type = auth_type;
1166 pcp->pc_specific = ep;
1169 ++pcp->pc_enum;
1171 return (0);
1175 * For the type of a property group, return the authorization which may be
1176 * used to modify it.
1178 static const char *
1179 perm_auth_for_pgtype(const char *pgtype)
1181 if (strcmp(pgtype, SCF_GROUP_METHOD) == 0)
1182 return (AUTH_MODIFY_PREFIX "method");
1183 else if (strcmp(pgtype, SCF_GROUP_DEPENDENCY) == 0)
1184 return (AUTH_MODIFY_PREFIX "dependency");
1185 else if (strcmp(pgtype, SCF_GROUP_APPLICATION) == 0)
1186 return (AUTH_MODIFY_PREFIX "application");
1187 else if (strcmp(pgtype, SCF_GROUP_FRAMEWORK) == 0)
1188 return (AUTH_MODIFY_PREFIX "framework");
1189 else
1190 return (NULL);
1194 * Fails with
1195 * _NO_RESOURCES - out of memory
1197 static int
1198 perm_add_enabling_type(permcheck_t *pcp, const char *auth,
1199 pc_auth_type_t auth_type)
1201 return (pc_add(pcp, auth, auth_type) == 0 ? REP_PROTOCOL_SUCCESS :
1202 REP_PROTOCOL_FAIL_NO_RESOURCES);
1206 * Fails with
1207 * _NO_RESOURCES - out of memory
1209 static int
1210 perm_add_enabling(permcheck_t *pcp, const char *auth)
1212 return (perm_add_enabling_type(pcp, auth, PC_AUTH_SMF));
1215 /* Note that perm_add_enabling_values() is defined below. */
1218 * perm_granted() returns PERM_GRANTED if the current door caller has one of
1219 * the enabling authorizations in pcp, PERM_DENIED if it doesn't, PERM_GONE if
1220 * the door client went away and PERM_FAIL if an error (usually lack of
1221 * memory) occurs. auth_cb() checks each and every authorizations as
1222 * enumerated by _enum_auths. When we find a result other than PERM_DENIED,
1223 * we short-cut the enumeration and return non-zero.
1226 static int
1227 auth_cb(const char *auth, void *ctxt, void *vres)
1229 permcheck_t *pcp = ctxt;
1230 int *pret = vres;
1232 if (strchr(auth, KV_WILDCHAR) == NULL)
1233 *pret = pc_exists(pcp, auth);
1234 else
1235 *pret = pc_match(pcp, auth);
1237 if (*pret != PERM_DENIED)
1238 return (1);
1240 return (0); /* Tells that we need to continue */
/*
 * Check whether the current door caller holds one of the enabling
 * authorizations in pcp.
 *
 * Returns:
 *	PERM_GRANTED - caller holds one of the authorizations
 *	PERM_DENIED  - caller holds none of them
 *	PERM_GONE    - the door client went away (get_ucred() -> EINVAL)
 *	PERM_FAIL    - passwd lookup or auth enumeration failed
 */
static perm_status_t
perm_granted(permcheck_t *pcp)
{
	ucred_t *uc;

	perm_status_t ret = PERM_DENIED;
	uid_t uid;
	struct passwd pw;
	char pwbuf[1024];	/* XXX should be NSS_BUFLEN_PASSWD */
	struct passwd *result;

	/* Get the uid of the door client. */
	if ((uc = get_ucred()) == NULL) {
		if (errno == EINVAL) {
			/*
			 * Client is no longer waiting for our response (e.g.,
			 * it received a signal & resumed with EINTR).
			 * Punting with door_return() would be nice but we
			 * need to release all of the locks & references we
			 * hold.  And we must report failure to the client
			 * layer to keep it from ignoring retries as
			 * already-done (idempotency & all that).  None of the
			 * error codes fit very well, so we might as well
			 * force the return of _PERMISSION_DENIED since we
			 * couldn't determine the user.
			 */
			return (PERM_GONE);
		}
		/* Any other get_ucred() failure is unexpected. */
		assert(0);
		abort();
	}

	uid = ucred_geteuid(uc);
	assert(uid != (uid_t)-1);

	/* Map the uid to a user name for the auth enumeration. */
	getpwuid_r(uid, &pw, pwbuf, sizeof (pwbuf), &result);
	if (!result) {
		return (PERM_FAIL);
	}

	/*
	 * Enumerate all the auths defined for the user and return the
	 * result in ret.  auth_cb() stops the walk early on any result
	 * other than PERM_DENIED.
	 */
	if (_enum_auths(pw.pw_name, auth_cb, pcp, &ret) < 0)
		return (PERM_FAIL);

	return (ret);
}
1293 static int
1294 map_granted_status(perm_status_t status, permcheck_t *pcp)
1296 int rc;
1298 switch (status) {
1299 case PERM_DENIED:
1300 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
1301 break;
1302 case PERM_GRANTED:
1303 rc = REP_PROTOCOL_SUCCESS;
1304 break;
1305 case PERM_GONE:
1306 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
1307 break;
1308 case PERM_FAIL:
1309 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1310 break;
1311 default:
1312 rc = REP_PROTOCOL_FAIL_UNKNOWN;
1313 break;
1315 return (rc);
1317 #endif /* NATIVE_BUILD */
1320 * flags in RC_NODE_WAITING_FLAGS are broadcast when unset, and are used to
1321 * serialize certain actions, and to wait for certain operations to complete
1323 * The waiting flags are:
1324 * RC_NODE_CHILDREN_CHANGING
1325 * The child list is being built or changed (due to creation
1326 * or deletion). All iterators pause.
1328 * RC_NODE_USING_PARENT
1329 * Someone is actively using the parent pointer, so we can't
1330 * be removed from the parent list.
1332 * RC_NODE_CREATING_CHILD
1333 * A child is being created -- locks out other creations, to
1334 * prevent insert-insert races.
1336 * RC_NODE_IN_TX
1337 * This object is running a transaction.
1339 * RC_NODE_DYING
1340 * This node might be dying. Always set as a set, using
1341 * RC_NODE_DYING_FLAGS (which is everything but
1342 * RC_NODE_USING_PARENT)
1344 static int
1345 rc_node_hold_flag(rc_node_t *np, uint32_t flag)
1347 assert(MUTEX_HELD(&np->rn_lock));
1348 assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
1350 while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag)) {
1351 (void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1353 if (np->rn_flags & RC_NODE_DEAD)
1354 return (0);
1356 np->rn_flags |= flag;
1357 return (1);
1360 static void
1361 rc_node_rele_flag(rc_node_t *np, uint32_t flag)
1363 assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
1364 assert(MUTEX_HELD(&np->rn_lock));
1365 assert((np->rn_flags & flag) == flag);
1366 np->rn_flags &= ~flag;
1367 (void) pthread_cond_broadcast(&np->rn_cv);
1371 * wait until a particular flag has cleared. Fails if the object dies.
1373 static int
1374 rc_node_wait_flag(rc_node_t *np, uint32_t flag)
1376 assert(MUTEX_HELD(&np->rn_lock));
1377 while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag))
1378 (void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1380 return (!(np->rn_flags & RC_NODE_DEAD));
/*
 * On entry, np's lock must be held, and this thread must be holding
 * RC_NODE_USING_PARENT.  On return, both of them are released.
 *
 * If the return value is NULL, np either does not have a parent, or
 * the parent has been marked DEAD.
 *
 * If the return value is non-NULL, it is the parent of np, and both
 * its lock and the requested flags are held.
 */
static rc_node_t *
rc_node_hold_parent_flag(rc_node_t *np, uint32_t flag)
{
	rc_node_t *pp;

	assert(MUTEX_HELD(&np->rn_lock));
	assert(np->rn_flags & RC_NODE_USING_PARENT);

	if ((pp = np->rn_parent) == NULL) {
		rc_node_rele_flag(np, RC_NODE_USING_PARENT);
		(void) pthread_mutex_unlock(&np->rn_lock);
		return (NULL);
	}
	(void) pthread_mutex_unlock(&np->rn_lock);

	/*
	 * Locks are taken parent-then-child here, so drop np's lock, take
	 * pp's, and re-take np's just long enough to release
	 * RC_NODE_USING_PARENT.  NOTE(review): pp staying valid across the
	 * unlocked window presumably relies on USING_PARENT pinning np on
	 * pp's child list -- confirm against rc_node_delete().
	 */
	(void) pthread_mutex_lock(&pp->rn_lock);
	(void) pthread_mutex_lock(&np->rn_lock);
	rc_node_rele_flag(np, RC_NODE_USING_PARENT);
	(void) pthread_mutex_unlock(&np->rn_lock);

	if (!rc_node_hold_flag(pp, flag)) {
		/* pp was marked DEAD while we waited for the flag. */
		(void) pthread_mutex_unlock(&pp->rn_lock);
		return (NULL);
	}
	return (pp);
}
1420 rc_node_t *
1421 rc_node_alloc(void)
1423 rc_node_t *np = uu_zalloc(sizeof (*np));
1425 if (np == NULL)
1426 return (NULL);
1428 (void) pthread_mutex_init(&np->rn_lock, NULL);
1429 (void) pthread_cond_init(&np->rn_cv, NULL);
1431 np->rn_children = uu_list_create(rc_children_pool, np, 0);
1432 np->rn_pg_notify_list = uu_list_create(rc_pg_notify_pool, np, 0);
1434 uu_list_node_init(np, &np->rn_sibling_node, rc_children_pool);
1436 uu_list_node_init(&np->rn_notify, &np->rn_notify.rcn_list_node,
1437 rc_notify_pool);
1439 return (np);
/*
 * Tear down a node that has no remaining references: release the
 * composition-chain holds (composed property groups only), free the
 * name/type/values storage and cached FMRI, drop the snaplevel hold,
 * destroy the lists and synchronization objects, and free the node.
 *
 * Nodes flagged RC_NODE_UNREFED are being torn down by another path,
 * so they are left alone here.
 */
void
rc_node_destroy(rc_node_t *np)
{
	int i;

	if (np->rn_flags & RC_NODE_UNREFED)
		return;				/* being handled elsewhere */

	assert(np->rn_refs == 0 && np->rn_other_refs == 0);
	assert(np->rn_former == NULL);

	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
		/* Release the holds from rc_iter_next(). */
		for (i = 0; i < COMPOSITION_DEPTH; ++i) {
			/* rn_cchain[i] may be NULL for empty snapshots. */
			if (np->rn_cchain[i] != NULL)
				rc_node_rele(np->rn_cchain[i]);
		}
	}

	/* rn_name/rn_type are const-qualified; cast away to free. */
	if (np->rn_name != NULL)
		free((void *)np->rn_name);
	np->rn_name = NULL;
	if (np->rn_type != NULL)
		free((void *)np->rn_type);
	np->rn_type = NULL;
	if (np->rn_values != NULL)
		object_free_values(np->rn_values, np->rn_valtype,
		    np->rn_values_count, np->rn_values_size);
	np->rn_values = NULL;
	rc_node_free_fmri(np);

	if (np->rn_snaplevel != NULL)
		rc_snaplevel_rele(np->rn_snaplevel);
	np->rn_snaplevel = NULL;

	uu_list_node_fini(np, &np->rn_sibling_node, rc_children_pool);

	uu_list_node_fini(&np->rn_notify, &np->rn_notify.rcn_list_node,
	    rc_notify_pool);

	/* A node must have been unlinked from all children by now. */
	assert(uu_list_first(np->rn_children) == NULL);
	uu_list_destroy(np->rn_children);
	uu_list_destroy(np->rn_pg_notify_list);

	(void) pthread_mutex_destroy(&np->rn_lock);
	(void) pthread_cond_destroy(&np->rn_cv);

	uu_free(np);
}
/*
 * Link in a child node.
 *
 * Because of the lock ordering, cp has to already be in the hash table with
 * its lock dropped before we get it.  To prevent anyone from noticing that
 * it is parentless, the creation code sets the RC_NODE_USING_PARENT.  Once
 * we've linked it in, we release the flag.
 */
static void
rc_node_link_child(rc_node_t *np, rc_node_t *cp)
{
	assert(!MUTEX_HELD(&np->rn_lock));
	assert(!MUTEX_HELD(&cp->rn_lock));

	/* Parent is locked before child. */
	(void) pthread_mutex_lock(&np->rn_lock);
	(void) pthread_mutex_lock(&cp->rn_lock);
	assert(!(cp->rn_flags & RC_NODE_IN_PARENT) &&
	    (cp->rn_flags & RC_NODE_USING_PARENT));

	assert(rc_check_parent_child(np->rn_id.rl_type, cp->rn_id.rl_type) ==
	    REP_PROTOCOL_SUCCESS);

	cp->rn_parent = np;
	cp->rn_flags |= RC_NODE_IN_PARENT;
	(void) uu_list_insert_before(np->rn_children, NULL, cp);
	/* The FMRI can only be built once the parent link is in place. */
	(void) rc_node_build_fmri(cp);

	(void) pthread_mutex_unlock(&np->rn_lock);

	/* cp now has a parent; let waiters proceed. */
	rc_node_rele_flag(cp, RC_NODE_USING_PARENT);
	(void) pthread_mutex_unlock(&cp->rn_lock);
}
/*
 * Sets the rn_parent_ref field of all the children of np to pp -- always
 * initially invoked as rc_node_setup_parent_ref(np, np), we then recurse.
 *
 * This is used when we mark a node RC_NODE_OLD, so that when the object and
 * its children are no longer referenced, they will all be deleted as a unit.
 */
static void
rc_node_setup_parent_ref(rc_node_t *np, rc_node_t *pp)
{
	rc_node_t *cp;

	assert(MUTEX_HELD(&np->rn_lock));

	for (cp = uu_list_first(np->rn_children); cp != NULL;
	    cp = uu_list_next(np->rn_children, cp)) {
		(void) pthread_mutex_lock(&cp->rn_lock);
		if (cp->rn_flags & RC_NODE_PARENT_REF) {
			/* Already set up on an earlier pass; just verify. */
			assert(cp->rn_parent_ref == pp);
		} else {
			assert(cp->rn_parent_ref == NULL);

			cp->rn_flags |= RC_NODE_PARENT_REF;
			cp->rn_parent_ref = pp;
			/* A client-referenced child takes a hold on pp. */
			if (cp->rn_refs != 0)
				rc_node_hold_other(pp);
		}
		rc_node_setup_parent_ref(cp, pp);	/* recurse */
		(void) pthread_mutex_unlock(&cp->rn_lock);
	}
}
/*
 * Atomically replace 'np' with 'newp', with a parent of 'pp'.
 *
 * Requirements:
 *	*no* node locks may be held.
 *	pp must be held with RC_NODE_CHILDREN_CHANGING
 *	newp and np must be held with RC_NODE_IN_TX
 *	np must be marked RC_NODE_IN_PARENT, newp must not be
 *	np must be marked RC_NODE_OLD
 *
 * Afterwards:
 *	pp's RC_NODE_CHILDREN_CHANGING is dropped
 *	newp and np's RC_NODE_IN_TX is dropped
 *	newp->rn_former = np;
 *	newp is RC_NODE_IN_PARENT, np is not.
 *	interested notify subscribers have been notified of newp's new status.
 */
static void
rc_node_relink_child(rc_node_t *pp, rc_node_t *np, rc_node_t *newp)
{
	cache_bucket_t *bp;
	/*
	 * First, swap np and nnp in the cache.  newp's RC_NODE_IN_TX flag
	 * keeps rc_node_update() from seeing it until we are done.
	 */
	bp = cache_hold(newp->rn_hash);
	cache_remove_unlocked(bp, np);
	cache_insert_unlocked(bp, newp);
	cache_release(bp);

	/*
	 * replace np with newp in pp's list, and attach it to newp's rn_former
	 * link.  Locks are taken parent first, then both children.
	 */
	(void) pthread_mutex_lock(&pp->rn_lock);
	assert(pp->rn_flags & RC_NODE_CHILDREN_CHANGING);

	(void) pthread_mutex_lock(&newp->rn_lock);
	assert(!(newp->rn_flags & RC_NODE_IN_PARENT));
	assert(newp->rn_flags & RC_NODE_IN_TX);

	(void) pthread_mutex_lock(&np->rn_lock);
	assert(np->rn_flags & RC_NODE_IN_PARENT);
	assert(np->rn_flags & RC_NODE_OLD);
	assert(np->rn_flags & RC_NODE_IN_TX);

	newp->rn_parent = pp;
	newp->rn_flags |= RC_NODE_IN_PARENT;

	/*
	 * Note that we carefully add newp before removing np -- this
	 * keeps iterators on the list from missing us.
	 */
	(void) uu_list_insert_after(pp->rn_children, np, newp);
	(void) rc_node_build_fmri(newp);
	(void) uu_list_remove(pp->rn_children, np);

	/*
	 * re-set np
	 */
	newp->rn_former = np;
	np->rn_parent = NULL;
	np->rn_flags &= ~RC_NODE_IN_PARENT;
	np->rn_flags |= RC_NODE_ON_FORMER;

	/* Let interested notify subscribers know about the new node. */
	rc_notify_insert_node(newp);

	rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
	(void) pthread_mutex_unlock(&pp->rn_lock);
	/*
	 * NOTE(review): USING_PARENT is dropped here along with IN_TX even
	 * though the header comment only mentions IN_TX -- presumably the
	 * creation path left USING_PARENT set on newp; confirm.
	 */
	rc_node_rele_flag(newp, RC_NODE_USING_PARENT | RC_NODE_IN_TX);
	(void) pthread_mutex_unlock(&newp->rn_lock);
	/* Tie np's subtree to np so it is torn down as a unit. */
	rc_node_setup_parent_ref(np, np);
	rc_node_rele_flag(np, RC_NODE_IN_TX);
	(void) pthread_mutex_unlock(&np->rn_lock);
}
/*
 * makes sure a node with lookup 'nip', name 'name', and parent 'pp' exists.
 * 'cp' is used (and returned) if the node does not yet exist.  If it does
 * exist, 'cp' is freed, and the existent node is returned instead.
 */
rc_node_t *
rc_node_setup(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
    rc_node_t *pp)
{
	rc_node_t *np;
	cache_bucket_t *bp;
	uint32_t h = rc_node_hash(nip);

	assert(cp->rn_refs == 0);

	bp = cache_hold(h);
	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
		cache_release(bp);

		/*
		 * make sure it matches our expectations
		 */
		(void) pthread_mutex_lock(&np->rn_lock);
		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
			assert(np->rn_parent == pp);
			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
			assert(strcmp(np->rn_name, name) == 0);
			assert(np->rn_type == NULL);
			assert(np->rn_flags & RC_NODE_IN_PARENT);
			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
		}
		(void) pthread_mutex_unlock(&np->rn_lock);

		/* The preallocated node is unneeded. */
		rc_node_destroy(cp);
		return (np);
	}

	/*
	 * No one is there -- setup & install the new node.
	 */
	np = cp;
	rc_node_hold(np);
	np->rn_id = *nip;
	np->rn_hash = h;
	/* NOTE(review): strdup() failure is not handled; rn_name may be
	 * NULL on OOM -- confirm callers/asserts tolerate that. */
	np->rn_name = strdup(name);

	/* Keeps us from looking parentless until rc_node_link_child(). */
	np->rn_flags |= RC_NODE_USING_PARENT;

	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE) {
#if COMPOSITION_DEPTH == 2
		/* An instance's composition chain is itself, then pp. */
		np->rn_cchain[0] = np;
		np->rn_cchain[1] = pp;
#else
#error This code must be updated.
#endif
	}

	cache_insert_unlocked(bp, np);
	cache_release(bp);	/* we are now visible */

	rc_node_link_child(pp, np);

	return (np);
}
/*
 * makes sure a snapshot with lookup 'nip', name 'name', and parent 'pp'
 * exists.  'cp' is used (and returned) if the node does not yet exist.
 * If it does exist, 'cp' is freed, and the existent node is returned
 * instead.
 */
rc_node_t *
rc_node_setup_snapshot(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
    uint32_t snap_id, rc_node_t *pp)
{
	rc_node_t *np;
	cache_bucket_t *bp;
	uint32_t h = rc_node_hash(nip);

	assert(cp->rn_refs == 0);

	bp = cache_hold(h);
	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
		cache_release(bp);

		/*
		 * make sure it matches our expectations
		 */
		(void) pthread_mutex_lock(&np->rn_lock);
		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
			assert(np->rn_parent == pp);
			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
			assert(strcmp(np->rn_name, name) == 0);
			assert(np->rn_type == NULL);
			assert(np->rn_flags & RC_NODE_IN_PARENT);
			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
		}
		(void) pthread_mutex_unlock(&np->rn_lock);

		/* The preallocated node is unneeded. */
		rc_node_destroy(cp);
		return (np);
	}

	/*
	 * No one is there -- create a new node.
	 */
	np = cp;
	rc_node_hold(np);
	np->rn_id = *nip;
	np->rn_hash = h;
	/* NOTE(review): strdup() failure is not handled here -- confirm. */
	np->rn_name = strdup(name);
	np->rn_snapshot_id = snap_id;

	/* Keeps us from looking parentless until rc_node_link_child(). */
	np->rn_flags |= RC_NODE_USING_PARENT;

	cache_insert_unlocked(bp, np);
	cache_release(bp);	/* we are now visible */

	rc_node_link_child(pp, np);

	return (np);
}
/*
 * makes sure a snaplevel with lookup 'nip' and parent 'pp' exists.  'cp' is
 * used (and returned) if the node does not yet exist.  If it does exist, 'cp'
 * is freed, and the existent node is returned instead.
 */
rc_node_t *
rc_node_setup_snaplevel(rc_node_t *cp, rc_node_lookup_t *nip,
    rc_snaplevel_t *lvl, rc_node_t *pp)
{
	rc_node_t *np;
	cache_bucket_t *bp;
	uint32_t h = rc_node_hash(nip);

	assert(cp->rn_refs == 0);

	bp = cache_hold(h);
	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
		cache_release(bp);

		/*
		 * make sure it matches our expectations
		 */
		(void) pthread_mutex_lock(&np->rn_lock);
		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
			assert(np->rn_parent == pp);
			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
			/* snaplevels carry no name or type of their own */
			assert(np->rn_name == NULL);
			assert(np->rn_type == NULL);
			assert(np->rn_flags & RC_NODE_IN_PARENT);
			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
		}
		(void) pthread_mutex_unlock(&np->rn_lock);

		/* The preallocated node is unneeded. */
		rc_node_destroy(cp);
		return (np);
	}

	/*
	 * No one is there -- create a new node.
	 */
	np = cp;
	rc_node_hold(np);	/* released in snapshot_fill_children() */
	np->rn_id = *nip;
	np->rn_hash = h;

	/* The node keeps a hold on its snaplevel metadata. */
	rc_snaplevel_hold(lvl);
	np->rn_snaplevel = lvl;

	/* Keeps us from looking parentless until rc_node_link_child(). */
	np->rn_flags |= RC_NODE_USING_PARENT;

	cache_insert_unlocked(bp, np);
	cache_release(bp);	/* we are now visible */

	/* Add this snaplevel to the snapshot's composition chain. */
	assert(pp->rn_cchain[lvl->rsl_level_num - 1] == NULL);
	pp->rn_cchain[lvl->rsl_level_num - 1] = np;

	rc_node_link_child(pp, np);

	return (np);
}
1819 * Returns NULL if strdup() fails.
1821 rc_node_t *
1822 rc_node_setup_pg(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
1823 const char *type, uint32_t flags, uint32_t gen_id, rc_node_t *pp)
1825 rc_node_t *np;
1826 cache_bucket_t *bp;
1828 uint32_t h = rc_node_hash(nip);
1829 bp = cache_hold(h);
1830 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
1831 cache_release(bp);
1834 * make sure it matches our expectations (don't check
1835 * the generation number or parent, since someone could
1836 * have gotten a transaction through while we weren't
1837 * looking)
1839 (void) pthread_mutex_lock(&np->rn_lock);
1840 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
1841 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
1842 assert(strcmp(np->rn_name, name) == 0);
1843 assert(strcmp(np->rn_type, type) == 0);
1844 assert(np->rn_pgflags == flags);
1845 assert(np->rn_flags & RC_NODE_IN_PARENT);
1846 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1848 (void) pthread_mutex_unlock(&np->rn_lock);
1850 rc_node_destroy(cp);
1851 return (np);
1854 np = cp;
1855 rc_node_hold(np); /* released in fill_pg_callback() */
1856 np->rn_id = *nip;
1857 np->rn_hash = h;
1858 np->rn_name = strdup(name);
1859 if (np->rn_name == NULL) {
1860 rc_node_rele(np);
1861 return (NULL);
1863 np->rn_type = strdup(type);
1864 if (np->rn_type == NULL) {
1865 free((void *)np->rn_name);
1866 rc_node_rele(np);
1867 return (NULL);
1869 np->rn_pgflags = flags;
1870 np->rn_gen_id = gen_id;
1872 np->rn_flags |= RC_NODE_USING_PARENT;
1874 cache_insert_unlocked(bp, np);
1875 cache_release(bp); /* we are now visible */
1877 rc_node_link_child(pp, np);
1879 return (np);
1882 #if COMPOSITION_DEPTH == 2
/*
 * Initialize a "composed property group" which represents the composition of
 * property groups pg1 & pg2.  It is ephemeral: once created & returned for an
 * ITER_READ request, keeping it out of cache_hash and any child lists
 * prevents it from being looked up.  Operations besides iteration are passed
 * through to pg1.
 *
 * pg1 & pg2 should be held before entering this function.  They will be
 * released in rc_node_destroy().
 *
 * Returns _TYPE_MISMATCH if the pgs' types differ, _NO_RESOURCES if the
 * name cannot be duplicated, _SUCCESS otherwise.
 */
static int
rc_node_setup_cpg(rc_node_t *cpg, rc_node_t *pg1, rc_node_t *pg2)
{
	/* Only like-typed pgs compose. */
	if (strcmp(pg1->rn_type, pg2->rn_type) != 0)
		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);

	cpg->rn_id.rl_type = REP_PROTOCOL_ENTITY_CPROPERTYGRP;
	cpg->rn_name = strdup(pg1->rn_name);
	if (cpg->rn_name == NULL)
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);

	/* The composition chain holds are released in rc_node_destroy(). */
	cpg->rn_cchain[0] = pg1;
	cpg->rn_cchain[1] = pg2;

	return (REP_PROTOCOL_SUCCESS);
}
1909 #else
1910 #error This code must be updated.
1911 #endif
1914 * Fails with _NO_RESOURCES.
1917 rc_node_create_property(rc_node_t *pp, rc_node_lookup_t *nip,
1918 const char *name, rep_protocol_value_type_t type,
1919 const char *vals, size_t count, size_t size)
1921 rc_node_t *np;
1922 cache_bucket_t *bp;
1924 uint32_t h = rc_node_hash(nip);
1925 bp = cache_hold(h);
1926 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
1927 cache_release(bp);
1929 * make sure it matches our expectations
1931 (void) pthread_mutex_lock(&np->rn_lock);
1932 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
1933 assert(np->rn_parent == pp);
1934 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
1935 assert(strcmp(np->rn_name, name) == 0);
1936 assert(np->rn_valtype == type);
1937 assert(np->rn_values_count == count);
1938 assert(np->rn_values_size == size);
1939 assert(vals == NULL ||
1940 memcmp(np->rn_values, vals, size) == 0);
1941 assert(np->rn_flags & RC_NODE_IN_PARENT);
1942 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1944 rc_node_rele_locked(np);
1945 object_free_values(vals, type, count, size);
1946 return (REP_PROTOCOL_SUCCESS);
1950 * No one is there -- create a new node.
1952 np = rc_node_alloc();
1953 if (np == NULL) {
1954 cache_release(bp);
1955 object_free_values(vals, type, count, size);
1956 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
1958 np->rn_id = *nip;
1959 np->rn_hash = h;
1960 np->rn_name = strdup(name);
1961 if (np->rn_name == NULL) {
1962 cache_release(bp);
1963 object_free_values(vals, type, count, size);
1964 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
1967 np->rn_valtype = type;
1968 np->rn_values = vals;
1969 np->rn_values_count = count;
1970 np->rn_values_size = size;
1972 np->rn_flags |= RC_NODE_USING_PARENT;
1974 cache_insert_unlocked(bp, np);
1975 cache_release(bp); /* we are now visible */
1977 rc_node_link_child(pp, np);
1979 return (REP_PROTOCOL_SUCCESS);
/*
 * Create the uu_list pools and the global notification lists, then
 * allocate and install the "localhost" scope node at the root of the
 * entity tree (rc_scope).  Dies via uu_die() on allocation failure;
 * always returns 1 otherwise.
 */
int
rc_node_init(void)
{
	rc_node_t *np;
	cache_bucket_t *bp;

	/* Pools for node child lists and the notification machinery. */
	rc_children_pool = uu_list_pool_create("rc_children_pool",
	    sizeof (rc_node_t), offsetof(rc_node_t, rn_sibling_node),
	    NULL, UU_LIST_POOL_DEBUG);

	rc_pg_notify_pool = uu_list_pool_create("rc_pg_notify_pool",
	    sizeof (rc_node_pg_notify_t),
	    offsetof(rc_node_pg_notify_t, rnpn_node),
	    NULL, UU_LIST_POOL_DEBUG);

	rc_notify_pool = uu_list_pool_create("rc_notify_pool",
	    sizeof (rc_notify_t), offsetof(rc_notify_t, rcn_list_node),
	    NULL, UU_LIST_POOL_DEBUG);

	rc_notify_info_pool = uu_list_pool_create("rc_notify_info_pool",
	    sizeof (rc_notify_info_t),
	    offsetof(rc_notify_info_t, rni_list_node),
	    NULL, UU_LIST_POOL_DEBUG);

	if (rc_children_pool == NULL || rc_pg_notify_pool == NULL ||
	    rc_notify_pool == NULL || rc_notify_info_pool == NULL)
		uu_die("out of memory");

	rc_notify_list = uu_list_create(rc_notify_pool,
	    &rc_notify_list, 0);

	rc_notify_info_list = uu_list_create(rc_notify_info_pool,
	    &rc_notify_info_list, 0);

	if (rc_notify_list == NULL || rc_notify_info_list == NULL)
		uu_die("out of memory");

	if ((np = rc_node_alloc()) == NULL)
		uu_die("out of memory");

	/* The scope node is held forever. */
	rc_node_hold(np);
	np->rn_id.rl_type = REP_PROTOCOL_ENTITY_SCOPE;
	np->rn_id.rl_backend = BACKEND_TYPE_NORMAL;
	np->rn_hash = rc_node_hash(&np->rn_id);
	/*
	 * NOTE(review): a string literal, not strdup()ed -- relies on
	 * this node never reaching rc_node_destroy(), which would
	 * free() it.  Confirm.
	 */
	np->rn_name = "localhost";

	bp = cache_hold(np->rn_hash);
	cache_insert_unlocked(bp, np);
	cache_release(bp);

	rc_scope = np;
	return (1);
}
/*
 * Load np's children of the given type from the backend, if they have
 * not already been loaded.
 *
 * Fails with
 *	_INVALID_TYPE - type is invalid
 *	_TYPE_MISMATCH - np doesn't carry children of type type
 *	_DELETED - np has been deleted
 *	_NO_RESOURCES
 */
static int
rc_node_fill_children(rc_node_t *np, uint32_t type)
{
	int rc;

	assert(MUTEX_HELD(&np->rn_lock));

	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
	    REP_PROTOCOL_SUCCESS)
		return (rc);

	/* Serialize with other child-list changes; fails if np died. */
	if (!rc_node_hold_flag(np, RC_NODE_CHILDREN_CHANGING))
		return (REP_PROTOCOL_FAIL_DELETED);

	/* Someone else already filled them in. */
	if (np->rn_flags & RC_NODE_HAS_CHILDREN) {
		rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
		return (REP_PROTOCOL_SUCCESS);
	}

	/* Drop the node lock across the backend call. */
	(void) pthread_mutex_unlock(&np->rn_lock);
	rc = object_fill_children(np);
	(void) pthread_mutex_lock(&np->rn_lock);

	if (rc == REP_PROTOCOL_SUCCESS) {
		np->rn_flags |= RC_NODE_HAS_CHILDREN;
	}
	rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);

	return (rc);
}
2075 * Returns
2076 * _INVALID_TYPE - type is invalid
2077 * _TYPE_MISMATCH - np doesn't carry children of type type
2078 * _DELETED - np has been deleted
2079 * _NO_RESOURCES
2080 * _SUCCESS - if *cpp is not NULL, it is held
2082 static int
2083 rc_node_find_named_child(rc_node_t *np, const char *name, uint32_t type,
2084 rc_node_t **cpp)
2086 int ret;
2087 rc_node_t *cp;
2089 assert(MUTEX_HELD(&np->rn_lock));
2090 assert(np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP);
2092 ret = rc_node_fill_children(np, type);
2093 if (ret != REP_PROTOCOL_SUCCESS)
2094 return (ret);
2096 for (cp = uu_list_first(np->rn_children);
2097 cp != NULL;
2098 cp = uu_list_next(np->rn_children, cp)) {
2099 if (cp->rn_id.rl_type == type && strcmp(cp->rn_name, name) == 0)
2100 break;
2103 if (cp != NULL)
2104 rc_node_hold(cp);
2105 *cpp = cp;
2107 return (REP_PROTOCOL_SUCCESS);
2110 static int rc_node_parent(rc_node_t *, rc_node_t **);
2113 * Returns
2114 * _INVALID_TYPE - type is invalid
2115 * _DELETED - np or an ancestor has been deleted
2116 * _NOT_FOUND - no ancestor of specified type exists
2117 * _SUCCESS - *app is held
2119 static int
2120 rc_node_find_ancestor(rc_node_t *np, uint32_t type, rc_node_t **app)
2122 int ret;
2123 rc_node_t *parent, *np_orig;
2125 if (type >= REP_PROTOCOL_ENTITY_MAX)
2126 return (REP_PROTOCOL_FAIL_INVALID_TYPE);
2128 np_orig = np;
2130 while (np->rn_id.rl_type > type) {
2131 ret = rc_node_parent(np, &parent);
2132 if (np != np_orig)
2133 rc_node_rele(np);
2134 if (ret != REP_PROTOCOL_SUCCESS)
2135 return (ret);
2136 np = parent;
2139 if (np->rn_id.rl_type == type) {
2140 *app = parent;
2141 return (REP_PROTOCOL_SUCCESS);
2144 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2147 #ifndef NATIVE_BUILD
/*
 * If the propname property exists in pg, and it is of type string, add its
 * values as authorizations to pcp.  pg must not be locked on entry, and it is
 * returned unlocked.  Returns
 *	_DELETED - pg was deleted
 *	_NO_RESOURCES
 *	_NOT_FOUND - pg has no property named propname
 *	_SUCCESS
 */
static int
perm_add_pg_prop_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
{
	rc_node_t *prop;
	int result;

	uint_t count;
	const char *cp;

	assert(!MUTEX_HELD(&pg->rn_lock));
	assert(pg->rn_id.rl_type == REP_PROTOCOL_ENTITY_PROPERTYGRP);

	(void) pthread_mutex_lock(&pg->rn_lock);
	result = rc_node_find_named_child(pg, propname,
	    REP_PROTOCOL_ENTITY_PROPERTY, &prop);
	(void) pthread_mutex_unlock(&pg->rn_lock);
	if (result != REP_PROTOCOL_SUCCESS) {
		switch (result) {
		case REP_PROTOCOL_FAIL_DELETED:
		case REP_PROTOCOL_FAIL_NO_RESOURCES:
			return (result);

		case REP_PROTOCOL_FAIL_INVALID_TYPE:
		case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
		default:
			/* pg's type was asserted above; can't happen. */
			bad_error("rc_node_find_named_child", result);
		}
	}

	if (prop == NULL)
		return (REP_PROTOCOL_FAIL_NOT_FOUND);

	/* rn_valtype is immutable, so no locking. */
	if (prop->rn_valtype != REP_PROTOCOL_TYPE_STRING) {
		/* Non-string properties contribute no authorizations. */
		rc_node_rele(prop);
		return (REP_PROTOCOL_SUCCESS);
	}

	(void) pthread_mutex_lock(&prop->rn_lock);
	/*
	 * The values are stored back to back in rn_values, each
	 * terminated by '\0'; walk rn_values_count of them.
	 */
	for (count = prop->rn_values_count, cp = prop->rn_values;
	    count > 0;
	    --count) {
		result = perm_add_enabling_type(pcp, cp,
		    (pg->rn_id.rl_ids[ID_INSTANCE]) ? PC_AUTH_INST :
		    PC_AUTH_SVC);
		if (result != REP_PROTOCOL_SUCCESS)
			break;

		cp = strchr(cp, '\0') + 1;
	}

	rc_node_rele_locked(prop);

	return (result);
}
/*
 * Assuming that ent is a service or instance node, if the pgname property
 * group has type pgtype, and it has a propname property with string type, add
 * its values as authorizations to pcp.  If pgtype is NULL, it is not checked.
 * Returns
 *	_SUCCESS
 *	_DELETED - ent was deleted
 *	_NO_RESOURCES - no resources
 *	_NOT_FOUND - ent does not have pgname pg or propname property
 */
static int
perm_add_ent_prop_values(permcheck_t *pcp, rc_node_t *ent, const char *pgname,
    const char *pgtype, const char *propname)
{
	int r;
	rc_node_t *pg;

	assert(!MUTEX_HELD(&ent->rn_lock));

	(void) pthread_mutex_lock(&ent->rn_lock);
	r = rc_node_find_named_child(ent, pgname,
	    REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
	(void) pthread_mutex_unlock(&ent->rn_lock);

	switch (r) {
	case REP_PROTOCOL_SUCCESS:
		break;

	case REP_PROTOCOL_FAIL_DELETED:
	case REP_PROTOCOL_FAIL_NO_RESOURCES:
		return (r);

	default:
		bad_error("rc_node_find_named_child", r);
	}

	if (pg == NULL)
		return (REP_PROTOCOL_FAIL_NOT_FOUND);

	/* A non-matching pg type contributes nothing (r stays _SUCCESS). */
	if (pgtype == NULL || strcmp(pg->rn_type, pgtype) == 0) {
		r = perm_add_pg_prop_values(pcp, pg, propname);
		switch (r) {
		case REP_PROTOCOL_FAIL_DELETED:
			/* A racing deletion reads as "not found". */
			r = REP_PROTOCOL_FAIL_NOT_FOUND;
			break;

		case REP_PROTOCOL_FAIL_NO_RESOURCES:
		case REP_PROTOCOL_SUCCESS:
		case REP_PROTOCOL_FAIL_NOT_FOUND:
			break;

		default:
			bad_error("perm_add_pg_prop_values", r);
		}
	}

	/* Drop the hold from rc_node_find_named_child(). */
	rc_node_rele(pg);

	return (r);
}
/*
 * If pg has a property named propname, and is string typed, add its values as
 * authorizations to pcp.  If pg has no such property, and its parent is an
 * instance, walk up to the service and try doing the same with the property
 * of the same name from the property group of the same name.  Returns
 *   _SUCCESS
 *   _NO_RESOURCES
 *   _DELETED - pg (or an ancestor) was deleted
 */
static int
perm_add_enabling_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
{
	int r;
	char pgname[REP_PROTOCOL_NAME_LEN + 1];
	rc_node_t *svc;
	size_t sz;

	r = perm_add_pg_prop_values(pcp, pg, propname);

	if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
		return (r);

	assert(!MUTEX_HELD(&pg->rn_lock));

	/* A service-level pg has no instance id; nothing further to try. */
	if (pg->rn_id.rl_ids[ID_INSTANCE] == 0)
		return (REP_PROTOCOL_SUCCESS);

	sz = strlcpy(pgname, pg->rn_name, sizeof (pgname));
	assert(sz < sizeof (pgname));

	/*
	 * If pg is a child of an instance or snapshot, we want to compose the
	 * authorization property with the service's (if it exists).  The
	 * snapshot case applies only to read_authorization.  In all other
	 * cases, the pg's parent will be the instance.
	 */
	r = rc_node_find_ancestor(pg, REP_PROTOCOL_ENTITY_SERVICE, &svc);
	if (r != REP_PROTOCOL_SUCCESS) {
		assert(r == REP_PROTOCOL_FAIL_DELETED);
		return (r);
	}
	assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);

	r = perm_add_ent_prop_values(pcp, svc, pgname, NULL, propname);

	rc_node_rele(svc);

	/* A missing service-level property is not an error. */
	if (r == REP_PROTOCOL_FAIL_NOT_FOUND)
		r = REP_PROTOCOL_SUCCESS;

	return (r);
}
2328 * Call perm_add_enabling_values() for the "action_authorization" property of
2329 * the "general" property group of inst. Returns
2330 * _DELETED - inst (or an ancestor) was deleted
2331 * _NO_RESOURCES
2332 * _SUCCESS
2334 static int
2335 perm_add_inst_action_auth(permcheck_t *pcp, rc_node_t *inst)
2337 int r;
2338 rc_node_t *svc;
2340 assert(inst->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
2342 r = perm_add_ent_prop_values(pcp, inst, AUTH_PG_GENERAL,
2343 AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2345 if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
2346 return (r);
2348 r = rc_node_parent(inst, &svc);
2349 if (r != REP_PROTOCOL_SUCCESS) {
2350 assert(r == REP_PROTOCOL_FAIL_DELETED);
2351 return (r);
2354 r = perm_add_ent_prop_values(pcp, svc, AUTH_PG_GENERAL,
2355 AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2357 return (r == REP_PROTOCOL_FAIL_NOT_FOUND ? REP_PROTOCOL_SUCCESS : r);
2359 #endif /* NATIVE_BUILD */
2361 void
2362 rc_node_ptr_init(rc_node_ptr_t *out)
2364 out->rnp_node = NULL;
2365 out->rnp_auth_string = NULL;
2366 out->rnp_authorized = RC_AUTH_UNKNOWN;
2367 out->rnp_deleted = 0;
2370 void
2371 rc_node_ptr_free_mem(rc_node_ptr_t *npp)
2373 if (npp->rnp_auth_string != NULL) {
2374 free((void *)npp->rnp_auth_string);
2375 npp->rnp_auth_string = NULL;
/*
 * Point *out at val, dropping any node it previously referenced.  Takes a
 * new hold on val (if non-NULL) and resets out's cached authorization
 * state and deleted flag.
 */
static void
rc_node_assign(rc_node_ptr_t *out, rc_node_t *val)
{
	rc_node_t *cur = out->rnp_node;
	/* Hold the new node before releasing the old one. */
	if (val != NULL)
		rc_node_hold(val);
	out->rnp_node = val;
	if (cur != NULL) {
		NODE_LOCK(cur);

		/*
		 * Register the ephemeral reference created by reading
		 * out->rnp_node into cur.  Note that the persistent
		 * reference we're destroying is locked by the client
		 * layer.
		 */
		rc_node_hold_ephemeral_locked(cur);

		rc_node_rele_locked(cur);
	}
	out->rnp_authorized = RC_AUTH_UNKNOWN;
	rc_node_ptr_free_mem(out);
	out->rnp_deleted = 0;
}
/*
 * Reset out so it references no node.  deleted records whether the pointer
 * is being cleared because its node was deleted, so later dereferences can
 * report _DELETED rather than _NOT_SET.
 */
void
rc_node_clear(rc_node_ptr_t *out, int deleted)
{
	rc_node_assign(out, NULL);
	out->rnp_deleted = deleted;
}
/*
 * Copy val's node reference into out (taking a fresh hold on the node).
 */
void
rc_node_ptr_assign(rc_node_ptr_t *out, const rc_node_ptr_t *val)
{
	rc_node_assign(out, val->rnp_node);
}
2418 * rc_node_check()/RC_NODE_CHECK()
2419 * generic "entry" checks, run before the use of an rc_node pointer.
2421 * Fails with
2422 * _NOT_SET
2423 * _DELETED
2425 static int
2426 rc_node_check_and_lock(rc_node_t *np)
2428 int result = REP_PROTOCOL_SUCCESS;
2429 if (np == NULL)
2430 return (REP_PROTOCOL_FAIL_NOT_SET);
2432 (void) pthread_mutex_lock(&np->rn_lock);
2433 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2434 result = REP_PROTOCOL_FAIL_DELETED;
2435 (void) pthread_mutex_unlock(&np->rn_lock);
2438 return (result);
2442 * Fails with
2443 * _NOT_SET - ptr is reset
2444 * _DELETED - node has been deleted
2446 static rc_node_t *
2447 rc_node_ptr_check_and_lock(rc_node_ptr_t *npp, int *res)
2449 rc_node_t *np = npp->rnp_node;
2450 if (np == NULL) {
2451 if (npp->rnp_deleted)
2452 *res = REP_PROTOCOL_FAIL_DELETED;
2453 else
2454 *res = REP_PROTOCOL_FAIL_NOT_SET;
2455 return (NULL);
2458 (void) pthread_mutex_lock(&np->rn_lock);
2459 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2460 (void) pthread_mutex_unlock(&np->rn_lock);
2461 rc_node_clear(npp, 1);
2462 *res = REP_PROTOCOL_FAIL_DELETED;
2463 return (NULL);
2465 return (np);
/*
 * Validate n; on failure return its error code from the enclosing
 * function, otherwise fall through with n's lock held.
 */
#define	RC_NODE_CHECK_AND_LOCK(n) {					\
	int rc__res;							\
	if ((rc__res = rc_node_check_and_lock(n)) != REP_PROTOCOL_SUCCESS) \
		return (rc__res);					\
}

/* As above, but the lock is dropped again before falling through. */
#define	RC_NODE_CHECK(n) {						\
	RC_NODE_CHECK_AND_LOCK(n);					\
	(void) pthread_mutex_unlock(&(n)->rn_lock);			\
}

/* Validate n, take a hold on it, and fall through unlocked. */
#define	RC_NODE_CHECK_AND_HOLD(n) {					\
	RC_NODE_CHECK_AND_LOCK(n);					\
	rc_node_hold_locked(n);						\
	(void) pthread_mutex_unlock(&(n)->rn_lock);			\
}

/* Load npp's node into np, locked; return its error code on failure. */
#define	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp) {			\
	int rc__res;							\
	if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == NULL)	\
		return (rc__res);					\
}

/* As above, but free mem before returning on failure. */
#define	RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, mem) {		\
	int rc__res;							\
	if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) ==	\
	    NULL) {							\
		free((mem));						\
		return (rc__res);					\
	}								\
}

/* Load npp's node into np and fall through unlocked. */
#define	RC_NODE_PTR_GET_CHECK(np, npp) {				\
	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);			\
	(void) pthread_mutex_unlock(&(np)->rn_lock);			\
}

/* Load npp's node into np with a hold, and fall through unlocked. */
#define	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp) {			\
	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);			\
	rc_node_hold_locked(np);					\
	(void) pthread_mutex_unlock(&(np)->rn_lock);			\
}

/*
 * Take flag on np (whose lock must be held); if the node dies instead,
 * drop the lock and return _DELETED from the enclosing function.
 */
#define	HOLD_FLAG_OR_RETURN(np, flag) {					\
	assert(MUTEX_HELD(&(np)->rn_lock));				\
	assert(!((np)->rn_flags & RC_NODE_DEAD));			\
	if (!rc_node_hold_flag((np), flag)) {				\
		(void) pthread_mutex_unlock(&(np)->rn_lock);		\
		return (REP_PROTOCOL_FAIL_DELETED);			\
	}								\
}

/*
 * As above, but additionally reset npp (marking it deleted) and free mem
 * before returning.  np must be the node npp currently references.
 */
#define	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, flag, mem) {		\
	assert(MUTEX_HELD(&(np)->rn_lock));				\
	if (!rc_node_hold_flag((np), flag)) {				\
		(void) pthread_mutex_unlock(&(np)->rn_lock);		\
		assert((np) == (npp)->rnp_node);			\
		rc_node_clear(npp, 1);					\
		free((mem));						\
		return (REP_PROTOCOL_FAIL_DELETED);			\
	}								\
}
2532 rc_local_scope(uint32_t type, rc_node_ptr_t *out)
2534 if (type != REP_PROTOCOL_ENTITY_SCOPE) {
2535 rc_node_clear(out, 0);
2536 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2540 * the main scope never gets destroyed
2542 rc_node_assign(out, rc_scope);
2544 return (REP_PROTOCOL_SUCCESS);
/*
 * Fails with
 *	_NOT_SET - npp is not set
 *	_DELETED - the node npp pointed at has been deleted
 *	_TYPE_MISMATCH - type is not _SCOPE
 *	_NOT_FOUND - scope has no parent
 */
static int
rc_scope_parent_scope(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
{
	rc_node_t *np;

	rc_node_clear(out, 0);

	/* Validates npp; returns _NOT_SET/_DELETED on its own. */
	RC_NODE_PTR_GET_CHECK(np, npp);

	if (type != REP_PROTOCOL_ENTITY_SCOPE)
		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);

	/* The localhost scope is the root of the tree; it has no parent. */
	return (REP_PROTOCOL_FAIL_NOT_FOUND);
}
2569 static int rc_node_pg_check_read_protect(rc_node_t *);
/*
 * Copy the requested name/attribute of npp's node into buf (of size sz),
 * reporting the copied length in *sz_out.  answertype selects what is
 * returned: the entity name, a pg's type or flags, a snaplevel's scope,
 * service, or instance name, or ("PGREADPROT") whether reads of a pg are
 * permission-protected ("1") or not ("0").
 *
 * Fails with
 *	_NOT_SET
 *	_DELETED
 *	_NOT_APPLICABLE
 *	_NOT_FOUND
 *	_BAD_REQUEST
 *	_TRUNCATED
 *	_NO_RESOURCES
 */
int
rc_node_name(rc_node_ptr_t *npp, char *buf, size_t sz, uint32_t answertype,
    size_t *sz_out)
{
	size_t actual;
	rc_node_t *np;

	assert(sz == *sz_out);

	RC_NODE_PTR_GET_CHECK(np, npp);

	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
		/* Composed pgs answer with their top-level (first) pg. */
		np = np->rn_cchain[0];
		RC_NODE_CHECK(np);
	}

	switch (answertype) {
	case RP_ENTITY_NAME_NAME:
		if (np->rn_name == NULL)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		actual = strlcpy(buf, np->rn_name, sz);
		break;
	case RP_ENTITY_NAME_PGTYPE:
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		actual = strlcpy(buf, np->rn_type, sz);
		break;
	case RP_ENTITY_NAME_PGFLAGS:
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		actual = snprintf(buf, sz, "%d", np->rn_pgflags);
		break;
	case RP_ENTITY_NAME_SNAPLEVEL_SCOPE:
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		actual = strlcpy(buf, np->rn_snaplevel->rsl_scope, sz);
		break;
	case RP_ENTITY_NAME_SNAPLEVEL_SERVICE:
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		actual = strlcpy(buf, np->rn_snaplevel->rsl_service, sz);
		break;
	case RP_ENTITY_NAME_SNAPLEVEL_INSTANCE:
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		/* Service-level snaplevels have no instance name. */
		if (np->rn_snaplevel->rsl_instance == NULL)
			return (REP_PROTOCOL_FAIL_NOT_FOUND);
		actual = strlcpy(buf, np->rn_snaplevel->rsl_instance, sz);
		break;
	case RP_ENTITY_NAME_PGREADPROT:
	{
		int ret;

		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		ret = rc_node_pg_check_read_protect(np);
		assert(ret != REP_PROTOCOL_FAIL_TYPE_MISMATCH);
		switch (ret) {
		case REP_PROTOCOL_FAIL_PERMISSION_DENIED:
			/* "1": reads of this pg are protected */
			actual = snprintf(buf, sz, "1");
			break;
		case REP_PROTOCOL_SUCCESS:
			actual = snprintf(buf, sz, "0");
			break;
		default:
			return (ret);
		}
		break;
	}
	default:
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
	}
	if (actual >= sz)
		return (REP_PROTOCOL_FAIL_TRUNCATED);

	*sz_out = actual;
	return (REP_PROTOCOL_SUCCESS);
}
2661 rc_node_get_property_type(rc_node_ptr_t *npp, rep_protocol_value_type_t *out)
2663 rc_node_t *np;
2665 RC_NODE_PTR_GET_CHECK(np, npp);
2667 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
2668 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2670 *out = np->rn_valtype;
2672 return (REP_PROTOCOL_SUCCESS);
2676 * Get np's parent. If np is deleted, returns _DELETED. Otherwise puts a hold
2677 * on the parent, returns a pointer to it in *out, and returns _SUCCESS.
2679 static int
2680 rc_node_parent(rc_node_t *np, rc_node_t **out)
2682 rc_node_t *pnp;
2683 rc_node_t *np_orig;
2685 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
2686 RC_NODE_CHECK_AND_LOCK(np);
2687 } else {
2688 np = np->rn_cchain[0];
2689 RC_NODE_CHECK_AND_LOCK(np);
2692 np_orig = np;
2693 rc_node_hold_locked(np); /* simplifies the remainder */
2695 for (;;) {
2696 if (!rc_node_wait_flag(np,
2697 RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
2698 rc_node_rele_locked(np);
2699 return (REP_PROTOCOL_FAIL_DELETED);
2702 if (!(np->rn_flags & RC_NODE_OLD))
2703 break;
2705 rc_node_rele_locked(np);
2706 np = cache_lookup(&np_orig->rn_id);
2707 assert(np != np_orig);
2709 if (np == NULL)
2710 goto deleted;
2711 (void) pthread_mutex_lock(&np->rn_lock);
2714 /* guaranteed to succeed without dropping the lock */
2715 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2716 (void) pthread_mutex_unlock(&np->rn_lock);
2717 *out = NULL;
2718 rc_node_rele(np);
2719 return (REP_PROTOCOL_FAIL_DELETED);
2722 assert(np->rn_parent != NULL);
2723 pnp = np->rn_parent;
2724 (void) pthread_mutex_unlock(&np->rn_lock);
2726 (void) pthread_mutex_lock(&pnp->rn_lock);
2727 (void) pthread_mutex_lock(&np->rn_lock);
2728 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2729 (void) pthread_mutex_unlock(&np->rn_lock);
2731 rc_node_hold_locked(pnp);
2733 (void) pthread_mutex_unlock(&pnp->rn_lock);
2735 rc_node_rele(np);
2736 *out = pnp;
2737 return (REP_PROTOCOL_SUCCESS);
2739 deleted:
2740 rc_node_rele(np);
2741 return (REP_PROTOCOL_FAIL_DELETED);
/*
 * Look up the parent of the node npp references, holding it in *out.
 *
 * Fails with
 *	_NOT_SET
 *	_DELETED
 */
static int
rc_node_ptr_parent(rc_node_ptr_t *npp, rc_node_t **out)
{
	rc_node_t *np;

	RC_NODE_PTR_GET_CHECK(np, npp);

	return (rc_node_parent(np, out));
}
2760 * Fails with
2761 * _NOT_SET - npp is not set
2762 * _DELETED - the node npp pointed at has been deleted
2763 * _TYPE_MISMATCH - npp's node's parent is not of type type
2765 * If npp points to a scope, can also fail with
2766 * _NOT_FOUND - scope has no parent
2769 rc_node_get_parent(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
2771 rc_node_t *pnp;
2772 int rc;
2774 if (npp->rnp_node != NULL &&
2775 npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE)
2776 return (rc_scope_parent_scope(npp, type, out));
2778 if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS) {
2779 rc_node_clear(out, 0);
2780 return (rc);
2783 if (type != pnp->rn_id.rl_type) {
2784 rc_node_rele(pnp);
2785 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2788 rc_node_assign(out, pnp);
2789 rc_node_rele(pnp);
2791 return (REP_PROTOCOL_SUCCESS);
2795 rc_node_parent_type(rc_node_ptr_t *npp, uint32_t *type_out)
2797 rc_node_t *pnp;
2798 int rc;
2800 if (npp->rnp_node != NULL &&
2801 npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE) {
2802 *type_out = REP_PROTOCOL_ENTITY_SCOPE;
2803 return (REP_PROTOCOL_SUCCESS);
2806 if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS)
2807 return (rc);
2809 *type_out = pnp->rn_id.rl_type;
2811 rc_node_rele(pnp);
2813 return (REP_PROTOCOL_SUCCESS);
/*
 * Look up the child of npp's node with the given name and type, binding
 * outp to it on success.
 *
 * Fails with
 *	_INVALID_TYPE - type is invalid
 *	_TYPE_MISMATCH - np doesn't carry children of type type
 *	_DELETED - np has been deleted
 *	_NOT_FOUND - no child with that name/type combo found
 *	_NO_RESOURCES
 *	_BACKEND_ACCESS
 */
int
rc_node_get_child(rc_node_ptr_t *npp, const char *name, uint32_t type,
    rc_node_ptr_t *outp)
{
	rc_node_t *np, *cp;
	rc_node_t *child = NULL;
	int ret, idx;

	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
	if ((ret = rc_check_type_name(type, name)) == REP_PROTOCOL_SUCCESS) {
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
			ret = rc_node_find_named_child(np, name, type, &child);
		} else {
			/*
			 * Composed pg: search each pg in the composition
			 * chain in order; the first match wins.
			 */
			(void) pthread_mutex_unlock(&np->rn_lock);
			ret = REP_PROTOCOL_SUCCESS;
			for (idx = 0; idx < COMPOSITION_DEPTH; idx++) {
				cp = np->rn_cchain[idx];
				if (cp == NULL)
					break;
				RC_NODE_CHECK_AND_LOCK(cp);
				ret = rc_node_find_named_child(cp, name, type,
				    &child);
				(void) pthread_mutex_unlock(&cp->rn_lock);
				/*
				 * loop only if we succeeded, but no child of
				 * the correct name was found.
				 */
				if (ret != REP_PROTOCOL_SUCCESS ||
				    child != NULL)
					break;
			}
			(void) pthread_mutex_lock(&np->rn_lock);
		}
	}
	(void) pthread_mutex_unlock(&np->rn_lock);

	if (ret == REP_PROTOCOL_SUCCESS) {
		/* rc_node_assign() takes its own hold on child. */
		rc_node_assign(outp, child);
		if (child != NULL)
			rc_node_rele(child);
		else
			ret = REP_PROTOCOL_FAIL_NOT_FOUND;
	} else {
		rc_node_assign(outp, NULL);
	}
	return (ret);
}
/*
 * Re-resolve npp against the cache: if the property group or snapshot it
 * names has been superseded by a newer generation, repoint npp at the
 * current node.  Returns _SUCCESS if npp was already current, _DONE if it
 * was updated, _BAD_REQUEST for other node types, and _DELETED if the
 * node is gone.
 */
int
rc_node_update(rc_node_ptr_t *npp)
{
	cache_bucket_t *bp;
	rc_node_t *np = npp->rnp_node;
	rc_node_t *nnp;
	rc_node_t *cpg = NULL;

	if (np != NULL &&
	    np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
		/*
		 * If we're updating a composed property group, actually
		 * update the top-level property group & return the
		 * appropriate value.  But leave *nnp pointing at us.
		 */
		cpg = np;
		np = np->rn_cchain[0];
	}

	RC_NODE_CHECK(np);

	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP &&
	    np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT)
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);

	/* Loop until we see the current node outside of a transaction. */
	for (;;) {
		bp = cache_hold(np->rn_hash);
		nnp = cache_lookup_unlocked(bp, &np->rn_id);
		if (nnp == NULL) {
			cache_release(bp);
			rc_node_clear(npp, 1);
			return (REP_PROTOCOL_FAIL_DELETED);
		}
		/*
		 * grab the lock before dropping the cache bucket, so
		 * that no one else can sneak in
		 */
		(void) pthread_mutex_lock(&nnp->rn_lock);
		cache_release(bp);

		if (!(nnp->rn_flags & RC_NODE_IN_TX) ||
		    !rc_node_wait_flag(nnp, RC_NODE_IN_TX))
			break;

		rc_node_rele_locked(nnp);
	}

	/*
	 * If it is dead, we want to update it so that it will continue to
	 * report being dead.
	 */
	if (nnp->rn_flags & RC_NODE_DEAD) {
		(void) pthread_mutex_unlock(&nnp->rn_lock);
		if (nnp != np && cpg == NULL)
			rc_node_assign(npp, nnp);	/* updated */
		rc_node_rele(nnp);
		return (REP_PROTOCOL_FAIL_DELETED);
	}

	assert(!(nnp->rn_flags & RC_NODE_OLD));
	(void) pthread_mutex_unlock(&nnp->rn_lock);

	if (nnp != np && cpg == NULL)
		rc_node_assign(npp, nnp);	/* updated */

	rc_node_rele(nnp);

	return ((nnp == np)? REP_PROTOCOL_SUCCESS : REP_PROTOCOL_DONE);
}
2944 * does a generic modification check, for creation, deletion, and snapshot
2945 * management only. Property group transactions have different checks.
2947 static perm_status_t
2948 rc_node_modify_permission_check(void)
2950 permcheck_t *pcp;
2951 perm_status_t granted = PERM_GRANTED;
2952 int rc;
2954 #ifdef NATIVE_BUILD
2955 if (!client_is_privileged()) {
2956 granted = PERM_DENIED;
2958 return (granted);
2959 #else
2960 if (is_main_repository == 0)
2961 return (PERM_GRANTED);
2962 pcp = pc_create();
2963 if (pcp != NULL) {
2964 rc = perm_add_enabling(pcp, AUTH_MODIFY);
2966 if (rc == REP_PROTOCOL_SUCCESS) {
2967 granted = perm_granted(pcp);
2968 } else {
2969 granted = PERM_FAIL;
2972 pc_free(pcp);
2973 } else {
2974 granted = PERM_FAIL;
2977 return (granted);
2978 #endif /* NATIVE_BUILD */
/*
 * Return a pointer to a string containing all the values of the command
 * specified by cmd_no with each value enclosed in quotes.  It is up to the
 * caller to free the memory at the returned pointer.
 *
 * Returns NULL if the transaction data cannot be read or on allocation
 * failure.
 */
static char *
generate_value_list(tx_commit_data_t *tx_data, size_t cmd_no)
{
	const char *cp;
	const char *cur_value;
	size_t byte_count = 0;
	uint32_t i;
	uint32_t nvalues;
	size_t str_size = 0;
	char *values = NULL;
	char *vp;

	if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
		return (NULL);
	/*
	 * First determine the size of the buffer that we will need.  We
	 * will represent each property value surrounded by quotes with a
	 * space separating the values.  Thus, we need to find the total
	 * size of all the value strings and add 3 for each value.
	 *
	 * There is one catch, though.  We need to escape any internal
	 * quote marks in the values.  So for each quote in the value we
	 * need to add another byte to the buffer size.
	 */
	for (i = 0; i < nvalues; i++) {
		if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
		    REP_PROTOCOL_SUCCESS)
			return (NULL);
		for (cp = cur_value; *cp != 0; cp++) {
			byte_count += (*cp == '"') ? 2 : 1;
		}
		byte_count += 3;	/* surrounding quotes & space */
	}
	byte_count++;		/* nul terminator */
	values = malloc(byte_count);
	if (values == NULL)
		return (NULL);
	*values = 0;

	/* Now build up the string of values. */
	for (i = 0; i < nvalues; i++) {
		if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
		    REP_PROTOCOL_SUCCESS) {
			free(values);
			return (NULL);
		}
		(void) strlcat(values, "\"", byte_count);
		for (cp = cur_value, vp = values + strlen(values);
		    *cp != 0; cp++) {
			if (*cp == '"') {
				/* Escape embedded quote marks as \" */
				*vp++ = '\\';
				*vp++ = '"';
			} else {
				*vp++ = *cp;
			}
		}
		*vp = 0;
		str_size = strlcat(values, "\" ", byte_count);
		assert(str_size < byte_count);
	}
	if (str_size > 0)
		values[str_size - 1] = 0;	/* get rid of trailing space */
	return (values);
}
3052 * Fails with
3053 * _DELETED - node has been deleted
3054 * _NOT_SET - npp is reset
3055 * _NOT_APPLICABLE - type is _PROPERTYGRP
3056 * _INVALID_TYPE - node is corrupt or type is invalid
3057 * _TYPE_MISMATCH - node cannot have children of type type
3058 * _BAD_REQUEST - name is invalid
3059 * cannot create children for this type of node
3060 * _NO_RESOURCES - out of memory, or could not allocate new id
3061 * _PERMISSION_DENIED
3062 * _BACKEND_ACCESS
3063 * _BACKEND_READONLY
3064 * _EXISTS - child already exists
3067 rc_node_create_child(rc_node_ptr_t *npp, uint32_t type, const char *name,
3068 rc_node_ptr_t *cpp)
3070 rc_node_t *np;
3071 rc_node_t *cp = NULL;
3072 int rc;
3073 perm_status_t perm_rc;
3074 size_t sz_out;
3075 char fmri[REP_PROTOCOL_FMRI_LEN];
3077 rc_node_clear(cpp, 0);
3080 * rc_node_modify_permission_check() must be called before the node
3081 * is locked. This is because the library functions that check
3082 * authorizations can trigger calls back into configd.
3084 perm_rc = rc_node_modify_permission_check();
3085 switch (perm_rc) {
3086 case PERM_GRANTED:
3087 break;
3088 case PERM_GONE:
3089 case PERM_DENIED:
3090 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
3091 case PERM_FAIL:
3092 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
3093 default:
3094 bad_error(rc_node_modify_permission_check, perm_rc);
3097 RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, NULL);
3100 * there is a separate interface for creating property groups
3102 if (type == REP_PROTOCOL_ENTITY_PROPERTYGRP) {
3103 (void) pthread_mutex_unlock(&np->rn_lock);
3104 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3107 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3108 (void) pthread_mutex_unlock(&np->rn_lock);
3109 np = np->rn_cchain[0];
3110 if ((rc = rc_node_check_and_lock(np)) != REP_PROTOCOL_SUCCESS) {
3111 return (rc);
3115 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
3116 REP_PROTOCOL_SUCCESS) {
3117 (void) pthread_mutex_unlock(&np->rn_lock);
3118 return (rc);
3120 if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS) {
3121 (void) pthread_mutex_unlock(&np->rn_lock);
3122 return (rc);
3125 if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
3126 name, type)) != REP_PROTOCOL_SUCCESS) {
3127 (void) pthread_mutex_unlock(&np->rn_lock);
3128 return (rc);
3131 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
3132 NULL);
3133 (void) pthread_mutex_unlock(&np->rn_lock);
3135 rc = object_create(np, type, name, &cp);
3136 assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3138 if (rc == REP_PROTOCOL_SUCCESS) {
3139 rc_node_assign(cpp, cp);
3140 rc_node_rele(cp);
3143 (void) pthread_mutex_lock(&np->rn_lock);
3144 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
3145 (void) pthread_mutex_unlock(&np->rn_lock);
3147 return (rc);
/*
 * Property-group variant of rc_node_create_child(): verify the modify
 * (and, where applicable, pg-type-specific and instance-action)
 * authorizations, then create the pg via object_create_pg() and bind cpp
 * to it.  flags may only contain SCF_PG_FLAG_NONPERSISTENT.
 */
int
rc_node_create_child_pg(rc_node_ptr_t *npp, uint32_t type, const char *name,
    const char *pgtype, uint32_t flags, rc_node_ptr_t *cpp)
{
	rc_node_t *np;
	rc_node_t *cp;
	int rc;
	permcheck_t *pcp;
	perm_status_t granted;
	char fmri[REP_PROTOCOL_FMRI_LEN];
	size_t sz_out;

	rc_node_clear(cpp, 0);

	/* verify flags is valid */
	if (flags & ~SCF_PG_FLAG_NONPERSISTENT)
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);

	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);

	if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
		rc_node_rele(np);
		return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
	}

	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
	    REP_PROTOCOL_SUCCESS) {
		rc_node_rele(np);
		return (rc);
	}
	if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS ||
	    (rc = rc_check_pgtype_name(pgtype)) != REP_PROTOCOL_SUCCESS) {
		rc_node_rele(np);
		return (rc);
	}

#ifdef NATIVE_BUILD
	if (!client_is_privileged()) {
		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
	}
#else
	if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
	    name, REP_PROTOCOL_ENTITY_PROPERTYGRP)) != REP_PROTOCOL_SUCCESS) {
		rc_node_rele(np);
		return (rc);
	}

	if (is_main_repository) {
		/* Must have .smf.modify or smf.modify.<type> authorization */
		pcp = pc_create();
		if (pcp != NULL) {
			rc = perm_add_enabling(pcp, AUTH_MODIFY);

			if (rc == REP_PROTOCOL_SUCCESS) {
				const char * const auth =
				    perm_auth_for_pgtype(pgtype);

				if (auth != NULL)
					rc = perm_add_enabling(pcp, auth);
			}

			/*
			 * .manage or $action_authorization can be used to
			 * create the actions pg and the general_ovr pg.
			 */
			if (rc == REP_PROTOCOL_SUCCESS &&
			    (flags & SCF_PG_FLAG_NONPERSISTENT) != 0 &&
			    np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE &&
			    ((strcmp(name, AUTH_PG_ACTIONS) == 0 &&
			    strcmp(pgtype, AUTH_PG_ACTIONS_TYPE) == 0) ||
			    (strcmp(name, AUTH_PG_GENERAL_OVR) == 0 &&
			    strcmp(pgtype, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
				rc = perm_add_enabling(pcp, AUTH_MANAGE);

				if (rc == REP_PROTOCOL_SUCCESS)
					rc = perm_add_inst_action_auth(pcp, np);
			}

			if (rc == REP_PROTOCOL_SUCCESS) {
				granted = perm_granted(pcp);

				rc = map_granted_status(granted, pcp);
				if (granted == PERM_GONE) {
					/* The client bailed; clean up. */
					pc_free(pcp);
					rc_node_rele(np);
					return (rc);
				}
			}
			pc_free(pcp);
		} else {
			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
		}

	} else {
		/* Non-main repositories are not permission checked. */
		rc = REP_PROTOCOL_SUCCESS;
	}
#endif /* NATIVE_BUILD */

	if (rc != REP_PROTOCOL_SUCCESS) {
		rc_node_rele(np);
		return (rc);
	}

	/* Block deletion of np while the pg is being created. */
	(void) pthread_mutex_lock(&np->rn_lock);
	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
	    NULL);
	(void) pthread_mutex_unlock(&np->rn_lock);

	rc = object_create_pg(np, type, name, pgtype, flags, &cp);

	if (rc == REP_PROTOCOL_SUCCESS) {
		rc_node_assign(cpp, cp);
		rc_node_rele(cp);
	}

	(void) pthread_mutex_lock(&np->rn_lock);
	rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
	(void) pthread_mutex_unlock(&np->rn_lock);

	return (rc);
}
3274 static void
3275 rc_pg_notify_fire(rc_node_pg_notify_t *pnp)
3277 assert(MUTEX_HELD(&rc_pg_notify_lock));
3279 if (pnp->rnpn_pg != NULL) {
3280 uu_list_remove(pnp->rnpn_pg->rn_pg_notify_list, pnp);
3281 (void) close(pnp->rnpn_fd);
3283 pnp->rnpn_pg = NULL;
3284 pnp->rnpn_fd = -1;
3285 } else {
3286 assert(pnp->rnpn_fd == -1);
/*
 * Walk up from np_arg collecting the pg, instance, and service nodes for
 * a deletion notification, then hand ndp to rc_notify_deletion().  If an
 * ancestor is dying or an unexpected node type is met, the notification
 * is dropped and ndp is freed.  All holds and USING_PARENT flags taken
 * during the walk are released before returning.
 */
static void
rc_notify_node_delete(rc_notify_delete_t *ndp, rc_node_t *np_arg)
{
	rc_node_t *svc = NULL;
	rc_node_t *inst = NULL;
	rc_node_t *pg = NULL;
	rc_node_t *np = np_arg;
	rc_node_t *nnp;

	/* Climb until we reach the service level. */
	while (svc == NULL) {
		(void) pthread_mutex_lock(&np->rn_lock);
		if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
			(void) pthread_mutex_unlock(&np->rn_lock);
			goto cleanup;
		}
		nnp = np->rn_parent;
		rc_node_hold_locked(np);	/* hold it in place */

		switch (np->rn_id.rl_type) {
		case REP_PROTOCOL_ENTITY_PROPERTYGRP:
			assert(pg == NULL);
			pg = np;
			break;
		case REP_PROTOCOL_ENTITY_INSTANCE:
			assert(inst == NULL);
			inst = np;
			break;
		case REP_PROTOCOL_ENTITY_SERVICE:
			assert(svc == NULL);
			svc = np;
			break;
		default:
			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
			rc_node_rele_locked(np);
			goto cleanup;
		}

		(void) pthread_mutex_unlock(&np->rn_lock);

		np = nnp;
		if (np == NULL)
			goto cleanup;
	}

	rc_notify_deletion(ndp,
	    svc->rn_name,
	    inst != NULL ? inst->rn_name : NULL,
	    pg != NULL ? pg->rn_name : NULL);

	ndp = NULL;	/* ownership passed to rc_notify_deletion() */

cleanup:
	if (ndp != NULL)
		uu_free(ndp);

	/* Release the hold and USING_PARENT flag taken on each level. */
	for (;;) {
		if (svc != NULL) {
			np = svc;
			svc = NULL;
		} else if (inst != NULL) {
			np = inst;
			inst = NULL;
		} else if (pg != NULL) {
			np = pg;
			pg = NULL;
		} else
			break;

		(void) pthread_mutex_lock(&np->rn_lock);
		rc_node_rele_flag(np, RC_NODE_USING_PARENT);
		rc_node_rele_locked(np);
	}
}
/*
 * Hold RC_NODE_DYING_FLAGS on np's descendents.  If andformer is true, do
 * the same down the rn_former chain.
 *
 * Entered with np locked (and already holding RC_NODE_DYING_FLAGS);
 * returns with np's lock released.
 */
static void
rc_node_delete_hold(rc_node_t *np, int andformer)
{
	rc_node_t *cp;

again:
	assert(MUTEX_HELD(&np->rn_lock));
	assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);

	for (cp = uu_list_first(np->rn_children); cp != NULL;
	    cp = uu_list_next(np->rn_children, cp)) {
		(void) pthread_mutex_lock(&cp->rn_lock);
		(void) pthread_mutex_unlock(&np->rn_lock);
		if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS)) {
			/*
			 * already marked as dead -- can't happen, since that
			 * would require setting RC_NODE_CHILDREN_CHANGING
			 * in np, and we're holding that...
			 */
			abort();
		}
		rc_node_delete_hold(cp, andformer);	/* recurse, drop lock */

		(void) pthread_mutex_lock(&np->rn_lock);
	}
	if (andformer && (cp = np->rn_former) != NULL) {
		(void) pthread_mutex_lock(&cp->rn_lock);
		(void) pthread_mutex_unlock(&np->rn_lock);
		if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS))
			abort();	/* can't happen, see above */
		np = cp;
		goto again;	/* tail-recurse down rn_former */
	}
	(void) pthread_mutex_unlock(&np->rn_lock);
}
/*
 * Undo rc_node_delete_hold(): release RC_NODE_DYING_FLAGS on np's
 * descendents (and its rn_former chain, if andformer is set).
 *
 * N.B.: this function drops np->rn_lock on the way out.
 */
static void
rc_node_delete_rele(rc_node_t *np, int andformer)
{
	rc_node_t *cp;

again:
	assert(MUTEX_HELD(&np->rn_lock));
	assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);

	for (cp = uu_list_first(np->rn_children); cp != NULL;
	    cp = uu_list_next(np->rn_children, cp)) {
		(void) pthread_mutex_lock(&cp->rn_lock);
		(void) pthread_mutex_unlock(&np->rn_lock);
		rc_node_delete_rele(cp, andformer);	/* recurse, drop lock */
		(void) pthread_mutex_lock(&np->rn_lock);
	}
	if (andformer && (cp = np->rn_former) != NULL) {
		(void) pthread_mutex_lock(&cp->rn_lock);
		rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
		(void) pthread_mutex_unlock(&np->rn_lock);

		np = cp;
		goto again;	/* tail-recurse down rn_former */
	}
	rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
	(void) pthread_mutex_unlock(&np->rn_lock);
}
/*
 * Mark cp dead.  Unless cp is an out-dated (RC_NODE_OLD) generation, also
 * detach it from its parent, fire and close its pg notifications, and
 * remove it from the notify list and the cache hash table.  Called with
 * cp's lock held; the lock may be dropped and retaken internally.
 */
static void
rc_node_finish_delete(rc_node_t *cp)
{
	cache_bucket_t *bp;
	rc_node_pg_notify_t *pnp;

	assert(MUTEX_HELD(&cp->rn_lock));

	if (!(cp->rn_flags & RC_NODE_OLD)) {
		assert(cp->rn_flags & RC_NODE_IN_PARENT);
		if (!rc_node_wait_flag(cp, RC_NODE_USING_PARENT)) {
			abort();	/* can't happen, see above */
		}
		cp->rn_flags &= ~RC_NODE_IN_PARENT;
		cp->rn_parent = NULL;
		rc_node_free_fmri(cp);
	}

	cp->rn_flags |= RC_NODE_DEAD;

	/*
	 * If this node is not out-dated, we need to remove it from
	 * the notify list and cache hash table.
	 */
	if (!(cp->rn_flags & RC_NODE_OLD)) {
		assert(cp->rn_refs > 0);	/* can't go away yet */
		(void) pthread_mutex_unlock(&cp->rn_lock);

		(void) pthread_mutex_lock(&rc_pg_notify_lock);
		while ((pnp = uu_list_first(cp->rn_pg_notify_list)) != NULL)
			rc_pg_notify_fire(pnp);
		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
		rc_notify_remove_node(cp);

		bp = cache_hold(cp->rn_hash);
		(void) pthread_mutex_lock(&cp->rn_lock);
		cache_remove_unlocked(bp, cp);
		cache_release(bp);
	}
}
3477 * For each child, call rc_node_finish_delete() and recurse. If andformer
3478 * is set, also recurse down rn_former. Finally release np, which might
3479 * free it.
3481 static void
3482 rc_node_delete_children(rc_node_t *np, int andformer)
3484 rc_node_t *cp;
3486 again:
3487 assert(np->rn_refs > 0);
3488 assert(MUTEX_HELD(&np->rn_lock));
3489 assert(np->rn_flags & RC_NODE_DEAD);
/*
 * Detach children one at a time: remove from the list first so a
 * re-entry cannot see a half-deleted child, then recurse with cp's
 * lock held (np's lock is dropped to respect parent-then-child order).
 */
3491 while ((cp = uu_list_first(np->rn_children)) != NULL) {
3492 uu_list_remove(np->rn_children, cp);
3493 (void) pthread_mutex_lock(&cp->rn_lock);
3494 (void) pthread_mutex_unlock(&np->rn_lock);
3495 rc_node_hold_locked(cp); /* hold while we recurse */
3496 rc_node_finish_delete(cp);
3497 rc_node_delete_children(cp, andformer); /* drops lock + ref */
3498 (void) pthread_mutex_lock(&np->rn_lock);
3502 * When we drop cp's lock, all the children will be gone, so we
3503 * can release DYING_FLAGS.
3505 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
3506 if (andformer && (cp = np->rn_former) != NULL) {
3507 np->rn_former = NULL; /* unlink */
3508 (void) pthread_mutex_lock(&cp->rn_lock);
3511 * Register the ephemeral reference created by reading
3512 * np->rn_former into cp. Note that the persistent
3513 * reference (np->rn_former) is locked because we haven't
3514 * dropped np's lock since we dropped its RC_NODE_IN_TX
3515 * (via RC_NODE_DYING_FLAGS).
3517 rc_node_hold_ephemeral_locked(cp);
3519 (void) pthread_mutex_unlock(&np->rn_lock);
3520 cp->rn_flags &= ~RC_NODE_ON_FORMER;
3522 rc_node_hold_locked(cp); /* hold while we loop */
3524 rc_node_finish_delete(cp);
3526 rc_node_rele(np); /* drop the old reference */
/* Iterate (rather than recurse) down the rn_former chain. */
3528 np = cp;
3529 goto again; /* tail-recurse down rn_former */
3531 rc_node_rele_locked(np);
3535 * The last client or child reference to np, which must be either
3536 * RC_NODE_OLD or RC_NODE_DEAD, has been destroyed. We'll destroy any
3537 * remaining references (e.g., rn_former) and call rc_node_destroy() to
3538 * free np.
/*
 * Entered with np->rn_lock held and all three reference counts zero
 * (asserted below).  Always returns with np->rn_lock dropped; np may
 * have been freed by the time this returns.  DEAD nodes are destroyed
 * outright (unless another thread holds an ephemeral reference, in
 * which case the last holder will destroy it).  OLD nodes must first
 * be unlinked from the rn_former chain hanging off the current in-hash
 * node, which requires taking that node's RC_NODE_IN_TX flag.
 */
3540 static void
3541 rc_node_no_client_refs(rc_node_t *np)
3543 int unrefed;
3544 rc_node_t *current, *cur;
3546 assert(MUTEX_HELD(&np->rn_lock));
3547 assert(np->rn_refs == 0);
3548 assert(np->rn_other_refs == 0);
3549 assert(np->rn_other_refs_held == 0);
3551 if (np->rn_flags & RC_NODE_DEAD) {
3553 * The node is DEAD, so the deletion code should have
3554 * destroyed all rn_children or rn_former references.
3555 * Since the last client or child reference has been
3556 * destroyed, we're free to destroy np. Unless another
3557 * thread has an ephemeral reference, in which case we'll
3558 * pass the buck.
3560 if (np->rn_erefs > 1) {
3561 --np->rn_erefs;
3562 NODE_UNLOCK(np);
3563 return;
3566 (void) pthread_mutex_unlock(&np->rn_lock);
3567 rc_node_destroy(np);
3568 return;
3571 /* We only collect DEAD and OLD nodes, thank you. */
3572 assert(np->rn_flags & RC_NODE_OLD);
3575 * RC_NODE_UNREFED keeps multiple threads from processing OLD
3576 * nodes. But it's vulnerable to unfriendly scheduling, so full
3577 * use of rn_erefs should supersede it someday.
3579 if (np->rn_flags & RC_NODE_UNREFED) {
3580 (void) pthread_mutex_unlock(&np->rn_lock);
3581 return;
3583 np->rn_flags |= RC_NODE_UNREFED;
3586 * Now we'll remove the node from the rn_former chain and take its
3587 * DYING_FLAGS.
3591 * Since this node is OLD, it should be on an rn_former chain. To
3592 * remove it, we must find the current in-hash object and grab its
3593 * RC_NODE_IN_TX flag to protect the entire rn_former chain.
3596 (void) pthread_mutex_unlock(&np->rn_lock);
/*
 * Retry loop: look up the in-hash ("current") node for np's id and pin
 * its rn_former chain by taking RC_NODE_IN_TX.  Each continue below
 * re-runs the lookup because current was replaced or deleted while we
 * were acquiring its lock/flag.
 */
3598 for (;;) {
3599 current = cache_lookup(&np->rn_id);
3601 if (current == NULL) {
3602 (void) pthread_mutex_lock(&np->rn_lock);
3604 if (np->rn_flags & RC_NODE_DEAD)
3605 goto died;
3608 * We are trying to unreference this node, but the
3609 * owner of the former list does not exist. It must
3610 * be the case that another thread is deleting this
3611 * entire sub-branch, but has not yet reached us.
3612 * We will in short order be deleted.
3614 np->rn_flags &= ~RC_NODE_UNREFED;
3615 (void) pthread_mutex_unlock(&np->rn_lock);
3616 return;
3619 if (current == np) {
3621 * no longer unreferenced
3623 (void) pthread_mutex_lock(&np->rn_lock);
3624 np->rn_flags &= ~RC_NODE_UNREFED;
3625 /* held in cache_lookup() */
3626 rc_node_rele_locked(np);
3627 return;
3630 (void) pthread_mutex_lock(&current->rn_lock);
3631 if (current->rn_flags & RC_NODE_OLD) {
3633 * current has been replaced since we looked it
3634 * up. Try again.
3636 /* held in cache_lookup() */
3637 rc_node_rele_locked(current);
3638 continue;
3641 if (!rc_node_hold_flag(current, RC_NODE_IN_TX)) {
3643 * current has been deleted since we looked it up. Try
3644 * again.
3646 /* held in cache_lookup() */
3647 rc_node_rele_locked(current);
3648 continue;
3652 * rc_node_hold_flag() might have dropped current's lock, so
3653 * check OLD again.
3655 if (!(current->rn_flags & RC_NODE_OLD)) {
3656 /* Not old. Stop looping. */
3657 (void) pthread_mutex_unlock(&current->rn_lock);
3658 break;
3661 rc_node_rele_flag(current, RC_NODE_IN_TX);
3662 rc_node_rele_locked(current);
3665 /* To take np's RC_NODE_DYING_FLAGS, we need its lock. */
3666 (void) pthread_mutex_lock(&np->rn_lock);
3669 * While we didn't have the lock, a thread may have added
3670 * a reference or changed the flags.
3672 if (!(np->rn_flags & (RC_NODE_OLD | RC_NODE_DEAD)) ||
3673 np->rn_refs != 0 || np->rn_other_refs != 0 ||
3674 np->rn_other_refs_held != 0) {
3675 np->rn_flags &= ~RC_NODE_UNREFED;
/* Back out: release current's IN_TX and the lookup hold. */
3677 (void) pthread_mutex_lock(&current->rn_lock);
3678 rc_node_rele_flag(current, RC_NODE_IN_TX);
3679 /* held by cache_lookup() */
3680 rc_node_rele_locked(current);
3681 return;
3684 if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
3686 * Someone deleted the node while we were waiting for
3687 * DYING_FLAGS. Undo the modifications to current.
3689 (void) pthread_mutex_unlock(&np->rn_lock);
3691 rc_node_rele_flag(current, RC_NODE_IN_TX);
3692 /* held by cache_lookup() */
3693 rc_node_rele_locked(current);
3695 (void) pthread_mutex_lock(&np->rn_lock);
3696 goto died;
3699 /* Take RC_NODE_DYING_FLAGS on np's descendents. */
3700 rc_node_delete_hold(np, 0); /* drops np->rn_lock */
3702 /* Mark np DEAD. This requires the lock. */
3703 (void) pthread_mutex_lock(&np->rn_lock);
3705 /* Recheck for new references. */
3706 if (!(np->rn_flags & RC_NODE_OLD) ||
3707 np->rn_refs != 0 || np->rn_other_refs != 0 ||
3708 np->rn_other_refs_held != 0) {
3709 np->rn_flags &= ~RC_NODE_UNREFED;
3710 rc_node_delete_rele(np, 0); /* drops np's lock */
3712 (void) pthread_mutex_lock(&current->rn_lock);
3713 rc_node_rele_flag(current, RC_NODE_IN_TX);
3714 /* held by cache_lookup() */
3715 rc_node_rele_locked(current);
3716 return;
3719 np->rn_flags |= RC_NODE_DEAD;
3722 * Delete the children. This calls rc_node_rele_locked() on np at
3723 * the end, so add a reference to keep the count from going
3724 * negative. It will recurse with RC_NODE_DEAD set, so we'll call
3725 * rc_node_destroy() above, but RC_NODE_UNREFED is also set, so it
3726 * shouldn't actually free() np.
3728 rc_node_hold_locked(np);
3729 rc_node_delete_children(np, 0); /* unlocks np */
3731 /* Remove np from current's rn_former chain. */
3732 (void) pthread_mutex_lock(&current->rn_lock);
/* Walk the chain to find the node whose rn_former is np. */
3733 for (cur = current; cur != NULL && cur->rn_former != np;
3734 cur = cur->rn_former)
3736 assert(cur != NULL && cur != np);
3738 cur->rn_former = np->rn_former;
3739 np->rn_former = NULL;
3741 rc_node_rele_flag(current, RC_NODE_IN_TX);
3742 /* held by cache_lookup() */
3743 rc_node_rele_locked(current);
3745 /* Clear ON_FORMER and UNREFED, and destroy. */
3746 (void) pthread_mutex_lock(&np->rn_lock);
3747 assert(np->rn_flags & RC_NODE_ON_FORMER);
3748 np->rn_flags &= ~(RC_NODE_UNREFED | RC_NODE_ON_FORMER);
3750 if (np->rn_erefs > 1) {
3751 /* Still referenced. Stay execution. */
3752 --np->rn_erefs;
3753 NODE_UNLOCK(np);
3754 return;
3757 (void) pthread_mutex_unlock(&np->rn_lock);
3758 rc_node_destroy(np);
3759 return;
3761 died:
3763 * Another thread marked np DEAD. If there still aren't any
3764 * persistent references, destroy the node.
3766 np->rn_flags &= ~RC_NODE_UNREFED;
3768 unrefed = (np->rn_refs == 0 && np->rn_other_refs == 0 &&
3769 np->rn_other_refs_held == 0);
3771 if (np->rn_erefs > 0)
3772 --np->rn_erefs;
/* If another thread still holds an ephemeral ref, it will destroy np. */
3774 if (unrefed && np->rn_erefs > 0) {
3775 NODE_UNLOCK(np);
3776 return;
3779 (void) pthread_mutex_unlock(&np->rn_lock);
3781 if (unrefed)
3782 rc_node_destroy(np);
3785 * Fails with
3786 * _NOT_SET
3787 * _DELETED
3788 * _BAD_REQUEST
3789 * _PERMISSION_DENIED
3790 * _NO_RESOURCES
3791 * _TRUNCATED
3792 * and whatever object_delete() fails with.
/*
 * Delete the entity referenced by npp from the repository: write the
 * deletion through to the backend (object_delete()), then unlink the
 * node from the cache hash, its parent's child list, and the notify
 * machinery, and propagate death through its children and former
 * versions.  Failure codes are listed in the comment above.
 */
3795 rc_node_delete(rc_node_ptr_t *npp)
3797 rc_node_t *np, *np_orig;
3798 rc_node_t *pp = NULL;
3799 int rc;
3800 rc_node_pg_notify_t *pnp;
3801 cache_bucket_t *bp;
3802 rc_notify_delete_t *ndp;
3803 permcheck_t *pcp;
3804 int granted;
3805 size_t sz_out;
/* Macro validates npp and returns early on failure; locks np on success. */
3807 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
/*
 * Only services, instances, snapshots, and non-snapshot property
 * groups may be deleted; everything else fails up front.
 */
3809 switch (np->rn_id.rl_type) {
3810 case REP_PROTOCOL_ENTITY_SERVICE:
3811 break;
3812 case REP_PROTOCOL_ENTITY_INSTANCE:
3813 break;
3814 case REP_PROTOCOL_ENTITY_SNAPSHOT:
3815 break; /* deletable */
3817 case REP_PROTOCOL_ENTITY_SCOPE:
3818 case REP_PROTOCOL_ENTITY_SNAPLEVEL:
3819 /* Scopes and snaplevels are indelible. */
3820 (void) pthread_mutex_unlock(&np->rn_lock);
3821 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
3823 case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
/* For composed pgs, operate on the first node of the composition chain. */
3824 (void) pthread_mutex_unlock(&np->rn_lock);
3825 np = np->rn_cchain[0];
3826 RC_NODE_CHECK_AND_LOCK(np);
3827 break;
3829 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
3830 if (np->rn_id.rl_ids[ID_SNAPSHOT] == 0) {
3831 break;
3834 /* Snapshot property groups are indelible. */
3835 (void) pthread_mutex_unlock(&np->rn_lock);
3836 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
3838 case REP_PROTOCOL_ENTITY_PROPERTY:
3839 (void) pthread_mutex_unlock(&np->rn_lock);
3840 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
3842 default:
3843 assert(0);
3844 abort();
3845 break;
3848 np_orig = np;
3849 rc_node_hold_locked(np); /* simplifies rest of the code */
3851 again:
3853 * The following loop is to deal with the fact that snapshots and
3854 * property groups are moving targets -- changes to them result
3855 * in a new "child" node. Since we can only delete from the top node,
3856 * we have to loop until we have a non-RC_NODE_OLD version.
3858 for (;;) {
3859 if (!rc_node_wait_flag(np,
3860 RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
3861 rc_node_rele_locked(np);
3862 rc = REP_PROTOCOL_FAIL_DELETED;
3863 goto cleanout;
3866 if (np->rn_flags & RC_NODE_OLD) {
/* np was superseded; re-look-up the current in-hash version. */
3867 rc_node_rele_locked(np);
3868 np = cache_lookup(&np_orig->rn_id);
3869 assert(np != np_orig);
3871 if (np == NULL) {
3872 rc = REP_PROTOCOL_FAIL_DELETED;
3873 goto fail;
3875 (void) pthread_mutex_lock(&np->rn_lock);
3876 continue;
3879 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
3880 rc_node_rele_locked(np);
3881 rc_node_clear(npp, 1);
3882 rc = REP_PROTOCOL_FAIL_DELETED;
3886 * Mark our parent as children changing. This call drops our
3887 * lock and the RC_NODE_USING_PARENT flag, and returns with
3888 * pp's lock held.
3890 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
3891 if (pp == NULL) {
3892 /* our parent is gone, we're going next... */
3893 rc_node_rele(np);
3895 rc_node_clear(npp, 1);
3896 rc = REP_PROTOCOL_FAIL_DELETED;
3897 goto cleanout;
3900 rc_node_hold_locked(pp); /* hold for later */
3901 (void) pthread_mutex_unlock(&pp->rn_lock);
3903 (void) pthread_mutex_lock(&np->rn_lock);
3904 if (!(np->rn_flags & RC_NODE_OLD))
3905 break; /* not old -- we're done */
/* np went OLD while we took pp's flag; undo and retry. */
3907 (void) pthread_mutex_unlock(&np->rn_lock);
3908 (void) pthread_mutex_lock(&pp->rn_lock);
3909 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
3910 rc_node_rele_locked(pp);
3911 (void) pthread_mutex_lock(&np->rn_lock);
3912 continue; /* loop around and try again */
3915 * Everyone out of the pool -- we grab everything but
3916 * RC_NODE_USING_PARENT (including RC_NODE_DYING) to keep
3917 * any changes from occurring while we are attempting to
3918 * delete the node.
3920 if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
3921 (void) pthread_mutex_unlock(&np->rn_lock);
3922 rc = REP_PROTOCOL_FAIL_DELETED;
3923 goto fail;
3926 assert(!(np->rn_flags & RC_NODE_OLD));
3928 #ifdef NATIVE_BUILD
3929 if (!client_is_privileged())
3930 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
3931 else
3932 rc = REP_PROTOCOL_SUCCESS;
3933 #else
3934 if (is_main_repository) {
3935 /* permission check */
3936 (void) pthread_mutex_unlock(&np->rn_lock);
3937 pcp = pc_create();
3938 if (pcp != NULL) {
3939 rc = perm_add_enabling(pcp, AUTH_MODIFY);
3941 /* add .smf.modify.<type> for pgs. */
3942 if (rc == REP_PROTOCOL_SUCCESS && np->rn_id.rl_type ==
3943 REP_PROTOCOL_ENTITY_PROPERTYGRP) {
3944 const char * const auth =
3945 perm_auth_for_pgtype(np->rn_type);
3947 if (auth != NULL)
3948 rc = perm_add_enabling(pcp, auth);
3951 if (rc == REP_PROTOCOL_SUCCESS) {
3952 granted = perm_granted(pcp);
3954 rc = map_granted_status(granted, pcp);
3955 if (granted == PERM_GONE) {
/* The client went away mid-check; release and bail directly. */
3956 pc_free(pcp);
3957 rc_node_rele_flag(np,
3958 RC_NODE_DYING_FLAGS);
3959 return (rc);
3963 pc_free(pcp);
3964 } else {
3965 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
3968 (void) pthread_mutex_lock(&np->rn_lock);
3969 } else {
3970 rc = REP_PROTOCOL_SUCCESS;
3972 #endif /* NATIVE_BUILD */
3974 if (rc != REP_PROTOCOL_SUCCESS) {
3975 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
3976 (void) pthread_mutex_unlock(&np->rn_lock);
3977 goto fail;
/* Allocate the delete notification up front so we can't fail later. */
3980 ndp = uu_zalloc(sizeof (*ndp));
3981 if (ndp == NULL) {
3982 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
3983 (void) pthread_mutex_unlock(&np->rn_lock);
3984 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
3985 goto fail;
3988 rc_node_delete_hold(np, 1); /* hold entire subgraph, drop lock */
/* Write the deletion through to the backend. */
3990 rc = object_delete(np);
3992 if (rc != REP_PROTOCOL_SUCCESS) {
3993 (void) pthread_mutex_lock(&np->rn_lock);
3994 rc_node_delete_rele(np, 1); /* drops lock */
3995 uu_free(ndp);
3996 goto fail;
4000 * Now, delicately unlink and delete the object.
4002 * Create the delete notification, atomically remove
4003 * from the hash table and set the NODE_DEAD flag, and
4004 * remove from the parent's children list.
4006 rc_notify_node_delete(ndp, np); /* frees or uses ndp */
4008 bp = cache_hold(np->rn_hash);
4010 (void) pthread_mutex_lock(&np->rn_lock);
4011 cache_remove_unlocked(bp, np);
4012 cache_release(bp);
4014 np->rn_flags |= RC_NODE_DEAD;
4016 if (pp != NULL) {
4018 * Remove from pp's rn_children. This requires pp's lock,
4019 * so we must drop np's lock to respect lock order.
4021 (void) pthread_mutex_unlock(&np->rn_lock);
4022 (void) pthread_mutex_lock(&pp->rn_lock);
4023 (void) pthread_mutex_lock(&np->rn_lock);
4025 uu_list_remove(pp->rn_children, np);
4027 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4029 (void) pthread_mutex_unlock(&pp->rn_lock);
4031 np->rn_flags &= ~RC_NODE_IN_PARENT;
4035 * finally, propagate death to our children (including marking
4036 * them DEAD), handle notifications, and release our hold.
4038 rc_node_hold_locked(np); /* hold for delete */
4039 rc_node_delete_children(np, 1); /* drops DYING_FLAGS, lock, ref */
4041 rc_node_clear(npp, 1);
4043 (void) pthread_mutex_lock(&rc_pg_notify_lock);
4044 while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
4045 rc_pg_notify_fire(pnp);
4046 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
4047 rc_notify_remove_node(np);
4049 rc_node_rele(np);
4051 return (rc);
4053 fail:
4054 rc_node_rele(np);
4055 if (rc == REP_PROTOCOL_FAIL_DELETED)
4056 rc_node_clear(npp, 1);
4057 if (pp != NULL) {
4058 (void) pthread_mutex_lock(&pp->rn_lock);
4059 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4060 rc_node_rele_locked(pp); /* drop ref and lock */
4062 cleanout:
4063 return (rc);
/*
 * Return (in *cpp) the snaplevel that follows npp: for a snapshot, its
 * first snaplevel child; for a snaplevel, the next sibling snaplevel.
 * Fails with _NOT_APPLICABLE (npp is neither snapshot nor snaplevel),
 * _DELETED, _NOT_FOUND (no next snaplevel), or whatever
 * rc_node_fill_children() fails with.
 */
4067 rc_node_next_snaplevel(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
4069 rc_node_t *np;
4070 rc_node_t *cp, *pp;
4071 int res;
4073 rc_node_clear(cpp, 0);
/* Macro validates npp and returns early on failure; locks np on success. */
4075 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
4077 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT &&
4078 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL) {
4079 (void) pthread_mutex_unlock(&np->rn_lock);
4080 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
4083 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
/* Snapshot: load children on demand, then take the first snaplevel. */
4084 if ((res = rc_node_fill_children(np,
4085 REP_PROTOCOL_ENTITY_SNAPLEVEL)) != REP_PROTOCOL_SUCCESS) {
4086 (void) pthread_mutex_unlock(&np->rn_lock);
4087 return (res);
4090 for (cp = uu_list_first(np->rn_children);
4091 cp != NULL;
4092 cp = uu_list_next(np->rn_children, cp)) {
4093 if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
4094 continue;
4095 rc_node_hold(cp);
4096 break;
4099 (void) pthread_mutex_unlock(&np->rn_lock);
4100 } else {
4101 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
4102 (void) pthread_mutex_unlock(&np->rn_lock);
4103 rc_node_clear(npp, 1);
4104 return (REP_PROTOCOL_FAIL_DELETED);
4108 * Mark our parent as children changing. This call drops our
4109 * lock and the RC_NODE_USING_PARENT flag, and returns with
4110 * pp's lock held.
4112 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
4113 if (pp == NULL) {
4114 /* our parent is gone, we're going next... */
4116 rc_node_clear(npp, 1);
4117 return (REP_PROTOCOL_FAIL_DELETED);
4121 * find the next snaplevel
4123 cp = np;
4124 while ((cp = uu_list_next(pp->rn_children, cp)) != NULL &&
4125 cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
4128 /* it must match the snaplevel list */
4129 assert((cp == NULL && np->rn_snaplevel->rsl_next == NULL) ||
4130 (cp != NULL && np->rn_snaplevel->rsl_next ==
4131 cp->rn_snaplevel));
4133 if (cp != NULL)
4134 rc_node_hold(cp);
4136 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4138 (void) pthread_mutex_unlock(&pp->rn_lock);
4141 rc_node_assign(cpp, cp);
4142 if (cp != NULL) {
/* rc_node_assign() took its own hold; drop ours. */
4143 rc_node_rele(cp);
4145 return (REP_PROTOCOL_SUCCESS);
4147 return (REP_PROTOCOL_FAIL_NOT_FOUND);
4151 * This call takes a snapshot (np) and either:
4152 * an existing snapid (to be associated with np), or
4153 * a non-NULL parentp (from which a new snapshot is taken, and associated
4154 * with np)
4156 * To do the association, np is duplicated, the duplicate is made to
4157 * represent the new snapid, and np is replaced with the new rc_node_t on
4158 * np's parent's child list. np is placed on the new node's rn_former list,
4159 * and replaces np in cache_hash (so rc_node_update() will find the new one).
4161 * old_fmri and old_name point to the original snapshot's FMRI and name.
4163 * Fails with
4164 * _BAD_REQUEST
4165 * _BACKEND_READONLY
4166 * _DELETED
4167 * _NO_RESOURCES
4168 * _TRUNCATED
4169 * _TYPE_MISMATCH
/*
 * See the block comment above for the full contract: entered with
 * np->rn_lock held (np is the snapshot being redirected); either
 * snapid names an existing snapshot id (parentp == NULL) or parentp
 * supplies the entity a brand-new snapshot is taken from (snapid == 0).
 * np's lock is dropped before returning.
 */
4171 static int
4172 rc_attach_snapshot(
4173 rc_node_t *np,
4174 uint32_t snapid,
4175 rc_node_t *parentp,
4176 char *old_fmri,
4177 char *old_name)
4179 rc_node_t *np_orig;
4180 rc_node_t *nnp, *prev;
4181 rc_node_t *pp;
4182 int rc;
4183 size_t sz_out;
4184 perm_status_t granted;
/* The two calling modes are mutually exclusive; assert the invariants. */
4186 if (parentp == NULL) {
4187 assert(old_fmri != NULL);
4188 } else {
4189 assert(snapid == 0);
4191 assert(MUTEX_HELD(&np->rn_lock));
4193 np_orig = np;
4194 rc_node_hold_locked(np); /* simplifies the remainder */
/*
 * Permission check must run unlocked: the authorization library can
 * call back into configd (see the comment in rc_snapshot_take_new()).
 */
4196 (void) pthread_mutex_unlock(&np->rn_lock);
4197 granted = rc_node_modify_permission_check();
4198 switch (granted) {
4199 case PERM_DENIED:
4200 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4201 rc_node_rele(np);
4202 goto cleanout;
4203 case PERM_GRANTED:
4204 break;
4205 case PERM_GONE:
4206 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4207 rc_node_rele(np);
4208 goto cleanout;
4209 case PERM_FAIL:
4210 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4211 rc_node_rele(np);
4212 goto cleanout;
4213 default:
4214 bad_error(rc_node_modify_permission_check, granted);
4216 (void) pthread_mutex_lock(&np->rn_lock);
4219 * get the latest node, holding RC_NODE_IN_TX to keep the rn_former
4220 * list from changing.
4222 for (;;) {
4223 if (!(np->rn_flags & RC_NODE_OLD)) {
4224 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
4225 goto again;
4227 pp = rc_node_hold_parent_flag(np,
4228 RC_NODE_CHILDREN_CHANGING);
4230 (void) pthread_mutex_lock(&np->rn_lock);
4231 if (pp == NULL) {
4232 goto again;
4234 if (np->rn_flags & RC_NODE_OLD) {
4235 rc_node_rele_flag(pp,
4236 RC_NODE_CHILDREN_CHANGING);
4237 (void) pthread_mutex_unlock(&pp->rn_lock);
4238 goto again;
4240 (void) pthread_mutex_unlock(&pp->rn_lock);
4242 if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
4244 * Can't happen, since we're holding our
4245 * parent's CHILDREN_CHANGING flag...
4247 abort();
4249 break; /* everything's ready */
4251 again:
/* np was superseded or a flag grab failed; retry with the in-hash node. */
4252 rc_node_rele_locked(np);
4253 np = cache_lookup(&np_orig->rn_id);
4255 if (np == NULL) {
4256 rc = REP_PROTOCOL_FAIL_DELETED;
4257 goto cleanout;
4260 (void) pthread_mutex_lock(&np->rn_lock);
4264 nnp = prev = NULL;
4265 if (parentp != NULL) {
4266 if (pp != parentp) {
4267 rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
4268 goto fail;
4270 } else {
4272 * look for a former node with the snapid we need.
4274 if (np->rn_snapshot_id == snapid) {
/* Already attached to the requested snapid: nothing to do. */
4275 rc_node_rele_flag(np, RC_NODE_IN_TX);
4276 rc_node_rele_locked(np);
4278 (void) pthread_mutex_lock(&pp->rn_lock);
4279 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4280 (void) pthread_mutex_unlock(&pp->rn_lock);
4281 rc = REP_PROTOCOL_SUCCESS; /* nothing to do */
4282 goto cleanout;
4285 prev = np;
4286 while ((nnp = prev->rn_former) != NULL) {
4287 if (nnp->rn_snapshot_id == snapid) {
4288 rc_node_hold(nnp);
4289 break; /* existing node with that id */
4291 prev = nnp;
/* No former node matched: build a fresh copy of np for the new snapid. */
4295 if (nnp == NULL) {
4296 prev = NULL;
4297 nnp = rc_node_alloc();
4298 if (nnp == NULL) {
4299 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4300 goto fail;
4303 nnp->rn_id = np->rn_id; /* structure assignment */
4304 nnp->rn_hash = np->rn_hash;
4305 nnp->rn_name = strdup(np->rn_name);
4306 nnp->rn_snapshot_id = snapid;
4307 nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
4309 if (nnp->rn_name == NULL) {
4310 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4311 goto fail;
/* Write the attach through to the backend (unlocked). */
4315 (void) pthread_mutex_unlock(&np->rn_lock);
4317 rc = object_snapshot_attach(&np->rn_id, &snapid, (parentp != NULL));
4319 if (parentp != NULL)
4320 nnp->rn_snapshot_id = snapid; /* fill in new snapid */
4321 else
4322 assert(nnp->rn_snapshot_id == snapid);
4324 (void) pthread_mutex_lock(&np->rn_lock);
4325 if (rc != REP_PROTOCOL_SUCCESS)
4326 goto fail;
4329 * fix up the former chain
4331 if (prev != NULL) {
4332 prev->rn_former = nnp->rn_former;
4333 (void) pthread_mutex_lock(&nnp->rn_lock);
4334 nnp->rn_flags &= ~RC_NODE_ON_FORMER;
4335 nnp->rn_former = NULL;
4336 (void) pthread_mutex_unlock(&nnp->rn_lock);
4338 np->rn_flags |= RC_NODE_OLD;
4339 (void) pthread_mutex_unlock(&np->rn_lock);
4342 * replace np with nnp
4344 rc_node_relink_child(pp, np, nnp);
4346 rc_node_rele(np);
4347 rc = REP_PROTOCOL_SUCCESS;
4349 cleanout:
4350 return (rc);
4352 fail:
4353 rc_node_rele_flag(np, RC_NODE_IN_TX);
4354 rc_node_rele_locked(np);
4355 (void) pthread_mutex_lock(&pp->rn_lock);
4356 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4357 (void) pthread_mutex_unlock(&pp->rn_lock);
4359 if (nnp != NULL) {
/* A freshly-allocated nnp (prev == NULL) is only ours; destroy it. */
4360 if (prev == NULL)
4361 rc_node_destroy(nnp);
4362 else
4363 rc_node_rele(nnp);
4366 return (rc);
/*
 * Take a new snapshot named `name` of the instance referenced by npp
 * and return it in *outpp.  svcname/instname, when non-NULL, are
 * validated here and passed through to object_snapshot_take_new().
 * Fails with _PERMISSION_DENIED, _NO_RESOURCES, _TYPE_MISMATCH (npp is
 * not an instance), _DELETED, or whatever the name checks, FMRI
 * lookup, or object_snapshot_take_new() fail with.
 */
4370 rc_snapshot_take_new(rc_node_ptr_t *npp, const char *svcname,
4371 const char *instname, const char *name, rc_node_ptr_t *outpp)
4373 perm_status_t granted;
4374 rc_node_t *np;
4375 rc_node_t *outp = NULL;
4376 int rc, perm_rc;
4377 char fmri[REP_PROTOCOL_FMRI_LEN];
4378 size_t sz_out;
4380 rc_node_clear(outpp, 0);
4383 * rc_node_modify_permission_check() must be called before the node
4384 * is locked. This is because the library functions that check
4385 * authorizations can trigger calls back into configd.
4387 granted = rc_node_modify_permission_check();
4388 switch (granted) {
4389 case PERM_GRANTED:
4390 perm_rc = REP_PROTOCOL_SUCCESS;
4391 break;
4392 case PERM_GONE:
4393 case PERM_DENIED:
4394 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
4395 case PERM_FAIL:
4396 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4397 default:
4398 bad_error("rc_node_modify_permission_check", granted);
4399 break;
/* Macro validates npp and returns early on failure; locks np on success. */
4402 RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, NULL);
4403 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
4404 (void) pthread_mutex_unlock(&np->rn_lock);
4405 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
/* Validate the snapshot name and any name overrides before working. */
4408 rc = rc_check_type_name(REP_PROTOCOL_ENTITY_SNAPSHOT, name);
4409 if (rc != REP_PROTOCOL_SUCCESS) {
4410 (void) pthread_mutex_unlock(&np->rn_lock);
4411 return (rc);
4414 if (svcname != NULL && (rc =
4415 rc_check_type_name(REP_PROTOCOL_ENTITY_SERVICE, svcname)) !=
4416 REP_PROTOCOL_SUCCESS) {
4417 (void) pthread_mutex_unlock(&np->rn_lock);
4418 return (rc);
4421 if (instname != NULL && (rc =
4422 rc_check_type_name(REP_PROTOCOL_ENTITY_INSTANCE, instname)) !=
4423 REP_PROTOCOL_SUCCESS) {
4424 (void) pthread_mutex_unlock(&np->rn_lock);
4425 return (rc);
4428 if ((rc = rc_node_get_fmri_or_fragment(np, fmri, sizeof (fmri),
4429 &sz_out)) != REP_PROTOCOL_SUCCESS) {
4430 (void) pthread_mutex_unlock(&np->rn_lock);
4431 return (rc);
/* Report the (earlier) permission failure only after validation. */
4433 if (perm_rc != REP_PROTOCOL_SUCCESS) {
4434 (void) pthread_mutex_unlock(&np->rn_lock);
4435 return (perm_rc);
4438 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
4439 NULL);
4440 (void) pthread_mutex_unlock(&np->rn_lock);
4442 rc = object_snapshot_take_new(np, svcname, instname, name, &outp);
4444 if (rc == REP_PROTOCOL_SUCCESS) {
4445 rc_node_assign(outpp, outp);
4446 rc_node_rele(outp);
4449 (void) pthread_mutex_lock(&np->rn_lock);
4450 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
4451 (void) pthread_mutex_unlock(&np->rn_lock);
4453 return (rc);
/*
 * Take a fresh snapshot of the instance referenced by npp and attach
 * it to the existing snapshot referenced by outpp (parentp != NULL
 * mode of rc_attach_snapshot(), which does the real work and drops
 * outp's lock).  Fails with _TYPE_MISMATCH (npp is not an instance),
 * _BAD_REQUEST (outpp is not a snapshot), or whatever
 * rc_attach_snapshot() fails with.
 */
4457 rc_snapshot_take_attach(rc_node_ptr_t *npp, rc_node_ptr_t *outpp)
4459 rc_node_t *np, *outp;
4461 RC_NODE_PTR_GET_CHECK(np, npp);
4462 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
4463 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
4466 RC_NODE_PTR_GET_CHECK_AND_LOCK(outp, outpp);
4467 if (outp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
4468 (void) pthread_mutex_unlock(&outp->rn_lock);
4469 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4472 return (rc_attach_snapshot(outp, 0, np, NULL,
4473 NULL)); /* drops outp's lock */
/*
 * Point the snapshot referenced by cpp at the snapid of the snapshot
 * referenced by npp (snapid mode of rc_attach_snapshot(), which does
 * the real work and drops cp's lock).  np's FMRI and name are captured
 * first so the attach can record where the data originally came from.
 * Fails with _BAD_REQUEST (either node is not a snapshot), _TRUNCATED,
 * or whatever rc_attach_snapshot() fails with.
 */
4477 rc_snapshot_attach(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
4479 rc_node_t *np;
4480 rc_node_t *cp;
4481 uint32_t snapid;
4482 char old_name[REP_PROTOCOL_NAME_LEN];
4483 int rc;
4484 size_t sz_out;
4485 char old_fmri[REP_PROTOCOL_FMRI_LEN];
4487 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
4488 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
4489 (void) pthread_mutex_unlock(&np->rn_lock);
4490 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4492 snapid = np->rn_snapshot_id;
4493 rc = rc_node_get_fmri_or_fragment(np, old_fmri, sizeof (old_fmri),
4494 &sz_out);
4495 (void) pthread_mutex_unlock(&np->rn_lock);
4496 if (rc != REP_PROTOCOL_SUCCESS)
4497 return (rc);
/*
 * NOTE(review): np->rn_name is read here after np->rn_lock was dropped
 * above -- presumably rn_name is immutable for the life of the node, but
 * verify.  Also, if rn_name is NULL, old_name is passed to
 * rc_attach_snapshot() uninitialized below; confirm snapshots always
 * have a non-NULL rn_name.
 */
4498 if (np->rn_name != NULL) {
4499 if (strlcpy(old_name, np->rn_name, sizeof (old_name)) >=
4500 sizeof (old_name)) {
4501 return (REP_PROTOCOL_FAIL_TRUNCATED);
4505 RC_NODE_PTR_GET_CHECK_AND_LOCK(cp, cpp);
4506 if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
4507 (void) pthread_mutex_unlock(&cp->rn_lock);
4508 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4511 rc = rc_attach_snapshot(cp, snapid, NULL,
4512 old_fmri, old_name); /* drops cp's lock */
4513 return (rc);
4517 * If the pgname property group under ent has type pgtype, and it has a
4518 * propname property with type ptype, return _SUCCESS. If pgtype is NULL,
4519 * it is not checked. If ent is not a service node, we will return _SUCCESS if
4520 * a property meeting the requirements exists in either the instance or its
4521 * parent.
4523 * Returns
4524 * _SUCCESS - see above
4525 * _DELETED - ent or one of its ancestors was deleted
4526 * _NO_RESOURCES - no resources
4527 * _NOT_FOUND - no matching property was found
4529 static int
4530 rc_svc_prop_exists(rc_node_t *ent, const char *pgname, const char *pgtype,
4531 const char *propname, rep_protocol_value_type_t ptype)
4533 int ret;
/* pg: pgname under ent; spg: pgname under ent's parent service. */
4534 rc_node_t *pg = NULL, *spg = NULL, *svc, *prop;
4536 assert(!MUTEX_HELD(&ent->rn_lock));
/* Look for pgname directly under ent. */
4538 (void) pthread_mutex_lock(&ent->rn_lock);
4539 ret = rc_node_find_named_child(ent, pgname,
4540 REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
4541 (void) pthread_mutex_unlock(&ent->rn_lock);
4543 switch (ret) {
4544 case REP_PROTOCOL_SUCCESS:
4545 break;
4547 case REP_PROTOCOL_FAIL_DELETED:
4548 case REP_PROTOCOL_FAIL_NO_RESOURCES:
4549 return (ret);
4551 default:
4552 bad_error("rc_node_find_named_child", ret);
/* Not a service: also look for pgname under the ancestor service. */
4555 if (ent->rn_id.rl_type != REP_PROTOCOL_ENTITY_SERVICE) {
4556 ret = rc_node_find_ancestor(ent, REP_PROTOCOL_ENTITY_SERVICE,
4557 &svc);
4558 if (ret != REP_PROTOCOL_SUCCESS) {
4559 assert(ret == REP_PROTOCOL_FAIL_DELETED);
4560 if (pg != NULL)
4561 rc_node_rele(pg);
4562 return (ret);
4564 assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);
4566 (void) pthread_mutex_lock(&svc->rn_lock);
4567 ret = rc_node_find_named_child(svc, pgname,
4568 REP_PROTOCOL_ENTITY_PROPERTYGRP, &spg);
4569 (void) pthread_mutex_unlock(&svc->rn_lock);
4571 rc_node_rele(svc);
4573 switch (ret) {
4574 case REP_PROTOCOL_SUCCESS:
4575 break;
4577 case REP_PROTOCOL_FAIL_DELETED:
4578 case REP_PROTOCOL_FAIL_NO_RESOURCES:
4579 if (pg != NULL)
4580 rc_node_rele(pg);
4581 return (ret);
4583 default:
4584 bad_error("rc_node_find_named_child", ret);
/* Discard candidates whose pg type does not match pgtype (if given). */
4588 if (pg != NULL &&
4589 pgtype != NULL && strcmp(pg->rn_type, pgtype) != 0) {
4590 rc_node_rele(pg);
4591 pg = NULL;
4594 if (spg != NULL &&
4595 pgtype != NULL && strcmp(spg->rn_type, pgtype) != 0) {
4596 rc_node_rele(spg);
4597 spg = NULL;
4600 if (pg == NULL) {
4601 if (spg == NULL)
4602 return (REP_PROTOCOL_FAIL_NOT_FOUND);
4603 pg = spg;
4604 spg = NULL;
4608 * At this point, pg is non-NULL, and is a property group node of the
4609 * correct type. spg, if non-NULL, is also a property group node of
4610 * the correct type. Check for the property in pg first, then spg
4611 * (if applicable).
4613 (void) pthread_mutex_lock(&pg->rn_lock);
4614 ret = rc_node_find_named_child(pg, propname,
4615 REP_PROTOCOL_ENTITY_PROPERTY, &prop);
4616 (void) pthread_mutex_unlock(&pg->rn_lock);
4617 rc_node_rele(pg);
4618 switch (ret) {
4619 case REP_PROTOCOL_SUCCESS:
4620 if (prop != NULL) {
/* A match must have both the right name and the right value type. */
4621 if (prop->rn_valtype == ptype) {
4622 rc_node_rele(prop);
4623 if (spg != NULL)
4624 rc_node_rele(spg);
4625 return (REP_PROTOCOL_SUCCESS);
4627 rc_node_rele(prop);
4629 break;
4631 case REP_PROTOCOL_FAIL_NO_RESOURCES:
4632 if (spg != NULL)
4633 rc_node_rele(spg);
4634 return (ret);
4636 case REP_PROTOCOL_FAIL_DELETED:
4637 break;
4639 default:
4640 bad_error("rc_node_find_named_child", ret);
4643 if (spg == NULL)
4644 return (REP_PROTOCOL_FAIL_NOT_FOUND);
/* Fall back to the service-level property group. */
4646 pg = spg;
4648 (void) pthread_mutex_lock(&pg->rn_lock);
4649 ret = rc_node_find_named_child(pg, propname,
4650 REP_PROTOCOL_ENTITY_PROPERTY, &prop);
4651 (void) pthread_mutex_unlock(&pg->rn_lock);
4652 rc_node_rele(pg);
4653 switch (ret) {
4654 case REP_PROTOCOL_SUCCESS:
4655 if (prop != NULL) {
4656 if (prop->rn_valtype == ptype) {
4657 rc_node_rele(prop);
4658 return (REP_PROTOCOL_SUCCESS);
4660 rc_node_rele(prop);
4662 return (REP_PROTOCOL_FAIL_NOT_FOUND);
4664 case REP_PROTOCOL_FAIL_NO_RESOURCES:
4665 return (ret);
4667 case REP_PROTOCOL_FAIL_DELETED:
4668 return (REP_PROTOCOL_FAIL_NOT_FOUND);
4670 default:
4671 bad_error("rc_node_find_named_child", ret);
4674 return (REP_PROTOCOL_SUCCESS);
4678 * Given a property group node, returns _SUCCESS if the property group may
4679 * be read without any special authorization.
4681 * Fails with:
4682 * _DELETED - np or an ancestor node was deleted
4683 * _TYPE_MISMATCH - np does not refer to a property group
4684 * _NO_RESOURCES - no resources
4685 * _PERMISSION_DENIED - authorization is required
4687 static int
4688 rc_node_pg_check_read_protect(rc_node_t *np)
4690 int ret;
4691 rc_node_t *ent;
4693 assert(!MUTEX_HELD(&np->rn_lock));
4695 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
4696 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
4698 if (strcmp(np->rn_type, SCF_GROUP_FRAMEWORK) == 0 ||
4699 strcmp(np->rn_type, SCF_GROUP_DEPENDENCY) == 0 ||
4700 strcmp(np->rn_type, SCF_GROUP_METHOD) == 0)
4701 return (REP_PROTOCOL_SUCCESS);
4703 ret = rc_node_parent(np, &ent);
4705 if (ret != REP_PROTOCOL_SUCCESS)
4706 return (ret);
4708 ret = rc_svc_prop_exists(ent, np->rn_name, np->rn_type,
4709 AUTH_PROP_READ, REP_PROTOCOL_TYPE_STRING);
4711 rc_node_rele(ent);
4713 switch (ret) {
4714 case REP_PROTOCOL_FAIL_NOT_FOUND:
4715 return (REP_PROTOCOL_SUCCESS);
4716 case REP_PROTOCOL_SUCCESS:
4717 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
4718 case REP_PROTOCOL_FAIL_DELETED:
4719 case REP_PROTOCOL_FAIL_NO_RESOURCES:
4720 return (ret);
4721 default:
4722 bad_error("rc_svc_prop_exists", ret);
4725 return (REP_PROTOCOL_SUCCESS);
/*
 * Decide whether the current client may read the values of property np.
 * Privileged clients always succeed; otherwise a permcheck is built from
 * the MODIFY / pg-type / modify_authorization / value_authorization /
 * read_authorization enabling authorizations and evaluated.
 */
4729 * Fails with
4730 * _DELETED - np's node or parent has been deleted
4731 * _TYPE_MISMATCH - np's node is not a property
4732 * _NO_RESOURCES - out of memory
4733 * _PERMISSION_DENIED - no authorization to read this property's value(s)
4734 * _BAD_REQUEST - np's parent is not a property group
4736 static int
4737 rc_node_property_may_read(rc_node_t *np)
4739 int ret;
4740 perm_status_t granted = PERM_DENIED;
4741 rc_node_t *pgp;
4742 permcheck_t *pcp;
4743 size_t sz_out;
4745 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
4746 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
4748 if (client_is_privileged())
4749 return (REP_PROTOCOL_SUCCESS);
4751 #ifdef NATIVE_BUILD
4752 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
4753 #else
4754 ret = rc_node_parent(np, &pgp);
4756 if (ret != REP_PROTOCOL_SUCCESS)
4757 return (ret);
4759 if (pgp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
4760 rc_node_rele(pgp);
4761 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
/*
 * If the parent pg is not read-protected, no further checking is
 * needed; only _PERMISSION_DENIED falls through to the permcheck.
 */
4764 ret = rc_node_pg_check_read_protect(pgp);
4766 if (ret != REP_PROTOCOL_FAIL_PERMISSION_DENIED) {
4767 rc_node_rele(pgp);
4768 return (ret);
4771 pcp = pc_create();
4773 if (pcp == NULL) {
4774 rc_node_rele(pgp);
4775 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4778 ret = perm_add_enabling(pcp, AUTH_MODIFY);
4780 if (ret == REP_PROTOCOL_SUCCESS) {
4781 const char * const auth =
4782 perm_auth_for_pgtype(pgp->rn_type);
4784 if (auth != NULL)
4785 ret = perm_add_enabling(pcp, auth);
4789 * If you are permitted to modify the value, you may also
4790 * read it. This means that both the MODIFY and VALUE
4791 * authorizations are acceptable. We don't allow requests
4792 * for AUTH_PROP_MODIFY if all you have is $AUTH_PROP_VALUE,
4793 * however, to avoid leaking possibly valuable information
4794 * since such a user can't change the property anyway.
4796 if (ret == REP_PROTOCOL_SUCCESS)
4797 ret = perm_add_enabling_values(pcp, pgp,
4798 AUTH_PROP_MODIFY);
4800 if (ret == REP_PROTOCOL_SUCCESS &&
4801 strcmp(np->rn_name, AUTH_PROP_MODIFY) != 0)
4802 ret = perm_add_enabling_values(pcp, pgp,
4803 AUTH_PROP_VALUE);
4805 if (ret == REP_PROTOCOL_SUCCESS)
4806 ret = perm_add_enabling_values(pcp, pgp,
4807 AUTH_PROP_READ);
4809 rc_node_rele(pgp);
4811 if (ret == REP_PROTOCOL_SUCCESS) {
4812 granted = perm_granted(pcp);
4813 if (granted == PERM_FAIL)
4814 ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
4815 if (granted == PERM_GONE)
4816 ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4819 pc_free(pcp);
4821 if ((ret == REP_PROTOCOL_SUCCESS) && (granted == PERM_DENIED))
4822 ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4824 return (ret);
4825 #endif /* NATIVE_BUILD */
4829 * Iteration
4831 static int
4832 rc_iter_filter_name(rc_node_t *np, void *s)
4834 const char *name = s;
4836 return (strcmp(np->rn_name, name) == 0);
4839 static int
4840 rc_iter_filter_type(rc_node_t *np, void *s)
4842 const char *type = s;
4844 return (np->rn_type != NULL && strcmp(np->rn_type, type) == 0);
4847 /*ARGSUSED*/
4848 static int
4849 rc_iter_null_filter(rc_node_t *np, void *s)
4851 return (1);
4855 * Allocate & initialize an rc_node_iter_t structure. Essentially, ensure
4856 * np->rn_children is populated and call uu_list_walk_start(np->rn_children).
4857 * If successful, leaves a hold on np & increments np->rn_other_refs
4859 * If composed is true, then set up for iteration across the top level of np's
4860 * composition chain. If successful, leaves a hold on np and increments
4861 * rn_other_refs for the top level of np's composition chain.
4863 * Fails with
4864 * _NO_RESOURCES
4865 * _INVALID_TYPE
4866 * _TYPE_MISMATCH - np cannot carry type children
4867 * _DELETED
4869 static int
4870 rc_iter_create(rc_node_iter_t **resp, rc_node_t *np, uint32_t type,
4871 rc_iter_filter_func *filter, void *arg, boolean_t composed)
4873 rc_node_iter_t *nip;
4874 int res;
4876 assert(*resp == NULL);
4878 nip = uu_zalloc(sizeof (*nip));
4879 if (nip == NULL)
4880 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4882 /* np is held by the client's rc_node_ptr_t */
/* Composed property groups are always iterated in composed mode. */
4883 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
4884 composed = 1;
4886 if (!composed) {
4887 (void) pthread_mutex_lock(&np->rn_lock);
4889 if ((res = rc_node_fill_children(np, type)) !=
4890 REP_PROTOCOL_SUCCESS) {
4891 (void) pthread_mutex_unlock(&np->rn_lock);
4892 uu_free(nip);
4893 return (res);
/* rni_clevel == -1 marks a non-composed iterator. */
4896 nip->rni_clevel = -1;
4898 nip->rni_iter = uu_list_walk_start(np->rn_children,
4899 UU_WALK_ROBUST);
4900 if (nip->rni_iter != NULL) {
4901 nip->rni_iter_node = np;
4902 rc_node_hold_other(np);
4903 } else {
4904 (void) pthread_mutex_unlock(&np->rn_lock);
4905 uu_free(nip);
4906 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4908 (void) pthread_mutex_unlock(&np->rn_lock);
4909 } else {
4910 rc_node_t *ent;
4912 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
4913 /* rn_cchain isn't valid until children are loaded. */
4914 (void) pthread_mutex_lock(&np->rn_lock);
4915 res = rc_node_fill_children(np,
4916 REP_PROTOCOL_ENTITY_SNAPLEVEL);
4917 (void) pthread_mutex_unlock(&np->rn_lock);
4918 if (res != REP_PROTOCOL_SUCCESS) {
4919 uu_free(nip);
4920 return (res);
4923 /* Check for an empty snapshot. */
4924 if (np->rn_cchain[0] == NULL)
4925 goto empty;
4928 /* Start at the top of the composition chain. */
4929 for (nip->rni_clevel = 0; ; ++nip->rni_clevel) {
4930 if (nip->rni_clevel >= COMPOSITION_DEPTH) {
4931 /* Empty composition chain. */
4932 empty:
4933 nip->rni_clevel = -1;
4934 nip->rni_iter = NULL;
4935 /* It's ok, iter_next() will return _DONE. */
4936 goto out;
4939 ent = np->rn_cchain[nip->rni_clevel];
4940 assert(ent != NULL);
/* Loop exits with ent's rn_lock held. */
4942 if (rc_node_check_and_lock(ent) == REP_PROTOCOL_SUCCESS)
4943 break;
4945 /* Someone deleted it, so try the next one. */
4948 res = rc_node_fill_children(ent, type);
4950 if (res == REP_PROTOCOL_SUCCESS) {
4951 nip->rni_iter = uu_list_walk_start(ent->rn_children,
4952 UU_WALK_ROBUST);
4954 if (nip->rni_iter == NULL)
4955 res = REP_PROTOCOL_FAIL_NO_RESOURCES;
4956 else {
4957 nip->rni_iter_node = ent;
4958 rc_node_hold_other(ent);
4962 if (res != REP_PROTOCOL_SUCCESS) {
4963 (void) pthread_mutex_unlock(&ent->rn_lock);
4964 uu_free(nip);
4965 return (res);
4968 (void) pthread_mutex_unlock(&ent->rn_lock);
4971 out:
4972 rc_node_hold(np); /* released by rc_iter_end() */
4973 nip->rni_parent = np;
4974 nip->rni_type = type;
4975 nip->rni_filter = (filter != NULL)? filter : rc_iter_null_filter;
4976 nip->rni_filter_arg = arg;
4977 *resp = nip;
4978 return (REP_PROTOCOL_SUCCESS);
/*
 * Tear down an iterator: end the uu_list walk (if any), drop the node
 * lock — which the caller must hold on the node currently being walked
 * (see the MUTEX_HELD assert) — and release the holds taken by
 * rc_iter_create().
 */
4981 static void
4982 rc_iter_end(rc_node_iter_t *iter)
4984 rc_node_t *np = iter->rni_parent;
/* For composed iterators, the walked node is the current chain level. */
4986 if (iter->rni_clevel >= 0)
4987 np = np->rn_cchain[iter->rni_clevel];
4989 assert(MUTEX_HELD(&np->rn_lock));
4990 if (iter->rni_iter != NULL)
4991 uu_list_walk_end(iter->rni_iter);
4992 iter->rni_iter = NULL;
4994 (void) pthread_mutex_unlock(&np->rn_lock);
4995 rc_node_rele(iter->rni_parent);
4996 if (iter->rni_iter_node != NULL)
4997 rc_node_rele_other(iter->rni_iter_node);
/*
 * Build an iterator over the values of the property *npp and return it
 * through *iterp.  Takes a hold on the node; released by rc_iter_end().
 */
5001 * Fails with
5002 * _NOT_SET - npp is reset
5003 * _DELETED - npp's node has been deleted
5004 * _NOT_APPLICABLE - npp's node is not a property
5005 * _NO_RESOURCES - out of memory
5007 static int
5008 rc_node_setup_value_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp)
5010 rc_node_t *np;
5012 rc_node_iter_t *nip;
5014 assert(*iterp == NULL);
5016 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
5018 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
5019 (void) pthread_mutex_unlock(&np->rn_lock);
5020 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
5023 nip = uu_zalloc(sizeof (*nip));
5024 if (nip == NULL) {
5025 (void) pthread_mutex_unlock(&np->rn_lock);
5026 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
/* Value iterators track byte offsets into rn_values, not a uu_list. */
5029 nip->rni_parent = np;
5030 nip->rni_iter = NULL;
5031 nip->rni_clevel = -1;
5032 nip->rni_type = REP_PROTOCOL_ENTITY_VALUE;
5033 nip->rni_offset = 0;
5034 nip->rni_last_offset = 0;
5036 rc_node_hold_locked(np);
5038 *iterp = nip;
5039 (void) pthread_mutex_unlock(&np->rn_lock);
5041 return (REP_PROTOCOL_SUCCESS);
/*
 * Copy the first value of property *npp into *out after verifying the
 * client's read authorization.
 */
5045 * Returns:
5046 * _NO_RESOURCES - out of memory
5047 * _NOT_SET - npp is reset
5048 * _DELETED - npp's node has been deleted
5049 * _TYPE_MISMATCH - npp's node is not a property
5050 * _NOT_FOUND - property has no values
5051 * _TRUNCATED - property has >1 values (first is written into out)
5052 * _SUCCESS - property has 1 value (which is written into out)
5053 * _PERMISSION_DENIED - no authorization to read property value(s)
5055 * We shorten *sz_out to not include anything after the final '\0'.
5058 rc_node_get_property_value(rc_node_ptr_t *npp,
5059 struct rep_protocol_value_response *out, size_t *sz_out)
5061 rc_node_t *np;
5062 size_t w;
5063 int ret;
5065 assert(*sz_out == sizeof (*out));
/* Authorization check first, with only a hold (no lock) on the node. */
5067 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
5068 ret = rc_node_property_may_read(np);
5069 rc_node_rele(np);
5071 if (ret != REP_PROTOCOL_SUCCESS)
5072 return (ret);
/* Re-validate and lock; the pointer may have changed in between. */
5074 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
5076 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
5077 (void) pthread_mutex_unlock(&np->rn_lock);
5078 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5081 if (np->rn_values_size == 0) {
5082 (void) pthread_mutex_unlock(&np->rn_lock);
5083 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5085 out->rpr_type = np->rn_valtype;
5086 w = strlcpy(out->rpr_value, &np->rn_values[0],
5087 sizeof (out->rpr_value));
5089 if (w >= sizeof (out->rpr_value))
5090 backend_panic("value too large");
5092 *sz_out = offsetof(struct rep_protocol_value_response,
5093 rpr_value[w + 1]);
5095 ret = (np->rn_values_count != 1)? REP_PROTOCOL_FAIL_TRUNCATED :
5096 REP_PROTOCOL_SUCCESS;
5097 (void) pthread_mutex_unlock(&np->rn_lock);
5098 return (ret);
/*
 * Return the next value of the property being iterated, copying it into
 * *out.  If repeat is nonzero, re-deliver the previously returned value
 * (rni_last_offset) instead of advancing.  Returns REP_PROTOCOL_DONE
 * when the values are exhausted; otherwise _SUCCESS or an error from
 * the authorization / validity checks.
 */
5102 rc_iter_next_value(rc_node_iter_t *iter,
5103 struct rep_protocol_value_response *out, size_t *sz_out, int repeat)
5105 rc_node_t *np = iter->rni_parent;
5106 const char *vals;
5107 size_t len;
5109 size_t start;
5110 size_t w;
5111 int ret;
5113 rep_protocol_responseid_t result;
5115 assert(*sz_out == sizeof (*out));
5117 (void) memset(out, '\0', *sz_out);
5119 if (iter->rni_type != REP_PROTOCOL_ENTITY_VALUE)
5120 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5122 RC_NODE_CHECK(np);
5123 ret = rc_node_property_may_read(np);
5125 if (ret != REP_PROTOCOL_SUCCESS)
5126 return (ret);
5128 RC_NODE_CHECK_AND_LOCK(np);
5130 vals = np->rn_values;
5131 len = np->rn_values_size;
5133 out->rpr_type = np->rn_valtype;
5135 start = (repeat)? iter->rni_last_offset : iter->rni_offset;
5137 if (len == 0 || start >= len) {
5138 result = REP_PROTOCOL_DONE;
5139 *sz_out -= sizeof (out->rpr_value);
5140 } else {
5141 w = strlcpy(out->rpr_value, &vals[start],
5142 sizeof (out->rpr_value));
5144 if (w >= sizeof (out->rpr_value))
5145 backend_panic("value too large");
5147 *sz_out = offsetof(struct rep_protocol_value_response,
5148 rpr_value[w + 1]);
5151 * update the offsets if we're not repeating
5153 if (!repeat) {
5154 iter->rni_last_offset = iter->rni_offset;
/* Values are '\0'-separated; skip past the terminator. */
5155 iter->rni_offset += (w + 1);
5158 result = REP_PROTOCOL_SUCCESS;
5161 (void) pthread_mutex_unlock(&np->rn_lock);
5162 return (result);
5166 * Entry point for ITER_START from client.c. Validate the arguments & call
5167 * rc_iter_create().
5169 * Fails with
5170 * _NOT_SET
5171 * _DELETED
5172 * _TYPE_MISMATCH - np cannot carry type children
5173 * _BAD_REQUEST - flags is invalid
5174 * pattern is invalid
5175 * _NO_RESOURCES
5176 * _INVALID_TYPE
5177 * _TYPE_MISMATCH - *npp cannot have children of type
5178 * _BACKEND_ACCESS
5181 rc_node_setup_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp,
5182 uint32_t type, uint32_t flags, const char *pattern)
5184 rc_node_t *np;
5185 rc_iter_filter_func *f = NULL;
5186 int rc;
5188 RC_NODE_PTR_GET_CHECK(np, npp);
/* An empty pattern is treated the same as no pattern at all. */
5190 if (pattern != NULL && pattern[0] == '\0')
5191 pattern = NULL;
/* Value iteration is a special case: no filters, no composition. */
5193 if (type == REP_PROTOCOL_ENTITY_VALUE) {
5194 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
5195 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5196 if (flags != RP_ITER_START_ALL || pattern != NULL)
5197 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5199 rc = rc_node_setup_value_iter(npp, iterp);
5200 assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
5201 return (rc);
5204 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
5205 REP_PROTOCOL_SUCCESS)
5206 return (rc);
/* A pattern is required iff a filter flag was given (XOR check). */
5208 if (((flags & RP_ITER_START_FILT_MASK) == RP_ITER_START_ALL) ^
5209 (pattern == NULL))
5210 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5212 /* Composition only works for instances & snapshots. */
5213 if ((flags & RP_ITER_START_COMPOSED) &&
5214 (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE &&
5215 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT))
5216 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5218 if (pattern != NULL) {
5219 if ((rc = rc_check_type_name(type, pattern)) !=
5220 REP_PROTOCOL_SUCCESS)
5221 return (rc);
/* Duplicate: the iterator owns the pattern until rc_iter_destroy(). */
5222 pattern = strdup(pattern);
5223 if (pattern == NULL)
5224 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5227 switch (flags & RP_ITER_START_FILT_MASK) {
5228 case RP_ITER_START_ALL:
5229 f = NULL;
5230 break;
5231 case RP_ITER_START_EXACT:
5232 f = rc_iter_filter_name;
5233 break;
5234 case RP_ITER_START_PGTYPE:
5235 if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
5236 free((void *)pattern);
5237 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5239 f = rc_iter_filter_type;
5240 break;
5241 default:
5242 free((void *)pattern);
5243 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5246 rc = rc_iter_create(iterp, np, type, f, (void *)pattern,
5247 flags & RP_ITER_START_COMPOSED);
/* On failure the iterator never took ownership of the pattern copy. */
5248 if (rc != REP_PROTOCOL_SUCCESS && pattern != NULL)
5249 free((void *)pattern);
5251 return (rc);
5255 * Do uu_list_walk_next(iter->rni_iter) until we find a child which matches
5256 * the filter.
5257 * For composed iterators, then check to see if there's an overlapping entity
5258 * (see embedded comments). If we reach the end of the list, start over at
5259 * the next level.
5261 * Returns
5262 * _BAD_REQUEST - iter walks values
5263 * _TYPE_MISMATCH - iter does not walk type entities
5264 * _DELETED - parent was deleted
5265 * _NO_RESOURCES
5266 * _INVALID_TYPE - type is invalid
5267 * _DONE
5268 * _SUCCESS
5270 * For composed property group iterators, can also return
5271 * _TYPE_MISMATCH - parent cannot have type children
5274 rc_iter_next(rc_node_iter_t *iter, rc_node_ptr_t *out, uint32_t type)
5276 rc_node_t *np = iter->rni_parent;
5277 rc_node_t *res;
5278 int rc;
5280 if (iter->rni_type == REP_PROTOCOL_ENTITY_VALUE)
5281 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
/* A NULL walker means the iteration already completed (or was empty). */
5283 if (iter->rni_iter == NULL) {
5284 rc_node_clear(out, 0);
5285 return (REP_PROTOCOL_DONE);
5288 if (iter->rni_type != type) {
5289 rc_node_clear(out, 0);
5290 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5293 (void) pthread_mutex_lock(&np->rn_lock); /* held by _iter_create() */
5295 if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
5296 (void) pthread_mutex_unlock(&np->rn_lock);
5297 rc_node_clear(out, 1);
5298 return (REP_PROTOCOL_FAIL_DELETED);
5301 if (iter->rni_clevel >= 0) {
5302 /* Composed iterator. Iterate over appropriate level. */
5303 (void) pthread_mutex_unlock(&np->rn_lock);
5304 np = np->rn_cchain[iter->rni_clevel];
5306 * If iter->rni_parent is an instance or a snapshot, np must
5307 * be valid since iter holds iter->rni_parent & possible
5308 * levels (service, instance, snaplevel) cannot be destroyed
5309 * while rni_parent is held. If iter->rni_parent is
5310 * a composed property group then rc_node_setup_cpg() put
5311 * a hold on np.
5314 (void) pthread_mutex_lock(&np->rn_lock);
5316 if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
5317 (void) pthread_mutex_unlock(&np->rn_lock);
5318 rc_node_clear(out, 1);
5319 return (REP_PROTOCOL_FAIL_DELETED);
5323 assert(np->rn_flags & RC_NODE_HAS_CHILDREN);
/* Loop exits via rc_iter_end() (list exhausted) or with res held. */
5325 for (;;) {
5326 res = uu_list_walk_next(iter->rni_iter);
5327 if (res == NULL) {
5328 rc_node_t *parent = iter->rni_parent;
5330 #if COMPOSITION_DEPTH == 2
5331 if (iter->rni_clevel < 0 || iter->rni_clevel == 1) {
5332 /* release walker and lock */
5333 rc_iter_end(iter);
5334 break;
5337 /* Stop walking current level. */
5338 uu_list_walk_end(iter->rni_iter);
5339 iter->rni_iter = NULL;
5340 (void) pthread_mutex_unlock(&np->rn_lock);
5341 rc_node_rele_other(iter->rni_iter_node);
5342 iter->rni_iter_node = NULL;
5344 /* Start walking next level. */
5345 ++iter->rni_clevel;
5346 np = parent->rn_cchain[iter->rni_clevel];
5347 assert(np != NULL);
5348 #else
5349 #error This code must be updated.
5350 #endif
5352 (void) pthread_mutex_lock(&np->rn_lock);
5354 rc = rc_node_fill_children(np, iter->rni_type);
5356 if (rc == REP_PROTOCOL_SUCCESS) {
5357 iter->rni_iter =
5358 uu_list_walk_start(np->rn_children,
5359 UU_WALK_ROBUST);
5361 if (iter->rni_iter == NULL)
5362 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5363 else {
5364 iter->rni_iter_node = np;
5365 rc_node_hold_other(np);
5369 if (rc != REP_PROTOCOL_SUCCESS) {
5370 (void) pthread_mutex_unlock(&np->rn_lock);
5371 rc_node_clear(out, 0);
5372 return (rc);
5375 continue;
/* Skip children of the wrong type or rejected by the filter. */
5378 if (res->rn_id.rl_type != type ||
5379 !iter->rni_filter(res, iter->rni_filter_arg))
5380 continue;
5383 * If we're composed and not at the top level, check to see if
5384 * there's an entity at a higher level with the same name. If
5385 * so, skip this one.
5387 if (iter->rni_clevel > 0) {
5388 rc_node_t *ent = iter->rni_parent->rn_cchain[0];
5389 rc_node_t *pg;
5391 #if COMPOSITION_DEPTH == 2
5392 assert(iter->rni_clevel == 1);
5394 (void) pthread_mutex_unlock(&np->rn_lock);
5395 (void) pthread_mutex_lock(&ent->rn_lock);
5396 rc = rc_node_find_named_child(ent, res->rn_name, type,
5397 &pg);
5398 if (rc == REP_PROTOCOL_SUCCESS && pg != NULL)
5399 rc_node_rele(pg);
5400 (void) pthread_mutex_unlock(&ent->rn_lock);
5401 if (rc != REP_PROTOCOL_SUCCESS) {
5402 rc_node_clear(out, 0);
5403 return (rc);
5405 (void) pthread_mutex_lock(&np->rn_lock);
5407 /* Make sure np isn't being deleted all of a sudden. */
5408 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
5409 (void) pthread_mutex_unlock(&np->rn_lock);
5410 rc_node_clear(out, 1);
5411 return (REP_PROTOCOL_FAIL_DELETED);
5414 if (pg != NULL)
5415 /* Keep going. */
5416 continue;
5417 #else
5418 #error This code must be updated.
5419 #endif
5423 * If we're composed, iterating over property groups, and not
5424 * at the bottom level, check to see if there's a pg at lower
5425 * level with the same name. If so, return a cpg.
5427 if (iter->rni_clevel >= 0 &&
5428 type == REP_PROTOCOL_ENTITY_PROPERTYGRP &&
5429 iter->rni_clevel < COMPOSITION_DEPTH - 1) {
5430 #if COMPOSITION_DEPTH == 2
5431 rc_node_t *pg;
5432 rc_node_t *ent = iter->rni_parent->rn_cchain[1];
5434 rc_node_hold(res); /* While we drop np->rn_lock */
5436 (void) pthread_mutex_unlock(&np->rn_lock);
5437 (void) pthread_mutex_lock(&ent->rn_lock);
5438 rc = rc_node_find_named_child(ent, res->rn_name, type,
5439 &pg);
5440 /* holds pg if not NULL */
5441 (void) pthread_mutex_unlock(&ent->rn_lock);
5442 if (rc != REP_PROTOCOL_SUCCESS) {
5443 rc_node_rele(res);
5444 rc_node_clear(out, 0);
5445 return (rc);
5448 (void) pthread_mutex_lock(&np->rn_lock);
5449 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
5450 (void) pthread_mutex_unlock(&np->rn_lock);
5451 rc_node_rele(res);
5452 if (pg != NULL)
5453 rc_node_rele(pg);
5454 rc_node_clear(out, 1);
5455 return (REP_PROTOCOL_FAIL_DELETED);
5458 if (pg == NULL) {
5459 (void) pthread_mutex_unlock(&np->rn_lock);
5460 rc_node_rele(res);
5461 (void) pthread_mutex_lock(&np->rn_lock);
5462 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
5463 (void) pthread_mutex_unlock(&np->
5464 rn_lock);
5465 rc_node_clear(out, 1);
5466 return (REP_PROTOCOL_FAIL_DELETED);
5468 } else {
5469 rc_node_t *cpg;
5471 /* Keep res held for rc_node_setup_cpg(). */
5473 cpg = rc_node_alloc();
5474 if (cpg == NULL) {
5475 (void) pthread_mutex_unlock(
5476 &np->rn_lock);
5477 rc_node_rele(res);
5478 rc_node_rele(pg);
5479 rc_node_clear(out, 0);
5480 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5483 switch (rc_node_setup_cpg(cpg, res, pg)) {
5484 case REP_PROTOCOL_SUCCESS:
5485 res = cpg;
5486 break;
5488 case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
5489 /* Nevermind. */
5490 (void) pthread_mutex_unlock(&np->
5491 rn_lock);
5492 rc_node_destroy(cpg);
5493 rc_node_rele(pg);
5494 rc_node_rele(res);
5495 (void) pthread_mutex_lock(&np->
5496 rn_lock);
5497 if (!rc_node_wait_flag(np,
5498 RC_NODE_DYING)) {
5499 (void) pthread_mutex_unlock(&
5500 np->rn_lock);
5501 rc_node_clear(out, 1);
5502 return
5503 (REP_PROTOCOL_FAIL_DELETED);
5505 break;
5507 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5508 rc_node_destroy(cpg);
5509 (void) pthread_mutex_unlock(
5510 &np->rn_lock);
5511 rc_node_rele(res);
5512 rc_node_rele(pg);
5513 rc_node_clear(out, 0);
5514 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5516 default:
5517 assert(0);
5518 abort();
5521 #else
5522 #error This code must be updated.
5523 #endif
5526 rc_node_hold(res);
5527 (void) pthread_mutex_unlock(&np->rn_lock);
5528 break;
5530 rc_node_assign(out, res);
/* res is NULL only when rc_iter_end() ended the walk above. */
5532 if (res == NULL)
5533 return (REP_PROTOCOL_DONE);
5534 rc_node_rele(res);
5535 return (REP_PROTOCOL_SUCCESS);
/*
 * Free an iterator, first releasing any holds/locks it still owns via
 * rc_iter_end().  Safe to call on an already-destroyed (*nipp == NULL)
 * iterator pointer.
 */
5538 void
5539 rc_iter_destroy(rc_node_iter_t **nipp)
5541 rc_node_iter_t *nip = *nipp;
5542 rc_node_t *np;
5544 if (nip == NULL)
5545 return; /* already freed */
5547 np = nip->rni_parent;
/* Pattern string dup'd by rc_node_setup_iter(). */
5549 free(nip->rni_filter_arg);
5550 nip->rni_filter_arg = NULL;
/* Value iterators and still-active walks hold a reference to release. */
5552 if (nip->rni_type == REP_PROTOCOL_ENTITY_VALUE ||
5553 nip->rni_iter != NULL) {
/* rc_iter_end() expects the walked node's lock to be held. */
5554 if (nip->rni_clevel < 0)
5555 (void) pthread_mutex_lock(&np->rn_lock);
5556 else
5557 (void) pthread_mutex_lock(
5558 &np->rn_cchain[nip->rni_clevel]->rn_lock);
5559 rc_iter_end(nip); /* release walker and lock */
5561 nip->rni_parent = NULL;
5563 uu_free(nip);
5564 *nipp = NULL;
/*
 * Set up a transaction handle txp on the property group *npp, performing
 * the modification permission checks up front.  The result of the check
 * is recorded in txp->rnp_authorized (RC_AUTH_PASSED / RC_AUTH_FAILED /
 * RC_AUTH_UNKNOWN) for rc_tx_commit() to consult later.
 */
5568 rc_node_setup_tx(rc_node_ptr_t *npp, rc_node_ptr_t *txp)
5570 rc_node_t *np;
5571 permcheck_t *pcp;
5572 int ret;
5573 perm_status_t granted;
5574 rc_auth_state_t authorized = RC_AUTH_UNKNOWN;
5575 char *auth_string = NULL;
5577 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
/* For a composed pg, transact on the top (instance) level. */
5579 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
5580 rc_node_rele(np);
5581 np = np->rn_cchain[0];
5582 RC_NODE_CHECK_AND_HOLD(np);
5585 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
5586 rc_node_rele(np);
5587 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
/* Property groups inside snapshots are read-only. */
5590 if (np->rn_id.rl_ids[ID_SNAPSHOT] != 0) {
5591 rc_node_rele(np);
5592 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5595 #ifdef NATIVE_BUILD
5596 if (client_is_privileged())
5597 goto skip_checks;
5598 rc_node_rele(np);
5599 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5600 #else
5601 if (is_main_repository == 0)
5602 goto skip_checks;
5604 /* permission check */
5605 pcp = pc_create();
5606 if (pcp == NULL) {
5607 rc_node_rele(np);
5608 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
/* The actions & general_ovr instance pgs take special authorizations. */
5611 if (np->rn_id.rl_ids[ID_INSTANCE] != 0 && /* instance pg */
5612 ((strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0 &&
5613 strcmp(np->rn_type, AUTH_PG_ACTIONS_TYPE) == 0) ||
5614 (strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
5615 strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
5616 rc_node_t *instn;
5618 /* solaris.smf.modify can be used */
5619 ret = perm_add_enabling(pcp, AUTH_MODIFY);
5620 if (ret != REP_PROTOCOL_SUCCESS) {
5621 pc_free(pcp);
5622 rc_node_rele(np);
5623 return (ret);
5626 /* solaris.smf.manage can be used. */
5627 ret = perm_add_enabling(pcp, AUTH_MANAGE);
5629 if (ret != REP_PROTOCOL_SUCCESS) {
5630 pc_free(pcp);
5631 rc_node_rele(np);
5632 return (ret);
5635 /* general/action_authorization values can be used. */
5636 ret = rc_node_parent(np, &instn);
5637 if (ret != REP_PROTOCOL_SUCCESS) {
5638 assert(ret == REP_PROTOCOL_FAIL_DELETED);
5639 rc_node_rele(np);
5640 pc_free(pcp);
5641 return (REP_PROTOCOL_FAIL_DELETED);
5644 assert(instn->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
5646 ret = perm_add_inst_action_auth(pcp, instn);
5647 rc_node_rele(instn);
5648 switch (ret) {
5649 case REP_PROTOCOL_SUCCESS:
5650 break;
5652 case REP_PROTOCOL_FAIL_DELETED:
5653 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5654 rc_node_rele(np);
5655 pc_free(pcp);
5656 return (ret);
5658 default:
5659 bad_error("perm_add_inst_action_auth", ret);
5662 if (strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0)
5663 authorized = RC_AUTH_PASSED; /* No check on commit. */
5664 } else {
5665 ret = perm_add_enabling(pcp, AUTH_MODIFY);
5667 if (ret == REP_PROTOCOL_SUCCESS) {
5668 /* propertygroup-type-specific authorization */
5669 /* no locking because rn_type won't change anyway */
5670 const char * const auth =
5671 perm_auth_for_pgtype(np->rn_type);
5673 if (auth != NULL)
5674 ret = perm_add_enabling(pcp, auth);
5677 if (ret == REP_PROTOCOL_SUCCESS)
5678 /* propertygroup/transaction-type-specific auths */
5679 ret =
5680 perm_add_enabling_values(pcp, np, AUTH_PROP_VALUE);
5682 if (ret == REP_PROTOCOL_SUCCESS)
5683 ret =
5684 perm_add_enabling_values(pcp, np, AUTH_PROP_MODIFY);
5686 /* AUTH_MANAGE can manipulate general/AUTH_PROP_ACTION */
5687 if (ret == REP_PROTOCOL_SUCCESS &&
5688 strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
5689 strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0)
5690 ret = perm_add_enabling(pcp, AUTH_MANAGE);
5692 if (ret != REP_PROTOCOL_SUCCESS) {
5693 pc_free(pcp);
5694 rc_node_rele(np);
5695 return (ret);
5699 granted = perm_granted(pcp);
5700 ret = map_granted_status(granted, pcp);
5701 pc_free(pcp);
5703 if ((granted == PERM_GONE) || (granted == PERM_FAIL) ||
5704 (ret == REP_PROTOCOL_FAIL_NO_RESOURCES)) {
5705 free(auth_string);
5706 rc_node_rele(np);
5707 return (ret);
/* Denial is recorded, not returned; commit reports the failure. */
5710 if (granted == PERM_DENIED) {
5711 authorized = RC_AUTH_FAILED;
5713 #endif /* NATIVE_BUILD */
5715 skip_checks:
5716 rc_node_assign(txp, np);
5717 txp->rnp_authorized = authorized;
5718 if (authorized != RC_AUTH_UNKNOWN) {
5719 /* Save the authorization string. */
5720 if (txp->rnp_auth_string != NULL)
5721 free((void *)txp->rnp_auth_string);
5722 txp->rnp_auth_string = auth_string;
5723 auth_string = NULL; /* Don't free until done with txp. */
5726 rc_node_rele(np);
5727 free(auth_string);
5728 return (REP_PROTOCOL_SUCCESS);
5732 * Return 1 if the given transaction commands only modify the values of
5733 * properties other than "modify_authorization". Return -1 if any of the
5734 * commands are invalid, and 0 otherwise.
5736 static int
5737 tx_allow_value(const void *cmds_arg, size_t cmds_sz, rc_node_t *pg)
5739 const struct rep_protocol_transaction_cmd *cmds;
5740 uintptr_t loc;
5741 uint32_t sz;
5742 rc_node_t *prop;
5743 boolean_t ok;
5745 assert(!MUTEX_HELD(&pg->rn_lock));
5747 loc = (uintptr_t)cmds_arg;
/* Walk the packed command buffer, validating each header as we go. */
5749 while (cmds_sz > 0) {
5750 cmds = (struct rep_protocol_transaction_cmd *)loc;
5752 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
5753 return (-1);
5755 sz = cmds->rptc_size;
5756 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
5757 return (-1);
5759 sz = TX_SIZE(sz);
5760 if (sz > cmds_sz)
5761 return (-1);
/* Only CLEAR, and type-preserving REPLACE, count as value-only. */
5763 switch (cmds[0].rptc_action) {
5764 case REP_PROTOCOL_TX_ENTRY_CLEAR:
5765 break;
5767 case REP_PROTOCOL_TX_ENTRY_REPLACE:
5768 /* Check type */
5769 (void) pthread_mutex_lock(&pg->rn_lock);
5770 ok = B_FALSE;
5771 if (rc_node_find_named_child(pg,
5772 (const char *)cmds[0].rptc_data,
5773 REP_PROTOCOL_ENTITY_PROPERTY, &prop) ==
5774 REP_PROTOCOL_SUCCESS) {
5775 if (prop != NULL) {
5776 ok = prop->rn_valtype ==
5777 cmds[0].rptc_type;
5779 * rc_node_find_named_child()
5780 * places a hold on prop which we
5781 * do not need to hang on to.
5783 rc_node_rele(prop);
5786 (void) pthread_mutex_unlock(&pg->rn_lock);
5787 if (ok)
5788 break;
5789 return (0);
5791 default:
5792 return (0);
5795 if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_MODIFY)
5796 == 0)
5797 return (0);
5799 loc += sz;
5800 cmds_sz -= sz;
5803 return (1);
5807 * Return 1 if any of the given transaction commands affect
5808 * "action_authorization". Return -1 if any of the commands are invalid and
5809 * 0 in all other cases.
5811 static int
5812 tx_modifies_action(const void *cmds_arg, size_t cmds_sz)
5814 const struct rep_protocol_transaction_cmd *cmds;
5815 uintptr_t loc;
5816 uint32_t sz;
5818 loc = (uintptr_t)cmds_arg;
5820 while (cmds_sz > 0) {
5821 cmds = (struct rep_protocol_transaction_cmd *)loc;
5823 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
5824 return (-1);
5826 sz = cmds->rptc_size;
5827 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
5828 return (-1);
5830 sz = TX_SIZE(sz);
5831 if (sz > cmds_sz)
5832 return (-1);
5834 if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_ACTION)
5835 == 0)
5836 return (1);
5838 loc += sz;
5839 cmds_sz -= sz;
5842 return (0);
5846 * Returns 1 if the transaction commands only modify properties named
5847 * 'enabled'.
5849 static int
5850 tx_only_enabled(const void *cmds_arg, size_t cmds_sz)
5852 const struct rep_protocol_transaction_cmd *cmd;
5853 uintptr_t loc;
5854 uint32_t sz;
5856 loc = (uintptr_t)cmds_arg;
5858 while (cmds_sz > 0) {
5859 cmd = (struct rep_protocol_transaction_cmd *)loc;
5861 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
5862 return (-1);
5864 sz = cmd->rptc_size;
5865 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
5866 return (-1);
5868 sz = TX_SIZE(sz);
5869 if (sz > cmds_sz)
5870 return (-1);
5872 if (strcmp((const char *)cmd->rptc_data, AUTH_PROP_ENABLED)
5873 != 0)
5874 return (0);
5876 loc += sz;
5877 cmds_sz -= sz;
5880 return (1);
5884 rc_tx_commit(rc_node_ptr_t *txp, const void *cmds, size_t cmds_sz)
5886 rc_node_t *np = txp->rnp_node;
5887 rc_node_t *pp;
5888 rc_node_t *nnp;
5889 rc_node_pg_notify_t *pnp;
5890 int rc;
5891 permcheck_t *pcp;
5892 perm_status_t granted;
5893 int normal;
5894 char *pg_fmri = NULL;
5895 char *auth_string = NULL;
5896 size_t sz_out;
5897 int tx_flag = 1;
5898 tx_commit_data_t *tx_data = NULL;
5900 RC_NODE_CHECK(np);
5902 if ((txp->rnp_authorized != RC_AUTH_UNKNOWN) &&
5903 (txp->rnp_auth_string != NULL)) {
5904 auth_string = strdup(txp->rnp_auth_string);
5905 if (auth_string == NULL)
5906 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5909 if ((txp->rnp_authorized == RC_AUTH_UNKNOWN) &&
5910 is_main_repository) {
5911 #ifdef NATIVE_BUILD
5912 if (!client_is_privileged()) {
5913 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5915 #else
5916 /* permission check: depends on contents of transaction */
5917 pcp = pc_create();
5918 if (pcp == NULL)
5919 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5921 /* If normal is cleared, we won't do the normal checks. */
5922 normal = 1;
5923 rc = REP_PROTOCOL_SUCCESS;
5925 if (strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
5926 strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0) {
5927 /* Touching general[framework]/action_authorization? */
5928 rc = tx_modifies_action(cmds, cmds_sz);
5929 if (rc == -1) {
5930 pc_free(pcp);
5931 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5934 if (rc) {
5936 * Yes: only AUTH_MODIFY and AUTH_MANAGE
5937 * can be used.
5939 rc = perm_add_enabling(pcp, AUTH_MODIFY);
5941 if (rc == REP_PROTOCOL_SUCCESS)
5942 rc = perm_add_enabling(pcp,
5943 AUTH_MANAGE);
5945 normal = 0;
5946 } else {
5947 rc = REP_PROTOCOL_SUCCESS;
5949 } else if (np->rn_id.rl_ids[ID_INSTANCE] != 0 &&
5950 strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
5951 strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0) {
5952 rc_node_t *instn;
5954 rc = tx_only_enabled(cmds, cmds_sz);
5955 if (rc == -1) {
5956 pc_free(pcp);
5957 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5960 if (rc) {
5961 rc = rc_node_parent(np, &instn);
5962 if (rc != REP_PROTOCOL_SUCCESS) {
5963 assert(rc == REP_PROTOCOL_FAIL_DELETED);
5964 pc_free(pcp);
5965 return (rc);
5968 assert(instn->rn_id.rl_type ==
5969 REP_PROTOCOL_ENTITY_INSTANCE);
5971 rc = perm_add_inst_action_auth(pcp, instn);
5972 rc_node_rele(instn);
5973 switch (rc) {
5974 case REP_PROTOCOL_SUCCESS:
5975 break;
5977 case REP_PROTOCOL_FAIL_DELETED:
5978 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5979 pc_free(pcp);
5980 return (rc);
5982 default:
5983 bad_error("perm_add_inst_action_auth",
5984 rc);
5986 } else {
5987 rc = REP_PROTOCOL_SUCCESS;
5991 if (rc == REP_PROTOCOL_SUCCESS && normal) {
5992 rc = perm_add_enabling(pcp, AUTH_MODIFY);
5994 if (rc == REP_PROTOCOL_SUCCESS) {
5995 /* Add pgtype-specific authorization. */
5996 const char * const auth =
5997 perm_auth_for_pgtype(np->rn_type);
5999 if (auth != NULL)
6000 rc = perm_add_enabling(pcp, auth);
6003 /* Add pg-specific modify_authorization auths. */
6004 if (rc == REP_PROTOCOL_SUCCESS)
6005 rc = perm_add_enabling_values(pcp, np,
6006 AUTH_PROP_MODIFY);
6008 /* If value_authorization values are ok, add them. */
6009 if (rc == REP_PROTOCOL_SUCCESS) {
6010 rc = tx_allow_value(cmds, cmds_sz, np);
6011 if (rc == -1)
6012 rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
6013 else if (rc)
6014 rc = perm_add_enabling_values(pcp, np,
6015 AUTH_PROP_VALUE);
6019 if (rc == REP_PROTOCOL_SUCCESS) {
6020 granted = perm_granted(pcp);
6021 rc = map_granted_status(granted, pcp);
6024 pc_free(pcp);
6026 if (rc != REP_PROTOCOL_SUCCESS)
6027 goto cleanout;
6029 if (granted == PERM_DENIED) {
6030 tx_flag = 0;
6032 #endif /* NATIVE_BUILD */
6033 } else if (txp->rnp_authorized == RC_AUTH_FAILED) {
6034 tx_flag = 0;
6037 pg_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
6038 if (pg_fmri == NULL) {
6039 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
6040 goto cleanout;
6042 if ((rc = rc_node_get_fmri_or_fragment(np, pg_fmri,
6043 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
6044 goto cleanout;
6048 * Parse the transaction commands into a useful form.
6050 if ((rc = tx_commit_data_new(cmds, cmds_sz, &tx_data)) !=
6051 REP_PROTOCOL_SUCCESS) {
6052 goto cleanout;
6055 if (tx_flag == 0) {
6056 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
6057 goto cleanout;
6060 nnp = rc_node_alloc();
6061 if (nnp == NULL) {
6062 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
6063 goto cleanout;
6066 nnp->rn_id = np->rn_id; /* structure assignment */
6067 nnp->rn_hash = np->rn_hash;
6068 nnp->rn_name = strdup(np->rn_name);
6069 nnp->rn_type = strdup(np->rn_type);
6070 nnp->rn_pgflags = np->rn_pgflags;
6072 nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
6074 if (nnp->rn_name == NULL || nnp->rn_type == NULL) {
6075 rc_node_destroy(nnp);
6076 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
6077 goto cleanout;
6080 (void) pthread_mutex_lock(&np->rn_lock);
6083 * We must have all of the old properties in the cache, or the
6084 * database deletions could cause inconsistencies.
6086 if ((rc = rc_node_fill_children(np, REP_PROTOCOL_ENTITY_PROPERTY)) !=
6087 REP_PROTOCOL_SUCCESS) {
6088 (void) pthread_mutex_unlock(&np->rn_lock);
6089 rc_node_destroy(nnp);
6090 goto cleanout;
6093 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
6094 (void) pthread_mutex_unlock(&np->rn_lock);
6095 rc_node_destroy(nnp);
6096 rc = REP_PROTOCOL_FAIL_DELETED;
6097 goto cleanout;
6100 if (np->rn_flags & RC_NODE_OLD) {
6101 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
6102 (void) pthread_mutex_unlock(&np->rn_lock);
6103 rc_node_destroy(nnp);
6104 rc = REP_PROTOCOL_FAIL_NOT_LATEST;
6105 goto cleanout;
6108 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
6109 if (pp == NULL) {
6110 /* our parent is gone, we're going next... */
6111 rc_node_destroy(nnp);
6112 (void) pthread_mutex_lock(&np->rn_lock);
6113 if (np->rn_flags & RC_NODE_OLD) {
6114 (void) pthread_mutex_unlock(&np->rn_lock);
6115 rc = REP_PROTOCOL_FAIL_NOT_LATEST;
6116 goto cleanout;
6118 (void) pthread_mutex_unlock(&np->rn_lock);
6119 rc = REP_PROTOCOL_FAIL_DELETED;
6120 goto cleanout;
6122 (void) pthread_mutex_unlock(&pp->rn_lock);
6125 * prepare for the transaction
6127 (void) pthread_mutex_lock(&np->rn_lock);
6128 if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
6129 (void) pthread_mutex_unlock(&np->rn_lock);
6130 (void) pthread_mutex_lock(&pp->rn_lock);
6131 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
6132 (void) pthread_mutex_unlock(&pp->rn_lock);
6133 rc_node_destroy(nnp);
6134 rc = REP_PROTOCOL_FAIL_DELETED;
6135 goto cleanout;
6137 nnp->rn_gen_id = np->rn_gen_id;
6138 (void) pthread_mutex_unlock(&np->rn_lock);
6140 /* Sets nnp->rn_gen_id on success. */
6141 rc = object_tx_commit(&np->rn_id, tx_data, &nnp->rn_gen_id);
6143 (void) pthread_mutex_lock(&np->rn_lock);
6144 if (rc != REP_PROTOCOL_SUCCESS) {
6145 rc_node_rele_flag(np, RC_NODE_IN_TX);
6146 (void) pthread_mutex_unlock(&np->rn_lock);
6147 (void) pthread_mutex_lock(&pp->rn_lock);
6148 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
6149 (void) pthread_mutex_unlock(&pp->rn_lock);
6150 rc_node_destroy(nnp);
6151 rc_node_clear(txp, 0);
6152 if (rc == REP_PROTOCOL_DONE)
6153 rc = REP_PROTOCOL_SUCCESS; /* successful empty tx */
6154 goto cleanout;
6158 * Notify waiters
6160 (void) pthread_mutex_lock(&rc_pg_notify_lock);
6161 while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
6162 rc_pg_notify_fire(pnp);
6163 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
6165 np->rn_flags |= RC_NODE_OLD;
6166 (void) pthread_mutex_unlock(&np->rn_lock);
6168 rc_notify_remove_node(np);
6171 * replace np with nnp
6173 rc_node_relink_child(pp, np, nnp);
6176 * all done -- clear the transaction.
6178 rc_node_clear(txp, 0);
6180 rc = REP_PROTOCOL_SUCCESS;
6182 cleanout:
6183 free(pg_fmri);
6184 tx_commit_data_free(tx_data);
6185 return (rc);
6188 void
6189 rc_pg_notify_init(rc_node_pg_notify_t *pnp)
6191 uu_list_node_init(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
6192 pnp->rnpn_pg = NULL;
6193 pnp->rnpn_fd = -1;
/*
 * Register pnp so that fd is signalled when the property group referenced
 * by npp is next modified (see rc_pg_notify_fire(), called from
 * rc_tx_commit()).  Only valid on a property group that is current and not
 * mid-transaction.
 *
 * Returns:
 *   REP_PROTOCOL_SUCCESS
 *   REP_PROTOCOL_FAIL_BAD_REQUEST	- npp is not a property group
 *   REP_PROTOCOL_FAIL_DELETED		- node deleted while waiting
 *   REP_PROTOCOL_FAIL_NOT_LATEST	- node superseded by a newer version
 */
int
rc_pg_notify_setup(rc_node_pg_notify_t *pnp, rc_node_ptr_t *npp, int fd)
{
	rc_node_t *np;

	/* Validates npp and returns with np->rn_lock held. */
	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);

	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
	}

	/*
	 * wait for any transaction in progress to complete
	 */
	if (!rc_node_wait_flag(np, RC_NODE_IN_TX)) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		return (REP_PROTOCOL_FAIL_DELETED);
	}

	if (np->rn_flags & RC_NODE_OLD) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		return (REP_PROTOCOL_FAIL_NOT_LATEST);
	}

	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	/* Fire (and thereby detach) any prior registration of pnp. */
	rc_pg_notify_fire(pnp);
	pnp->rnpn_pg = np;
	pnp->rnpn_fd = fd;
	(void) uu_list_insert_after(np->rn_pg_notify_list, NULL, pnp);
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);

	(void) pthread_mutex_unlock(&np->rn_lock);
	return (REP_PROTOCOL_SUCCESS);
}
/*
 * Tear down a pg-notify registration: fire/detach it under
 * rc_pg_notify_lock, then release its list node.
 */
void
rc_pg_notify_fini(rc_node_pg_notify_t *pnp)
{
	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	rc_pg_notify_fire(pnp);
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);

	uu_list_node_fini(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
}
6242 void
6243 rc_notify_info_init(rc_notify_info_t *rnip)
6245 int i;
6247 uu_list_node_init(rnip, &rnip->rni_list_node, rc_notify_info_pool);
6248 uu_list_node_init(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
6249 rc_notify_pool);
6251 rnip->rni_notify.rcn_node = NULL;
6252 rnip->rni_notify.rcn_info = rnip;
6254 bzero(rnip->rni_namelist, sizeof (rnip->rni_namelist));
6255 bzero(rnip->rni_typelist, sizeof (rnip->rni_typelist));
6257 (void) pthread_cond_init(&rnip->rni_cv, NULL);
6259 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
6260 rnip->rni_namelist[i] = NULL;
6261 rnip->rni_typelist[i] = NULL;
/*
 * Activate a notification client: mark it active, add it to the global
 * client list, and append its marker node to the tail of the global
 * notification list.  Caller must hold rc_pg_notify_lock.
 */
static void
rc_notify_info_insert_locked(rc_notify_info_t *rnip)
{
	assert(MUTEX_HELD(&rc_pg_notify_lock));
	assert(!(rnip->rni_flags & RC_NOTIFY_ACTIVE));

	rnip->rni_flags |= RC_NOTIFY_ACTIVE;
	(void) uu_list_insert_after(rc_notify_info_list, NULL, rnip);
	(void) uu_list_insert_before(rc_notify_list, NULL, &rnip->rni_notify);
}
/*
 * Deactivate a notification client.  Sets RC_NOTIFY_DRAIN (which makes
 * rc_notify_info_wait() return), removes the client from the global
 * client list, cleans up any notifications its marker node was
 * responsible for, removes the marker node, and waits for all of the
 * client's waiters to drain out before clearing the flags.
 *
 * Caller must hold rc_pg_notify_lock (which pthread_cond_wait() drops
 * and reacquires).
 */
static void
rc_notify_info_remove_locked(rc_notify_info_t *rnip)
{
	rc_notify_t *me = &rnip->rni_notify;
	rc_notify_t *np;

	assert(MUTEX_HELD(&rc_pg_notify_lock));
	assert(rnip->rni_flags & RC_NOTIFY_ACTIVE);
	assert(!(rnip->rni_flags & RC_NOTIFY_DRAIN));

	rnip->rni_flags |= RC_NOTIFY_DRAIN;
	(void) pthread_cond_broadcast(&rnip->rni_cv);

	(void) uu_list_remove(rc_notify_info_list, rnip);

	/*
	 * clean up any notifications at the beginning of the list
	 */
	if (uu_list_first(rc_notify_list) == me) {
		/*
		 * We can't call rc_notify_remove_locked() unless
		 * rc_notify_in_use is 0.
		 */
		while (rc_notify_in_use) {
			(void) pthread_cond_wait(&rc_pg_notify_cv,
			    &rc_pg_notify_lock);
		}
		/* Remove leading entries no other client marker precedes. */
		while ((np = uu_list_next(rc_notify_list, me)) != NULL &&
		    np->rcn_info == NULL)
			rc_notify_remove_locked(np);
	}
	(void) uu_list_remove(rc_notify_list, me);

	/* Keep waking waiters until they have all noticed the drain. */
	while (rnip->rni_waiters) {
		(void) pthread_cond_broadcast(&rc_pg_notify_cv);
		(void) pthread_cond_broadcast(&rnip->rni_cv);
		(void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
	}

	rnip->rni_flags &= ~(RC_NOTIFY_DRAIN | RC_NOTIFY_ACTIVE);
}
/*
 * Add a pg name (or type) to one of rnip's watch arrays (arr is either
 * rni_namelist or rni_typelist, each RC_NOTIFY_MAX_NAMES entries), and
 * activate the client if it isn't active yet.  Duplicates are silently
 * accepted.  Takes ownership of a strdup'd copy of name.
 *
 * Returns:
 *   REP_PROTOCOL_SUCCESS
 *   REP_PROTOCOL_FAIL_BAD_REQUEST	- name is not a legal pg name
 *   REP_PROTOCOL_FAIL_NO_RESOURCES	- strdup failed, or array is full
 */
static int
rc_notify_info_add_watch(rc_notify_info_t *rnip, const char **arr,
    const char *name)
{
	int i;
	int rc;
	char *f;

	rc = rc_check_type_name(REP_PROTOCOL_ENTITY_PROPERTYGRP, name);
	if (rc != REP_PROTOCOL_SUCCESS)
		return (rc);

	f = strdup(name);
	if (f == NULL)
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);

	(void) pthread_mutex_lock(&rc_pg_notify_lock);

	/* Wait out a concurrent rc_notify_info_reset(). */
	while (rnip->rni_flags & RC_NOTIFY_EMPTYING)
		(void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);

	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
		if (arr[i] == NULL)
			break;

		/*
		 * Don't add name if it's already being tracked.
		 */
		if (strcmp(arr[i], f) == 0) {
			free(f);
			goto out;
		}
	}

	if (i == RC_NOTIFY_MAX_NAMES) {
		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
		free(f);
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
	}

	arr[i] = f;
out:
	if (!(rnip->rni_flags & RC_NOTIFY_ACTIVE))
		rc_notify_info_insert_locked(rnip);

	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
	return (REP_PROTOCOL_SUCCESS);
}
/* Watch property groups by name.  See rc_notify_info_add_watch(). */
int
rc_notify_info_add_name(rc_notify_info_t *rnip, const char *name)
{
	return (rc_notify_info_add_watch(rnip, rnip->rni_namelist, name));
}
/* Watch property groups by type.  See rc_notify_info_add_watch(). */
int
rc_notify_info_add_type(rc_notify_info_t *rnip, const char *type)
{
	return (rc_notify_info_add_watch(rnip, rnip->rni_typelist, type));
}
/*
 * Wait for and report an event of interest to rnip, a notification client.
 *
 * The client's marker node (rni_notify) sits in the global rc_notify_list;
 * everything after the marker is not yet seen by this client.  On each
 * pass we scan forward from the marker for an entry matching the client's
 * name/type watch lists, move the marker past it, and report it.  The
 * first marker on the list is additionally responsible for garbage
 * collecting notifications that every client has already passed.
 *
 * On a pg event, assigns the pg's node to *out and returns
 * REP_PROTOCOL_SUCCESS.  On a delete event, copies the deleted entity's
 * FMRI into outp (sz bytes), clears *out, and returns
 * REP_PROTOCOL_SUCCESS.  Returns REP_PROTOCOL_DONE once the client is
 * deactivated or draining (rc_notify_info_remove_locked()).
 */
int
rc_notify_info_wait(rc_notify_info_t *rnip, rc_node_ptr_t *out,
    char *outp, size_t sz)
{
	rc_notify_t *np;
	rc_notify_t *me = &rnip->rni_notify;
	rc_node_t *nnp;
	rc_notify_delete_t *ndp;

	int am_first_info;

	/* Default to an empty FMRI string in case we report a pg event. */
	if (sz > 0)
		outp[0] = 0;

	(void) pthread_mutex_lock(&rc_pg_notify_lock);

	while ((rnip->rni_flags & (RC_NOTIFY_ACTIVE | RC_NOTIFY_DRAIN)) ==
	    RC_NOTIFY_ACTIVE) {
		/*
		 * If I'm first on the notify list, it is my job to
		 * clean up any notifications I pass by.  I can't do that
		 * if someone is blocking the list from removals, so I
		 * have to wait until they have all drained.
		 */
		am_first_info = (uu_list_first(rc_notify_list) == me);
		if (am_first_info && rc_notify_in_use) {
			rnip->rni_waiters++;
			(void) pthread_cond_wait(&rc_pg_notify_cv,
			    &rc_pg_notify_lock);
			rnip->rni_waiters--;
			continue;
		}

		/*
		 * Search the list for a node of interest.
		 */
		np = uu_list_next(rc_notify_list, me);
		while (np != NULL && !rc_notify_info_interested(rnip, np)) {
			rc_notify_t *next = uu_list_next(rc_notify_list, np);

			if (am_first_info) {
				if (np->rcn_info) {
					/*
					 * Passing another client -- stop
					 * cleaning up notifications
					 */
					am_first_info = 0;
				} else {
					rc_notify_remove_locked(np);
				}
			}
			np = next;
		}

		/*
		 * Nothing of interest -- wait for notification
		 */
		if (np == NULL) {
			rnip->rni_waiters++;
			(void) pthread_cond_wait(&rnip->rni_cv,
			    &rc_pg_notify_lock);
			rnip->rni_waiters--;
			continue;
		}

		/*
		 * found something to report -- move myself after the
		 * notification and process it.
		 */
		(void) uu_list_remove(rc_notify_list, me);
		(void) uu_list_insert_after(rc_notify_list, np, me);

		if ((ndp = np->rcn_delete) != NULL) {
			/* Delete event: report the FMRI, no node. */
			(void) strlcpy(outp, ndp->rnd_fmri, sz);
			if (am_first_info)
				rc_notify_remove_locked(np);
			(void) pthread_mutex_unlock(&rc_pg_notify_lock);
			rc_node_clear(out, 0);
			return (REP_PROTOCOL_SUCCESS);
		}

		nnp = np->rcn_node;
		assert(nnp != NULL);

		/*
		 * We can't bump nnp's reference count without grabbing its
		 * lock, and rc_pg_notify_lock is a leaf lock.  So we
		 * temporarily block all removals to keep nnp from
		 * disappearing.
		 */
		rc_notify_in_use++;
		assert(rc_notify_in_use > 0);
		(void) pthread_mutex_unlock(&rc_pg_notify_lock);

		rc_node_assign(out, nnp);

		(void) pthread_mutex_lock(&rc_pg_notify_lock);
		assert(rc_notify_in_use > 0);
		rc_notify_in_use--;

		if (am_first_info) {
			/*
			 * While we had the lock dropped, another thread
			 * may have also incremented rc_notify_in_use.  We
			 * need to make sure that we're back to 0 before
			 * removing the node.
			 */
			while (rc_notify_in_use) {
				(void) pthread_cond_wait(&rc_pg_notify_cv,
				    &rc_pg_notify_lock);
			}
			rc_notify_remove_locked(np);
		}
		if (rc_notify_in_use == 0)
			(void) pthread_cond_broadcast(&rc_pg_notify_cv);
		(void) pthread_mutex_unlock(&rc_pg_notify_lock);

		return (REP_PROTOCOL_SUCCESS);
	}
	/*
	 * If we're the last one out, let people know it's clear.
	 */
	if (rnip->rni_waiters == 0)
		(void) pthread_cond_broadcast(&rnip->rni_cv);
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
	return (REP_PROTOCOL_DONE);
}
/*
 * Quiesce and empty a notification client: deactivate it if active, then
 * free all of its name and type watches.  RC_NOTIFY_EMPTYING is held
 * across the (unlocked) free loop so that rc_notify_info_add_watch()
 * can't race with us.
 */
static void
rc_notify_info_reset(rc_notify_info_t *rnip)
{
	int i;

	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	if (rnip->rni_flags & RC_NOTIFY_ACTIVE)
		rc_notify_info_remove_locked(rnip);
	assert(!(rnip->rni_flags & (RC_NOTIFY_DRAIN | RC_NOTIFY_EMPTYING)));
	rnip->rni_flags |= RC_NOTIFY_EMPTYING;
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);

	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
		if (rnip->rni_namelist[i] != NULL) {
			free((void *)rnip->rni_namelist[i]);
			rnip->rni_namelist[i] = NULL;
		}
		if (rnip->rni_typelist[i] != NULL) {
			free((void *)rnip->rni_typelist[i]);
			rnip->rni_typelist[i] = NULL;
		}
	}

	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	rnip->rni_flags &= ~RC_NOTIFY_EMPTYING;
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
}
/*
 * Tear down a notification client handle: drain/empty it, then release
 * both of its list nodes.  Counterpart of rc_notify_info_init().
 */
void
rc_notify_info_fini(rc_notify_info_t *rnip)
{
	rc_notify_info_reset(rnip);

	uu_list_node_fini(rnip, &rnip->rni_list_node, rc_notify_info_pool);
	uu_list_node_fini(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
	    rc_notify_pool);
}