1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
25 * Copyright (c) 2016 by Delphix. All rights reserved.
29 * rc_node.c - In-memory SCF object management
31 * This layer manages the in-memory cache (the Repository Cache) of SCF
32 * data. Read requests are usually satisfied from here, but may require
33 * load calls to the "object" layer. Modify requests always write-through
34 * to the object layer.
36 * SCF data comprises scopes, services, instances, snapshots, snaplevels,
37 * property groups, properties, and property values. All but the last are
38 * known here as "entities" and are represented by rc_node_t data
39 * structures. (Property values are kept in the rn_values member of the
40 * respective property, not as separate objects.) All entities besides
41 * the "localhost" scope have some entity as a parent, and therefore form
42 * a tree.
44 * The entity tree is rooted at rc_scope, which rc_node_init() initializes to
45 * the "localhost" scope. The tree is filled in from the database on-demand
46 * by rc_node_fill_children().
48 * rc_node_t's are also placed in the cache_hash[] hash table, for rapid
49 * lookup.
51 * Multiple threads may service client requests, so access to each
52 * rc_node_t is synchronized by its rn_lock member. Some fields are
53 * protected by bits in the rn_flags field instead, to support operations
54 * which need to drop rn_lock, for example to respect locking order. Such
55 * flags should be manipulated with the rc_node_{hold,rele}_flag()
56 * functions.
58 * We track references to nodes to tell when they can be free()d. rn_refs
59 * should be incremented with rc_node_hold() on the creation of client
60 * references (rc_node_ptr_t's and rc_iter_t's). rn_erefs ("ephemeral
61 * references") should be incremented when a pointer is read into a local
62 * variable of a thread, with rc_node_hold_ephemeral_locked(). This
63 * hasn't been fully implemented, however, so rc_node_rele() tolerates
64 * rn_erefs being 0. Some code which predates rn_erefs counts ephemeral
65 * references in rn_refs. Other references are tracked by the
66 * rn_other_refs field and the RC_NODE_DEAD, RC_NODE_IN_PARENT,
67 * RC_NODE_OLD, and RC_NODE_ON_FORMER flags.
69 * Locking rules: To dereference an rc_node_t * (usually to lock it), you must
70 * have a hold (rc_node_hold()) on it or otherwise be sure that it hasn't been
71 * rc_node_destroy()ed (hold a lock on its parent or child, hold a flag,
72 * etc.). Once you have locked an rc_node_t you must check its rn_flags for
73 * RC_NODE_DEAD before you can use it. This is usually done with the
74 * rc_node_{wait,hold}_flag() functions (often via the rc_node_check_*()
75 * functions & RC_NODE_*() macros), which fail if the object has died.
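 *
 * As an illustrative sketch (not a verbatim excerpt of the code below),
 * the typical pattern for safely using an rc_node_t pointer is:
 *
 *        (void) pthread_mutex_lock(&np->rn_lock);
 *        if (np->rn_flags & RC_NODE_DEAD) {
 *                (void) pthread_mutex_unlock(&np->rn_lock);
 *                return (...);            -- the node has been destroyed
 *        }
 *        rc_node_hold_locked(np);         -- or rc_node_hold_flag(np, ...)
 *        (void) pthread_mutex_unlock(&np->rn_lock);
 *        -- ... use np ...
 *        rc_node_rele(np);                -- drops the hold taken above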
77 * When a transactional node (property group or snapshot) is updated,
78 * a new node takes the place of the old node in the global hash and the
79 * old node is hung off of the rn_former list of the new node. At the
80 * same time, all of its children have their rn_parent_ref pointer set,
81 * and any holds they have are reflected in the old node's rn_other_refs
82 * count. This is automatically kept up to date until the final reference
83 * to the subgraph is dropped, at which point the node is unrefed and
84 * destroyed, along with all of its children.
 86 * Because name service lookups may take a long time and, more importantly,
87 * may trigger additional accesses to the repository, perm_granted() must be
88 * called without holding any locks.
90 * An ITER_START for a non-ENTITY_VALUE induces an rc_node_fill_children()
91 * call via rc_node_setup_iter() to populate the rn_children uu_list of the
92 * rc_node_t * in question and a call to uu_list_walk_start() on that list. For
93 * ITER_READ, rc_iter_next() uses uu_list_walk_next() to find the next
 94 * appropriate child.
96 * An ITER_START for an ENTITY_VALUE makes sure the node has its values
97 * filled, and sets up the iterator. An ITER_READ_VALUE just copies out
98 * the proper values and updates the offset information.
100 * To allow aliases, snapshots are implemented with a level of indirection.
101 * A snapshot rc_node_t has a snapid which refers to an rc_snapshot_t in
102 * snapshot.c which contains the authoritative snaplevel information. The
103 * snapid is "assigned" by rc_attach_snapshot().
105 * We provide the client layer with rc_node_ptr_t's to reference objects.
106 * Objects referred to by them are automatically held & released by
107 * rc_node_assign() & rc_node_clear(). The RC_NODE_PTR_*() macros are used at
108 * client.c entry points to read the pointers. They fetch the pointer to the
109 * object, return (from the function) if it is dead, and lock, hold, or hold
110 * a flag of the object.
114 * Permission checking is authorization-based: some operations may only
115 * proceed if the user has been assigned at least one of a set of
116 * authorization strings. The set of enabling authorizations depends on the
117 * operation and the target object. The set of authorizations assigned to
118 * a user is determined by an algorithm defined in libsecdb.
120 * The fastest way to decide whether the two sets intersect is by entering the
121 * strings into a hash table and detecting collisions, which takes linear time
 122 * in the total size of the sets. The exception is authorization patterns
 123 * that may be assigned to users: without advanced pattern-matching
 124 * algorithms, each pattern takes O(n) in the number of enabling
 125 * authorizations.
127 * We can achieve some practical speed-ups by noting that if we enter all of
128 * the authorizations from one of the sets into the hash table we can merely
129 * check the elements of the second set for existence without adding them.
130 * This reduces memory requirements and hash table clutter. The enabling set
131 * is well suited for this because it is internal to configd (for now, at
132 * least). Combine this with short-circuiting and we can even minimize the
133 * number of queries to the security databases (user_attr & prof_attr).
135 * To force this usage onto clients we provide functions for adding
136 * authorizations to the enabling set of a permission context structure
 137 * (perm_add_*()) and one to decide whether the user associated with the
138 * current door call client possesses any of them (perm_granted()).
140 * At some point, a generic version of this should move to libsecdb.
142 * While entering the enabling strings into the hash table, we keep track
143 * of which is the most specific for use in generating auditing events.
144 * See the "Collecting the Authorization String" section of the "SMF Audit
145 * Events" block comment below.
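 *
 * A minimal usage sketch (error handling elided; the real callers appear
 * later in this file):
 *
 *        permcheck_t *pcp = pc_create();
 *
 *        if (perm_add_enabling(pcp, AUTH_MODIFY) != REP_PROTOCOL_SUCCESS)
 *                -- out of memory
 *        switch (perm_granted(pcp)) {
 *        case PERM_GRANTED:      -- caller holds one of the enabling auths
 *        case PERM_DENIED:       -- pc_auth_string names the most specific auth
 *        case PERM_GONE:         -- the door client went away
 *        case PERM_FAIL:         -- resource or name-service failure
 *        }
 *        pc_free(pcp);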
149 * Composition is the combination of sets of properties. The sets are ordered
150 * and properties in higher sets obscure properties of the same name in lower
151 * sets. Here we present a composed view of an instance's properties as the
152 * union of its properties and its service's properties. Similarly the
153 * properties of snaplevels are combined to form a composed view of the
154 * properties of a snapshot (which should match the composed view of the
155 * properties of the instance when the snapshot was taken).
157 * In terms of the client interface, the client may request that a property
158 * group iterator for an instance or snapshot be composed. Property groups
159 * traversed by such an iterator may not have the target entity as a parent.
160 * Similarly, the properties traversed by a property iterator for those
161 * property groups may not have the property groups iterated as parents.
163 * Implementation requires that iterators for instances and snapshots be
164 * composition-savvy, and that we have a "composed property group" entity
165 * which represents the composition of a number of property groups. Iteration
166 * over "composed property groups" yields properties which may have different
167 * parents, but for all other operations a composed property group behaves
168 * like the top-most property group it represents.
170 * The implementation is based on the rn_cchain[] array of rc_node_t pointers
171 * in rc_node_t. For instances, the pointers point to the instance and its
172 * parent service. For snapshots they point to the child snaplevels, and for
173 * composed property groups they point to property groups. A composed
174 * iterator carries an index into rn_cchain[]. Thus most of the magic ends up
 175 * in the rc_iter_*() code.
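 *
 * As a rough sketch (the real logic is in the rc_iter_*() functions), a
 * composed lookup conceptually walks the chain in order:
 *
 *        for (i = 0; i < COMPOSITION_DEPTH; i++) {
 *                if (cp->rn_cchain[i] == NULL)
 *                        break;
 *                -- search rn_cchain[i]; the first match wins, so entries
 *                -- in earlier (higher) layers obscure those in later ones
 *        }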
178 * SMF Audit Events:
179 * ================
181 * To maintain security, SMF generates audit events whenever
182 * privileged operations are attempted. See the System Administration
 183 * Guide: Security Services answerbook for a discussion of the Solaris
184 * audit system.
186 * The SMF audit event codes are defined in adt_event.h by symbols
187 * starting with ADT_smf_ and are described in audit_event.txt. The
188 * audit record structures are defined in the SMF section of adt.xml.
189 * adt.xml is used to automatically generate adt_event.h which
190 * contains the definitions that we code to in this file. For the
191 * most part the audit events map closely to actions that you would
192 * perform with svcadm or svccfg, but there are some special cases
193 * which we'll discuss later.
195 * The software associated with SMF audit events falls into three
196 * categories:
197 * - collecting information to be written to the audit
198 * records
199 * - using the adt_* functions in
200 * usr/src/lib/libbsm/common/adt.c to generate the audit
201 * records.
202 * - handling special cases
204 * Collecting Information:
205 * ----------------------
 207 * Almost all of the audit events require the FMRI of the affected
208 * object and the authorization string that was used. The one
209 * exception is ADT_smf_annotation which we'll talk about later.
211 * Collecting the FMRI:
213 * The rc_node structure has a member called rn_fmri which points to
214 * its FMRI. This is initialized by a call to rc_node_build_fmri()
215 * when the node's parent is established. The reason for doing it
216 * at this time is that a node's FMRI is basically the concatenation
217 * of the parent's FMRI and the node's name with the appropriate
218 * decoration. rc_node_build_fmri() does this concatenation and
219 * decorating. It is called from rc_node_link_child() and
220 * rc_node_relink_child() where a node is linked to its parent.
222 * rc_node_get_fmri_or_fragment() is called to retrieve a node's FMRI
223 * when it is needed. It returns rn_fmri if it is set. If the node
224 * is at the top level, however, rn_fmri won't be set because it was
225 * never linked to a parent. In this case,
226 * rc_node_get_fmri_or_fragment() constructs an FMRI fragment based on
227 * its node type and its name, rn_name.
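 *
 * For example (illustrative values), for the property "exec" in property
 * group "start" of instance "default" of service "system/foo", the FMRI is
 * built up piecewise as each node is linked to its parent:
 *
 *        scope:           "svc:"
 *        service:         "svc:/system/foo"
 *        instance:        "svc:/system/foo:default"
 *        property group:  "svc:/system/foo:default/:properties/start"
 *        property:        "svc:/system/foo:default/:properties/start/exec"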
229 * Collecting the Authorization String:
231 * Naturally, the authorization string is captured during the
232 * authorization checking process. Acceptable authorization strings
233 * are added to a permcheck_t hash table as noted in the section on
234 * permission checking above. Once all entries have been added to the
235 * hash table, perm_granted() is called. If the client is authorized,
236 * perm_granted() returns with pc_auth_string of the permcheck_t
237 * structure pointing to the authorization string.
239 * This works fine if the client is authorized, but what happens if
240 * the client is not authorized? We need to report the required
241 * authorization string. This is the authorization that would have
242 * been used if permission had been granted. perm_granted() will
243 * find no match, so it needs to decide which string in the hash
244 * table to use as the required authorization string. It needs to do
245 * this, because configd is still going to generate an event. A
246 * design decision was made to use the most specific authorization
247 * in the hash table. The pc_auth_type enum designates the
248 * specificity of an authorization string. For example, an
249 * authorization string that is declared in an instance PG is more
250 * specific than one that is declared in a service PG.
252 * The pc_add() function keeps track of the most specific
253 * authorization in the hash table. It does this using the
254 * pc_specific and pc_specific_type members of the permcheck
255 * structure. pc_add() updates these members whenever a more
256 * specific authorization string is added to the hash table. Thus, if
257 * an authorization match is not found, perm_granted() will return
258 * with pc_auth_string in the permcheck_t pointing to the string that
259 * is referenced by pc_specific.
261 * Generating the Audit Events:
262 * ===========================
264 * As the functions in this file process requests for clients of
265 * configd, they gather the information that is required for an audit
266 * event. Eventually, the request processing gets to the point where
267 * the authorization is rejected or to the point where the requested
268 * action was attempted. At these two points smf_audit_event() is
269 * called.
271 * smf_audit_event() takes 4 parameters:
272 * - the event ID which is one of the ADT_smf_* symbols from
273 * adt_event.h.
274 * - status to pass to adt_put_event()
275 * - return value to pass to adt_put_event()
276 * - the event data (see audit_event_data structure)
278 * All interactions with the auditing software require an audit
279 * session. We use one audit session per configd client. We keep
280 * track of the audit session in the repcache_client structure.
281 * smf_audit_event() calls get_audit_session() to get the session
282 * pointer.
284 * smf_audit_event() then calls adt_alloc_event() to allocate an
285 * adt_event_data union which is defined in adt_event.h, copies the
286 * data into the appropriate members of the union and calls
287 * adt_put_event() to generate the event.
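 *
 * A sketch of a typical call (the surrounding setup and cleanup are
 * omitted, and the values shown are illustrative):
 *
 *        audit_event_data_t audit_data;
 *
 *        audit_data.ed_fmri = fmri;
 *        audit_data.ed_auth = auth_string;
 *        smf_audit_event(ADT_smf_enable, ADT_SUCCESS, ADT_SUCCESS,
 *            &audit_data);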
289 * Special Cases:
290 * =============
292 * There are three major types of special cases:
294 * - gathering event information for each action in a
295 * transaction
296 * - Higher level events represented by special property
297 * group/property name combinations. Many of these are
298 * restarter actions.
299 * - ADT_smf_annotation event
301 * Processing Transaction Actions:
302 * ------------------------------
304 * A transaction can contain multiple actions to modify, create or
305 * delete one or more properties. We need to capture information so
306 * that we can generate an event for each property action. The
 307 * transaction information is stored in a tx_commit_data_t, and
308 * object.c provides accessor functions to retrieve data from this
309 * structure. rc_tx_commit() obtains a tx_commit_data_t by calling
310 * tx_commit_data_new() and passes this to object_tx_commit() to
311 * commit the transaction. Then we call generate_property_events() to
312 * generate an audit event for each property action.
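 *
 * In outline (a simplified sketch; argument lists are elided and the exact
 * prototypes live in object.c and later in this file):
 *
 *        tx_commit_data_t *tx_data;
 *
 *        rc = tx_commit_data_new(..., &tx_data);
 *        if (rc == REP_PROTOCOL_SUCCESS)
 *                rc = object_tx_commit(..., tx_data, ...);
 *        generate_property_events(tx_data, ...);  -- one event per action
 *        tx_commit_data_free(tx_data);            -- assumed cleanup counterpart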
314 * Special Properties:
315 * ------------------
317 * There are combinations of property group/property name that are special.
318 * They are special because they have specific meaning to startd. startd
319 * interprets them in a service-independent fashion.
320 * restarter_actions/refresh and general/enabled are two examples of these.
321 * A special event is generated for these properties in addition to the
322 * regular property event described in the previous section. The special
323 * properties are declared as an array of audit_special_prop_item
324 * structures at special_props_list in rc_node.c.
326 * In the previous section, we mentioned the
 327 * generate_property_events() function that generates an event for
 328 * every property action. Before generating each event,
 329 * generate_property_events() calls special_property_event().
330 * special_property_event() checks to see if the action involves a
331 * special property. If it does, it generates a special audit
332 * event.
334 * ADT_smf_annotation event:
335 * ------------------------
337 * This is a special event unlike any other. It allows the svccfg
338 * program to store an annotation in the event log before a series
339 * of transactions is processed. It is used with the import and
340 * apply svccfg commands. svccfg uses the rep_protocol_annotation
341 * message to pass the operation (import or apply) and the file name
342 * to configd. The set_annotation() function in client.c stores
 343 * these away in the repcache_client structure. The address of
344 * this structure is saved in the thread_info structure.
346 * Before it generates any events, smf_audit_event() calls
347 * smf_annotation_event(). smf_annotation_event() calls
348 * client_annotation_needed() which is defined in client.c. If an
349 * annotation is needed client_annotation_needed() returns the
350 * operation and filename strings that were saved from the
351 * rep_protocol_annotation message. smf_annotation_event() then
352 * generates the ADT_smf_annotation event.
355 #include <assert.h>
356 #include <atomic.h>
357 #include <bsm/adt_event.h>
358 #include <errno.h>
359 #include <libuutil.h>
360 #include <libscf.h>
361 #include <libscf_priv.h>
362 #include <pthread.h>
363 #include <pwd.h>
364 #include <stdio.h>
365 #include <stdlib.h>
366 #include <strings.h>
367 #include <sys/types.h>
368 #include <syslog.h>
369 #include <unistd.h>
370 #include <secdb.h>
372 #include "configd.h"
374 #define AUTH_PREFIX "solaris.smf."
375 #define AUTH_MANAGE AUTH_PREFIX "manage"
376 #define AUTH_MODIFY AUTH_PREFIX "modify"
377 #define AUTH_MODIFY_PREFIX AUTH_MODIFY "."
378 #define AUTH_PG_ACTIONS SCF_PG_RESTARTER_ACTIONS
379 #define AUTH_PG_ACTIONS_TYPE SCF_PG_RESTARTER_ACTIONS_TYPE
380 #define AUTH_PG_GENERAL SCF_PG_GENERAL
381 #define AUTH_PG_GENERAL_TYPE SCF_PG_GENERAL_TYPE
382 #define AUTH_PG_GENERAL_OVR SCF_PG_GENERAL_OVR
383 #define AUTH_PG_GENERAL_OVR_TYPE SCF_PG_GENERAL_OVR_TYPE
384 #define AUTH_PROP_ACTION "action_authorization"
385 #define AUTH_PROP_ENABLED "enabled"
386 #define AUTH_PROP_MODIFY "modify_authorization"
387 #define AUTH_PROP_VALUE "value_authorization"
388 #define AUTH_PROP_READ "read_authorization"
390 #define MAX_VALID_CHILDREN 3
392 typedef struct rc_type_info {
393 uint32_t rt_type; /* matches array index */
394 uint32_t rt_num_ids;
395 uint32_t rt_name_flags;
396 uint32_t rt_valid_children[MAX_VALID_CHILDREN];
397 } rc_type_info_t;
399 #define RT_NO_NAME -1U
401 static rc_type_info_t rc_types[] = {
402 {REP_PROTOCOL_ENTITY_NONE, 0, RT_NO_NAME},
403 {REP_PROTOCOL_ENTITY_SCOPE, 0, 0,
404 {REP_PROTOCOL_ENTITY_SERVICE, REP_PROTOCOL_ENTITY_SCOPE}},
405 {REP_PROTOCOL_ENTITY_SERVICE, 0, UU_NAME_DOMAIN | UU_NAME_PATH,
406 {REP_PROTOCOL_ENTITY_INSTANCE, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
407 {REP_PROTOCOL_ENTITY_INSTANCE, 1, UU_NAME_DOMAIN,
408 {REP_PROTOCOL_ENTITY_SNAPSHOT, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
409 {REP_PROTOCOL_ENTITY_SNAPSHOT, 2, UU_NAME_DOMAIN,
410 {REP_PROTOCOL_ENTITY_SNAPLEVEL, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
411 {REP_PROTOCOL_ENTITY_SNAPLEVEL, 4, RT_NO_NAME,
412 {REP_PROTOCOL_ENTITY_PROPERTYGRP}},
413 {REP_PROTOCOL_ENTITY_PROPERTYGRP, 5, UU_NAME_DOMAIN,
414 {REP_PROTOCOL_ENTITY_PROPERTY}},
415 {REP_PROTOCOL_ENTITY_CPROPERTYGRP, 0, UU_NAME_DOMAIN,
416 {REP_PROTOCOL_ENTITY_PROPERTY}},
417 {REP_PROTOCOL_ENTITY_PROPERTY, 7, UU_NAME_DOMAIN},
418 {-1UL}
420 #define NUM_TYPES ((sizeof (rc_types) / sizeof (*rc_types)))
422 /* Element of a permcheck_t hash table. */
423 struct pc_elt {
424 struct pc_elt *pce_next;
425 char pce_auth[1];
429 * If an authorization fails, we must decide which of the elements in the
 430 * permcheck hash table to use in the audit event. That is to say, of all
431 * the strings in the hash table, we must choose one and use it in the audit
432 * event. It is desirable to use the most specific string in the audit
433 * event.
435 * The pc_auth_type specifies the types (sources) of authorization
436 * strings. The enum is ordered in increasing specificity.
438 typedef enum pc_auth_type {
439 PC_AUTH_NONE = 0, /* no auth string available. */
440 PC_AUTH_SMF, /* strings coded into SMF. */
441 PC_AUTH_SVC, /* strings specified in PG of a service. */
442 PC_AUTH_INST /* strings specified in PG of an instance. */
443 } pc_auth_type_t;
446 * The following enum is used to represent the results of the checks to see
447 * if the client has the appropriate permissions to perform an action.
449 typedef enum perm_status {
450 PERM_DENIED = 0, /* Permission denied. */
451 PERM_GRANTED, /* Client has authorizations. */
452 PERM_GONE, /* Door client went away. */
453 PERM_FAIL /* Generic failure. e.g. resources */
454 } perm_status_t;
456 /* An authorization set hash table. */
457 typedef struct {
458 struct pc_elt **pc_buckets;
459 uint_t pc_bnum; /* number of buckets */
460 uint_t pc_enum; /* number of elements */
461 struct pc_elt *pc_specific; /* most specific element */
462 pc_auth_type_t pc_specific_type; /* type of pc_specific */
463 char *pc_auth_string; /* authorization string */
464 /* for audit events */
465 } permcheck_t;
468 * Structure for holding audit event data. Not all events use all members
469 * of the structure.
471 typedef struct audit_event_data {
472 char *ed_auth; /* authorization string. */
473 char *ed_fmri; /* affected FMRI. */
474 char *ed_snapname; /* name of snapshot. */
475 char *ed_old_fmri; /* old fmri in attach case. */
476 char *ed_old_name; /* old snapshot in attach case. */
477 char *ed_type; /* prop. group or prop. type. */
478 char *ed_prop_value; /* property value. */
479 } audit_event_data_t;
482 * Pointer to function to do special processing to get audit event ID.
483 * Audit event IDs are defined in /usr/include/bsm/adt_event.h. Function
484 * returns 0 if ID successfully retrieved. Otherwise it returns -1.
486 typedef int (*spc_getid_fn_t)(tx_commit_data_t *, size_t, const char *,
487 au_event_t *);
488 static int general_enable_id(tx_commit_data_t *, size_t, const char *,
489 au_event_t *);
491 static uu_list_pool_t *rc_children_pool;
492 static uu_list_pool_t *rc_pg_notify_pool;
493 static uu_list_pool_t *rc_notify_pool;
494 static uu_list_pool_t *rc_notify_info_pool;
496 static rc_node_t *rc_scope;
498 static pthread_mutex_t rc_pg_notify_lock = PTHREAD_MUTEX_INITIALIZER;
499 static pthread_cond_t rc_pg_notify_cv = PTHREAD_COND_INITIALIZER;
500 static uint_t rc_notify_in_use; /* blocks removals */
503 * Some combinations of property group/property name require a special
504 * audit event to be generated when there is a change.
505 * audit_special_prop_item_t is used to specify these special cases. The
506 * special_props_list array defines a list of these special properties.
508 typedef struct audit_special_prop_item {
509 const char *api_pg_name; /* property group name. */
510 const char *api_prop_name; /* property name. */
511 au_event_t api_event_id; /* event id or 0. */
512 spc_getid_fn_t api_event_func; /* function to get event id. */
513 } audit_special_prop_item_t;
516 * Native builds are done using the build machine's standard include
517 * files. These files may not yet have the definitions for the ADT_smf_*
518 * symbols. Thus, we do not compile this table when doing native builds.
520 #ifndef NATIVE_BUILD
522 * The following special_props_list array specifies property group/property
523 * name combinations that have specific meaning to startd. A special event
524 * is generated for these combinations in addition to the regular property
525 * event.
527 * At run time this array gets sorted. See the call to qsort(3C) in
528 * rc_node_init(). The array is sorted, so that bsearch(3C) can be used
529 * to do lookups.
531 static audit_special_prop_item_t special_props_list[] = {
532 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADED, ADT_smf_degrade,
533 NULL},
534 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADE_IMMEDIATE,
535 ADT_smf_immediate_degrade, NULL},
536 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_OFF, ADT_smf_clear, NULL},
537 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON,
538 ADT_smf_maintenance, NULL},
539 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMEDIATE,
540 ADT_smf_immediate_maintenance, NULL},
541 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMTEMP,
542 ADT_smf_immtmp_maintenance, NULL},
543 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_TEMPORARY,
544 ADT_smf_tmp_maintenance, NULL},
545 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_REFRESH, ADT_smf_refresh, NULL},
546 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTART, ADT_smf_restart, NULL},
547 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTORE, ADT_smf_clear, NULL},
548 {SCF_PG_OPTIONS, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL},
549 {SCF_PG_OPTIONS_OVR, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL},
550 {SCF_PG_GENERAL, SCF_PROPERTY_ENABLED, 0, general_enable_id},
551 {SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED, 0, general_enable_id}
553 #define SPECIAL_PROP_COUNT (sizeof (special_props_list) /\
554 sizeof (audit_special_prop_item_t))
555 #endif /* NATIVE_BUILD */
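/*
 * Illustrative sketch of how the sorted special_props_list is consulted;
 * the comparator and lookup shown here are hypothetical stand-ins for the
 * real qsort(3C)/bsearch(3C) code elsewhere in this file:
 *
 *        static int
 *        special_prop_compare(const void *l, const void *r)
 *        {
 *                const audit_special_prop_item_t *a = l, *b = r;
 *                int rc = strcmp(a->api_pg_name, b->api_pg_name);
 *
 *                return (rc != 0 ? rc :
 *                    strcmp(a->api_prop_name, b->api_prop_name));
 *        }
 *
 *        audit_special_prop_item_t key = { pg_name, prop_name, 0, NULL };
 *        audit_special_prop_item_t *item = bsearch(&key, special_props_list,
 *            SPECIAL_PROP_COUNT, sizeof (key), special_prop_compare);
 */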
558 * We support an arbitrary number of clients interested in events for certain
559 * types of changes. Each client is represented by an rc_notify_info_t, and
560 * all clients are chained onto the rc_notify_info_list.
562 * The rc_notify_list is the global notification list. Each entry is of
563 * type rc_notify_t, which is embedded in one of three other structures:
565 * rc_node_t property group update notification
566 * rc_notify_delete_t object deletion notification
567 * rc_notify_info_t notification clients
 569 * The embedding object's type is determined by which pointer in the
 570 * rc_notify_t is non-NULL.
572 * New notifications and clients are added to the end of the list.
573 * Notifications no-one is interested in are never added to the list.
575 * Clients use their position in the list to track which notifications they
576 * have not yet reported. As they process notifications, they move forward
577 * in the list past them. There is always a client at the beginning of the
578 * list -- as it moves past notifications, it removes them from the list and
579 * cleans them up.
581 * The rc_pg_notify_lock protects all notification state. The rc_pg_notify_cv
 582 * is used for global signalling, and each client has a cv on which it
 583 * waits for events of interest.
585 * rc_notify_in_use is used to protect rc_notify_list from deletions when
586 * the rc_pg_notify_lock is dropped. Specifically, rc_notify_info_wait()
587 * must drop the lock to call rc_node_assign(), and then it reacquires the
588 * lock. Deletions from rc_notify_list during this period are not
589 * allowed. Insertions do not matter, because they are always done at the
590 * end of the list.
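 *
 * A small sketch of how the embedding object of an rc_notify_t entry is
 * identified (compare the assertions in rc_notify_info_interested() below):
 *
 *        if (np->rcn_node != NULL)
 *                -- a property group update (embedded in an rc_node_t)
 *        else if (np->rcn_delete != NULL)
 *                -- an object deletion (embedded in an rc_notify_delete_t)
 *        else
 *                -- a notification client (np->rcn_info, an rc_notify_info_t)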
592 static uu_list_t *rc_notify_info_list;
593 static uu_list_t *rc_notify_list;
595 #define HASH_SIZE 512
596 #define HASH_MASK (HASH_SIZE - 1)
598 #pragma align 64(cache_hash)
599 static cache_bucket_t cache_hash[HASH_SIZE];
601 #define CACHE_BUCKET(h) (&cache_hash[(h) & HASH_MASK])
604 static void rc_node_no_client_refs(rc_node_t *np);
607 static uint32_t
608 rc_node_hash(rc_node_lookup_t *lp)
610 uint32_t type = lp->rl_type;
611 uint32_t backend = lp->rl_backend;
612 uint32_t mainid = lp->rl_main_id;
613 uint32_t *ids = lp->rl_ids;
615 rc_type_info_t *tp = &rc_types[type];
616 uint32_t num_ids;
617 uint32_t left;
618 uint32_t hash;
620 assert(backend == BACKEND_TYPE_NORMAL ||
621 backend == BACKEND_TYPE_NONPERSIST);
623 assert(type > 0 && type < NUM_TYPES);
624 num_ids = tp->rt_num_ids;
626 left = MAX_IDS - num_ids;
627 assert(num_ids <= MAX_IDS);
629 hash = type * 7 + mainid * 5 + backend;
631 while (num_ids-- > 0)
632 hash = hash * 11 + *ids++ * 7;
635 * the rest should be zeroed
637 while (left-- > 0)
638 assert(*ids++ == 0);
640 return (hash);
643 static int
644 rc_node_match(rc_node_t *np, rc_node_lookup_t *l)
646 rc_node_lookup_t *r = &np->rn_id;
647 rc_type_info_t *tp;
648 uint32_t type;
649 uint32_t num_ids;
651 if (r->rl_main_id != l->rl_main_id)
652 return (0);
654 type = r->rl_type;
655 if (type != l->rl_type)
656 return (0);
658 assert(type > 0 && type < NUM_TYPES);
660 tp = &rc_types[r->rl_type];
661 num_ids = tp->rt_num_ids;
663 assert(num_ids <= MAX_IDS);
664 while (num_ids-- > 0)
665 if (r->rl_ids[num_ids] != l->rl_ids[num_ids])
666 return (0);
668 return (1);
672 * Register an ephemeral reference to np. This should be done while both
673 * the persistent reference from which the np pointer was read is locked
674 * and np itself is locked. This guarantees that another thread which
675 * thinks it has the last reference will yield without destroying the
676 * node.
678 static void
679 rc_node_hold_ephemeral_locked(rc_node_t *np)
681 assert(MUTEX_HELD(&np->rn_lock));
683 ++np->rn_erefs;
687 * the "other" references on a node are maintained in an atomically
688 * updated refcount, rn_other_refs. This can be bumped from arbitrary
689 * context, and tracks references to a possibly out-of-date node's children.
691 * To prevent the node from disappearing between the final drop of
692 * rn_other_refs and the unref handling, rn_other_refs_held is bumped on
693 * 0->1 transitions and decremented (with the node lock held) on 1->0
694 * transitions.
696 static void
697 rc_node_hold_other(rc_node_t *np)
699 if (atomic_add_32_nv(&np->rn_other_refs, 1) == 1) {
700 atomic_add_32(&np->rn_other_refs_held, 1);
701 assert(np->rn_other_refs_held > 0);
703 assert(np->rn_other_refs > 0);
707 * No node locks may be held
709 static void
710 rc_node_rele_other(rc_node_t *np)
712 assert(np->rn_other_refs > 0);
713 if (atomic_add_32_nv(&np->rn_other_refs, -1) == 0) {
714 (void) pthread_mutex_lock(&np->rn_lock);
715 assert(np->rn_other_refs_held > 0);
716 if (atomic_add_32_nv(&np->rn_other_refs_held, -1) == 0 &&
717 np->rn_refs == 0 && (np->rn_flags & RC_NODE_OLD)) {
719 * This was the last client reference. Destroy
720 * any other references and free() the node.
722 rc_node_no_client_refs(np);
723 } else {
724 (void) pthread_mutex_unlock(&np->rn_lock);
729 static void
730 rc_node_hold_locked(rc_node_t *np)
732 assert(MUTEX_HELD(&np->rn_lock));
734 if (np->rn_refs == 0 && (np->rn_flags & RC_NODE_PARENT_REF))
735 rc_node_hold_other(np->rn_parent_ref);
736 np->rn_refs++;
737 assert(np->rn_refs > 0);
740 static void
741 rc_node_hold(rc_node_t *np)
743 (void) pthread_mutex_lock(&np->rn_lock);
744 rc_node_hold_locked(np);
745 (void) pthread_mutex_unlock(&np->rn_lock);
748 static void
749 rc_node_rele_locked(rc_node_t *np)
751 int unref = 0;
752 rc_node_t *par_ref = NULL;
754 assert(MUTEX_HELD(&np->rn_lock));
755 assert(np->rn_refs > 0);
757 if (--np->rn_refs == 0) {
758 if (np->rn_flags & RC_NODE_PARENT_REF)
759 par_ref = np->rn_parent_ref;
762 * Composed property groups are only as good as their
763 * references.
765 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
766 np->rn_flags |= RC_NODE_DEAD;
768 if ((np->rn_flags & (RC_NODE_DEAD|RC_NODE_OLD)) &&
769 np->rn_other_refs == 0 && np->rn_other_refs_held == 0)
770 unref = 1;
773 if (unref) {
775 * This was the last client reference. Destroy any other
776 * references and free() the node.
778 rc_node_no_client_refs(np);
779 } else {
781 * rn_erefs can be 0 if we acquired the reference in
782 * a path which hasn't been updated to increment rn_erefs.
783 * When all paths which end here are updated, we should
784 * assert rn_erefs > 0 and always decrement it.
786 if (np->rn_erefs > 0)
787 --np->rn_erefs;
788 (void) pthread_mutex_unlock(&np->rn_lock);
791 if (par_ref != NULL)
792 rc_node_rele_other(par_ref);
795 void
796 rc_node_rele(rc_node_t *np)
798 (void) pthread_mutex_lock(&np->rn_lock);
799 rc_node_rele_locked(np);
802 static cache_bucket_t *
803 cache_hold(uint32_t h)
805 cache_bucket_t *bp = CACHE_BUCKET(h);
806 (void) pthread_mutex_lock(&bp->cb_lock);
807 return (bp);
810 static void
811 cache_release(cache_bucket_t *bp)
813 (void) pthread_mutex_unlock(&bp->cb_lock);
816 static rc_node_t *
817 cache_lookup_unlocked(cache_bucket_t *bp, rc_node_lookup_t *lp)
819 uint32_t h = rc_node_hash(lp);
820 rc_node_t *np;
822 assert(MUTEX_HELD(&bp->cb_lock));
823 assert(bp == CACHE_BUCKET(h));
825 for (np = bp->cb_head; np != NULL; np = np->rn_hash_next) {
826 if (np->rn_hash == h && rc_node_match(np, lp)) {
827 rc_node_hold(np);
828 return (np);
832 return (NULL);
835 static rc_node_t *
836 cache_lookup(rc_node_lookup_t *lp)
838 uint32_t h;
839 cache_bucket_t *bp;
840 rc_node_t *np;
842 h = rc_node_hash(lp);
843 bp = cache_hold(h);
845 np = cache_lookup_unlocked(bp, lp);
847 cache_release(bp);
849 return (np);
852 static void
853 cache_insert_unlocked(cache_bucket_t *bp, rc_node_t *np)
855 assert(MUTEX_HELD(&bp->cb_lock));
856 assert(np->rn_hash == rc_node_hash(&np->rn_id));
857 assert(bp == CACHE_BUCKET(np->rn_hash));
859 assert(np->rn_hash_next == NULL);
861 np->rn_hash_next = bp->cb_head;
862 bp->cb_head = np;
865 static void
866 cache_remove_unlocked(cache_bucket_t *bp, rc_node_t *np)
868 rc_node_t **npp;
870 assert(MUTEX_HELD(&bp->cb_lock));
871 assert(np->rn_hash == rc_node_hash(&np->rn_id));
872 assert(bp == CACHE_BUCKET(np->rn_hash));
874 for (npp = &bp->cb_head; *npp != NULL; npp = &(*npp)->rn_hash_next)
875 if (*npp == np)
876 break;
878 assert(*npp == np);
879 *npp = np->rn_hash_next;
880 np->rn_hash_next = NULL;
884 * verify that the 'parent' type can have a child typed 'child'
885 * Fails with
886 * _INVALID_TYPE - argument is invalid
887 * _TYPE_MISMATCH - parent type cannot have children of type child
889 static int
890 rc_check_parent_child(uint32_t parent, uint32_t child)
892 int idx;
893 uint32_t type;
895 if (parent == 0 || parent >= NUM_TYPES ||
896 child == 0 || child >= NUM_TYPES)
897 return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
899 for (idx = 0; idx < MAX_VALID_CHILDREN; idx++) {
900 type = rc_types[parent].rt_valid_children[idx];
901 if (type == child)
902 return (REP_PROTOCOL_SUCCESS);
905 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
909 * Fails with
910 * _INVALID_TYPE - type is invalid
911 * _BAD_REQUEST - name is an invalid name for a node of type type
914 rc_check_type_name(uint32_t type, const char *name)
916 if (type == 0 || type >= NUM_TYPES)
917 return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
919 if (uu_check_name(name, rc_types[type].rt_name_flags) == -1)
920 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
922 return (REP_PROTOCOL_SUCCESS);
925 static int
926 rc_check_pgtype_name(const char *name)
928 if (uu_check_name(name, UU_NAME_DOMAIN) == -1)
929 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
931 return (REP_PROTOCOL_SUCCESS);
935 * rc_node_free_fmri should be called whenever a node loses its parent.
936 * The reason is that the node's fmri string is built up by concatenating
937 * its name to the parent's fmri. Thus, when the node no longer has a
938 * parent, its fmri is no longer valid.
940 static void
941 rc_node_free_fmri(rc_node_t *np)
943 if (np->rn_fmri != NULL) {
944 free((void *)np->rn_fmri);
945 np->rn_fmri = NULL;
950 * Concatenate the appropriate separator and the FMRI element to the base
951 * FMRI string at fmri.
953 * Fails with
954 * _TRUNCATED Not enough room in buffer at fmri.
956 static int
957 rc_concat_fmri_element(
958 char *fmri, /* base fmri */
959 size_t bufsize, /* size of buf at fmri */
960 size_t *sz_out, /* receives result size. */
961 const char *element, /* element name to concat */
962 rep_protocol_entity_t type) /* type of element */
964 size_t actual;
965 const char *name = element;
966 int rc;
967 const char *separator;
969 if (bufsize > 0)
970 *sz_out = strlen(fmri);
971 else
972 *sz_out = 0;
974 switch (type) {
975 case REP_PROTOCOL_ENTITY_SCOPE:
976 if (strcmp(element, SCF_FMRI_LOCAL_SCOPE) == 0) {
978 * No need to display scope information if we are
979 * in the local scope.
981 separator = SCF_FMRI_SVC_PREFIX;
982 name = NULL;
983 } else {
985 * Need to display scope information, because it is
986 * not the local scope.
988 separator = SCF_FMRI_SVC_PREFIX SCF_FMRI_SCOPE_PREFIX;
990 break;
991 case REP_PROTOCOL_ENTITY_SERVICE:
992 separator = SCF_FMRI_SERVICE_PREFIX;
993 break;
994 case REP_PROTOCOL_ENTITY_INSTANCE:
995 separator = SCF_FMRI_INSTANCE_PREFIX;
996 break;
997 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
998 case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
999 separator = SCF_FMRI_PROPERTYGRP_PREFIX;
1000 break;
1001 case REP_PROTOCOL_ENTITY_PROPERTY:
1002 separator = SCF_FMRI_PROPERTY_PREFIX;
1003 break;
1004 case REP_PROTOCOL_ENTITY_VALUE:
1006 * A value does not have a separate FMRI from its property,
1007 * so there is nothing to concat.
1009 return (REP_PROTOCOL_SUCCESS);
1010 case REP_PROTOCOL_ENTITY_SNAPSHOT:
1011 case REP_PROTOCOL_ENTITY_SNAPLEVEL:
1012 /* Snapshots do not have FMRIs, so there is nothing to do. */
1013 return (REP_PROTOCOL_SUCCESS);
1014 default:
1015 (void) fprintf(stderr, "%s:%d: Unknown protocol type %d.\n",
1016 __FILE__, __LINE__, type);
1017 abort(); /* Missing a case in switch if we get here. */
1020 /* Concatenate separator and element to the fmri buffer. */
1022 actual = strlcat(fmri, separator, bufsize);
1023 if (name != NULL) {
1024 if (actual < bufsize) {
1025 actual = strlcat(fmri, name, bufsize);
1026 } else {
1027 actual += strlen(name);
1030 if (actual < bufsize) {
1031 rc = REP_PROTOCOL_SUCCESS;
1032 } else {
1033 rc = REP_PROTOCOL_FAIL_TRUNCATED;
1035 *sz_out = actual;
1036 return (rc);
1040 * Get the FMRI for the node at np. The fmri will be placed in buf. On
1041 * success sz_out will be set to the size of the fmri in buf. If
1042 * REP_PROTOCOL_FAIL_TRUNCATED is returned, sz_out will be set to the size
1043 * of the buffer that would be required to avoid truncation.
1045 * Fails with
1046 * _TRUNCATED not enough room in buf for the FMRI.
1048 static int
1049 rc_node_get_fmri_or_fragment(rc_node_t *np, char *buf, size_t bufsize,
1050 size_t *sz_out)
1052 size_t fmri_len = 0;
1053 int r;
1055 if (bufsize > 0)
1056 *buf = 0;
1057 *sz_out = 0;
1059 if (np->rn_fmri == NULL) {
1061 * A NULL rn_fmri implies that this is a top level scope.
1062 * Child nodes will always have an rn_fmri established
1063 * because both rc_node_link_child() and
1064 * rc_node_relink_child() call rc_node_build_fmri(). In
1065 * this case, we'll just return our name preceded by the
1066 * appropriate FMRI decorations.
1068 assert(np->rn_parent == NULL);
1069 r = rc_concat_fmri_element(buf, bufsize, &fmri_len, np->rn_name,
1070 np->rn_id.rl_type);
1071 if (r != REP_PROTOCOL_SUCCESS)
1072 return (r);
1073 } else {
1074 /* We have an fmri, so return it. */
1075 fmri_len = strlcpy(buf, np->rn_fmri, bufsize);
1078 *sz_out = fmri_len;
1080 if (fmri_len >= bufsize)
1081 return (REP_PROTOCOL_FAIL_TRUNCATED);
1083 return (REP_PROTOCOL_SUCCESS);
1087 * Build an FMRI string for this node and save it in rn_fmri.
1089 * The basic strategy here is to get the fmri of our parent and then
1090 * concatenate the appropriate separator followed by our name. If our name
1091 * is null, the resulting fmri will just be a copy of the parent fmri.
1092 * rc_node_build_fmri() should be called with the RC_NODE_USING_PARENT flag
1093 * set. Also the rn_lock for this node should be held.
1095 * Fails with
1096 * _NO_RESOURCES Could not allocate memory.
1098 static int
1099 rc_node_build_fmri(rc_node_t *np)
1101 size_t actual;
1102 char fmri[REP_PROTOCOL_FMRI_LEN];
1103 int rc;
1104 size_t sz = REP_PROTOCOL_FMRI_LEN;
1106 assert(MUTEX_HELD(&np->rn_lock));
1107 assert(np->rn_flags & RC_NODE_USING_PARENT);
1109 rc_node_free_fmri(np);
1111 rc = rc_node_get_fmri_or_fragment(np->rn_parent, fmri, sz, &actual);
1112 assert(rc == REP_PROTOCOL_SUCCESS);
1114 if (np->rn_name != NULL) {
1115 rc = rc_concat_fmri_element(fmri, sz, &actual, np->rn_name,
1116 np->rn_id.rl_type);
1117 assert(rc == REP_PROTOCOL_SUCCESS);
1118 np->rn_fmri = strdup(fmri);
1119 } else {
1120 np->rn_fmri = strdup(fmri);
1122 if (np->rn_fmri == NULL) {
1123 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1124 } else {
1125 rc = REP_PROTOCOL_SUCCESS;
1128 return (rc);
1132 * Get the FMRI of the node at np placing the result in fmri. Then
1133 * concatenate the additional element to fmri. The type variable indicates
1134 * the type of element, so that the appropriate separator can be
1135 * generated. size is the number of bytes in the buffer at fmri, and
1136 * sz_out receives the size of the generated string. If the result is
1137 * truncated, sz_out will receive the size of the buffer that would be
1138 * required to avoid truncation.
1140 * Fails with
1141 * _TRUNCATED Not enough room in buffer at fmri.
1143 static int
1144 rc_get_fmri_and_concat(rc_node_t *np, char *fmri, size_t size, size_t *sz_out,
1145 const char *element, rep_protocol_entity_t type)
1147 int rc;
1149 if ((rc = rc_node_get_fmri_or_fragment(np, fmri, size, sz_out)) !=
1150 REP_PROTOCOL_SUCCESS) {
1151 return (rc);
1153 if ((rc = rc_concat_fmri_element(fmri, size, sz_out, element, type)) !=
1154 REP_PROTOCOL_SUCCESS) {
1155 return (rc);
1158 return (REP_PROTOCOL_SUCCESS);
1161 static int
1162 rc_notify_info_interested(rc_notify_info_t *rnip, rc_notify_t *np)
1164 rc_node_t *nnp = np->rcn_node;
1165 int i;
1167 assert(MUTEX_HELD(&rc_pg_notify_lock));
1169 if (np->rcn_delete != NULL) {
1170 assert(np->rcn_info == NULL && np->rcn_node == NULL);
1171 return (1); /* everyone likes deletes */
1173 if (np->rcn_node == NULL) {
1174 assert(np->rcn_info != NULL || np->rcn_delete != NULL);
1175 return (0);
1177 assert(np->rcn_info == NULL);
1179 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
1180 if (rnip->rni_namelist[i] != NULL) {
1181 if (strcmp(nnp->rn_name, rnip->rni_namelist[i]) == 0)
1182 return (1);
1184 if (rnip->rni_typelist[i] != NULL) {
1185 if (strcmp(nnp->rn_type, rnip->rni_typelist[i]) == 0)
1186 return (1);
1189 return (0);
1192 static void
1193 rc_notify_insert_node(rc_node_t *nnp)
1195 rc_notify_t *np = &nnp->rn_notify;
1196 rc_notify_info_t *nip;
1197 int found = 0;
1199 assert(np->rcn_info == NULL);
1201 if (nnp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
1202 return;
1204 (void) pthread_mutex_lock(&rc_pg_notify_lock);
1205 np->rcn_node = nnp;
1206 for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
1207 nip = uu_list_next(rc_notify_info_list, nip)) {
1208 if (rc_notify_info_interested(nip, np)) {
1209 (void) pthread_cond_broadcast(&nip->rni_cv);
1210 found++;
1213 if (found)
1214 (void) uu_list_insert_before(rc_notify_list, NULL, np);
1215 else
1216 np->rcn_node = NULL;
1218 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
1221 static void
1222 rc_notify_deletion(rc_notify_delete_t *ndp, const char *service,
1223 const char *instance, const char *pg)
1225 rc_notify_info_t *nip;
1227 uu_list_node_init(&ndp->rnd_notify, &ndp->rnd_notify.rcn_list_node,
1228 rc_notify_pool);
1229 ndp->rnd_notify.rcn_delete = ndp;
1231 (void) snprintf(ndp->rnd_fmri, sizeof (ndp->rnd_fmri),
1232 "svc:/%s%s%s%s%s", service,
1233 (instance != NULL)? ":" : "", (instance != NULL)? instance : "",
1234 (pg != NULL)? "/:properties/" : "", (pg != NULL)? pg : "");
1237 * add to notification list, notify watchers
1239 (void) pthread_mutex_lock(&rc_pg_notify_lock);
1240 for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
1241 nip = uu_list_next(rc_notify_info_list, nip))
1242 (void) pthread_cond_broadcast(&nip->rni_cv);
1243 (void) uu_list_insert_before(rc_notify_list, NULL, ndp);
1244 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
1247 static void
1248 rc_notify_remove_node(rc_node_t *nnp)
1250 rc_notify_t *np = &nnp->rn_notify;
1252 assert(np->rcn_info == NULL);
1253 assert(!MUTEX_HELD(&nnp->rn_lock));
1255 (void) pthread_mutex_lock(&rc_pg_notify_lock);
1256 while (np->rcn_node != NULL) {
1257 if (rc_notify_in_use) {
1258 (void) pthread_cond_wait(&rc_pg_notify_cv,
1259 &rc_pg_notify_lock);
1260 continue;
1262 (void) uu_list_remove(rc_notify_list, np);
1263 np->rcn_node = NULL;
1264 break;
1266 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
1269 static void
1270 rc_notify_remove_locked(rc_notify_t *np)
1272 assert(MUTEX_HELD(&rc_pg_notify_lock));
1273 assert(rc_notify_in_use == 0);
1275 (void) uu_list_remove(rc_notify_list, np);
1276 if (np->rcn_node) {
1277 np->rcn_node = NULL;
1278 } else if (np->rcn_delete) {
1279 uu_free(np->rcn_delete);
1280 } else {
1281 assert(0); /* CAN'T HAPPEN */
1286 * Permission checking functions. See comment atop this file.
1288 #ifndef NATIVE_BUILD
1289 static permcheck_t *
1290 pc_create()
1292 permcheck_t *p;
1294 p = uu_zalloc(sizeof (*p));
1295 if (p == NULL)
1296 return (NULL);
1297 p->pc_bnum = 8; /* Normal case will only have 2 elts. */
1298 p->pc_buckets = uu_zalloc(sizeof (*p->pc_buckets) * p->pc_bnum);
1299 if (p->pc_buckets == NULL) {
1300 uu_free(p);
1301 return (NULL);
1304 p->pc_enum = 0;
1305 return (p);
1308 static void
1309 pc_free(permcheck_t *pcp)
1311 uint_t i;
1312 struct pc_elt *ep, *next;
1314 for (i = 0; i < pcp->pc_bnum; ++i) {
1315 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
1316 next = ep->pce_next;
1317 free(ep);
1321 free(pcp->pc_buckets);
1322 free(pcp);
1325 static uint32_t
1326 pc_hash(const char *auth)
1328 uint32_t h = 0, g;
1329 const char *p;
1332 * Generic hash function from uts/common/os/modhash.c.
1334 for (p = auth; *p != '\0'; ++p) {
1335 h = (h << 4) + *p;
1336 g = (h & 0xf0000000);
1337 if (g != 0) {
1338 h ^= (g >> 24);
1339 h ^= g;
1343 return (h);
1346 static perm_status_t
1347 pc_exists(permcheck_t *pcp, const char *auth)
1349 uint32_t h;
1350 struct pc_elt *ep;
1352 h = pc_hash(auth);
1353 for (ep = pcp->pc_buckets[h & (pcp->pc_bnum - 1)];
1354 ep != NULL;
1355 ep = ep->pce_next) {
1356 if (strcmp(auth, ep->pce_auth) == 0) {
1357 pcp->pc_auth_string = ep->pce_auth;
1358 return (PERM_GRANTED);
1362 return (PERM_DENIED);
1365 static perm_status_t
1366 pc_match(permcheck_t *pcp, const char *pattern)
1368 uint_t i;
1369 struct pc_elt *ep;
1371 for (i = 0; i < pcp->pc_bnum; ++i) {
1372 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = ep->pce_next) {
1373 if (_auth_match(pattern, ep->pce_auth)) {
1374 pcp->pc_auth_string = ep->pce_auth;
1375 return (PERM_GRANTED);
1380 return (PERM_DENIED);
1383 static int
1384 pc_grow(permcheck_t *pcp)
1386 uint_t new_bnum, i, j;
1387 struct pc_elt **new_buckets;
1388 struct pc_elt *ep, *next;
1390 new_bnum = pcp->pc_bnum * 2;
1391 if (new_bnum < pcp->pc_bnum)
1392 /* Homey don't play that. */
1393 return (-1);
1395 new_buckets = uu_zalloc(sizeof (*new_buckets) * new_bnum);
1396 if (new_buckets == NULL)
1397 return (-1);
1399 for (i = 0; i < pcp->pc_bnum; ++i) {
1400 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
1401 next = ep->pce_next;
1402 j = pc_hash(ep->pce_auth) & (new_bnum - 1);
1403 ep->pce_next = new_buckets[j];
1404 new_buckets[j] = ep;
1408 uu_free(pcp->pc_buckets);
1409 pcp->pc_buckets = new_buckets;
1410 pcp->pc_bnum = new_bnum;
1412 return (0);
1415 static int
1416 pc_add(permcheck_t *pcp, const char *auth, pc_auth_type_t auth_type)
1418 struct pc_elt *ep;
1419 uint_t i;
1421 ep = uu_zalloc(offsetof(struct pc_elt, pce_auth) + strlen(auth) + 1);
1422 if (ep == NULL)
1423 return (-1);
1425 /* Grow if pc_enum / pc_bnum > 3/4. */
1426 if (pcp->pc_enum * 4 > 3 * pcp->pc_bnum)
1427 /* Failure is not a stopper; we'll try again next time. */
1428 (void) pc_grow(pcp);
1430 (void) strcpy(ep->pce_auth, auth);
1432 i = pc_hash(auth) & (pcp->pc_bnum - 1);
1433 ep->pce_next = pcp->pc_buckets[i];
1434 pcp->pc_buckets[i] = ep;
1436 if (auth_type > pcp->pc_specific_type) {
1437 pcp->pc_specific_type = auth_type;
1438 pcp->pc_specific = ep;
1441 ++pcp->pc_enum;
1443 return (0);
1447 * For the type of a property group, return the authorization which may be
1448 * used to modify it.
1450 static const char *
1451 perm_auth_for_pgtype(const char *pgtype)
1453 if (strcmp(pgtype, SCF_GROUP_METHOD) == 0)
1454 return (AUTH_MODIFY_PREFIX "method");
1455 else if (strcmp(pgtype, SCF_GROUP_DEPENDENCY) == 0)
1456 return (AUTH_MODIFY_PREFIX "dependency");
1457 else if (strcmp(pgtype, SCF_GROUP_APPLICATION) == 0)
1458 return (AUTH_MODIFY_PREFIX "application");
1459 else if (strcmp(pgtype, SCF_GROUP_FRAMEWORK) == 0)
1460 return (AUTH_MODIFY_PREFIX "framework");
1461 else
1462 return (NULL);
1466 * Fails with
1467 * _NO_RESOURCES - out of memory
1469 static int
1470 perm_add_enabling_type(permcheck_t *pcp, const char *auth,
1471 pc_auth_type_t auth_type)
1473 return (pc_add(pcp, auth, auth_type) == 0 ? REP_PROTOCOL_SUCCESS :
1474 REP_PROTOCOL_FAIL_NO_RESOURCES);
1478 * Fails with
1479 * _NO_RESOURCES - out of memory
1481 static int
1482 perm_add_enabling(permcheck_t *pcp, const char *auth)
1484 return (perm_add_enabling_type(pcp, auth, PC_AUTH_SMF));
1487 /* Note that perm_add_enabling_values() is defined below. */
1490 * perm_granted() returns PERM_GRANTED if the current door caller has one of
1491 * the enabling authorizations in pcp, PERM_DENIED if it doesn't, PERM_GONE if
1492 * the door client went away and PERM_FAIL if an error (usually lack of
 1493 * memory) occurs. auth_cb() checks each and every authorization as
1494 * enumerated by _enum_auths. When we find a result other than PERM_DENIED,
1495 * we short-cut the enumeration and return non-zero.
1498 static int
1499 auth_cb(const char *auth, void *ctxt, void *vres)
1501 permcheck_t *pcp = ctxt;
1502 int *pret = vres;
1504 if (strchr(auth, KV_WILDCHAR) == NULL)
1505 *pret = pc_exists(pcp, auth);
1506 else
1507 *pret = pc_match(pcp, auth);
1509 if (*pret != PERM_DENIED)
1510 return (1);
1512 * If we failed, choose the most specific auth string for use in
1513 * the audit event.
1515 assert(pcp->pc_specific != NULL);
1516 pcp->pc_auth_string = pcp->pc_specific->pce_auth;
 1518 return (0); /* Tell the caller to continue enumerating. */
1521 static perm_status_t
1522 perm_granted(permcheck_t *pcp)
1524 ucred_t *uc;
1526 perm_status_t ret = PERM_DENIED;
1527 uid_t uid;
1528 struct passwd pw;
1529 char pwbuf[1024]; /* XXX should be NSS_BUFLEN_PASSWD */
1531 /* Get the uid */
1532 if ((uc = get_ucred()) == NULL) {
1533 if (errno == EINVAL) {
1535 * Client is no longer waiting for our response (e.g.,
1536 * it received a signal & resumed with EINTR).
1537 * Punting with door_return() would be nice but we
1538 * need to release all of the locks & references we
1539 * hold. And we must report failure to the client
1540 * layer to keep it from ignoring retries as
1541 * already-done (idempotency & all that). None of the
1542 * error codes fit very well, so we might as well
1543 * force the return of _PERMISSION_DENIED since we
1544 * couldn't determine the user.
1546 return (PERM_GONE);
1548 assert(0);
1549 abort();
1552 uid = ucred_geteuid(uc);
1553 assert(uid != (uid_t)-1);
1555 if (getpwuid_r(uid, &pw, pwbuf, sizeof (pwbuf)) == NULL) {
1556 return (PERM_FAIL);
1560 * Enumerate all the auths defined for the user and return the
1561 * result in ret.
1563 if (_enum_auths(pw.pw_name, auth_cb, pcp, &ret) < 0)
1564 return (PERM_FAIL);
1566 return (ret);
1569 static int
1570 map_granted_status(perm_status_t status, permcheck_t *pcp,
1571 char **match_auth)
1573 int rc;
1575 *match_auth = NULL;
1576 switch (status) {
1577 case PERM_DENIED:
1578 *match_auth = strdup(pcp->pc_auth_string);
1579 if (*match_auth == NULL)
1580 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1581 else
1582 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
1583 break;
1584 case PERM_GRANTED:
1585 *match_auth = strdup(pcp->pc_auth_string);
1586 if (*match_auth == NULL)
1587 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1588 else
1589 rc = REP_PROTOCOL_SUCCESS;
1590 break;
1591 case PERM_GONE:
1592 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
1593 break;
1594 case PERM_FAIL:
1595 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1596 break;
1598 return (rc);
1600 #endif /* NATIVE_BUILD */
1603 * flags in RC_NODE_WAITING_FLAGS are broadcast when unset, and are used to
 1604 * serialize certain actions and to wait for certain operations to complete.
1606 * The waiting flags are:
1607 * RC_NODE_CHILDREN_CHANGING
1608 * The child list is being built or changed (due to creation
1609 * or deletion). All iterators pause.
1611 * RC_NODE_USING_PARENT
1612 * Someone is actively using the parent pointer, so we can't
1613 * be removed from the parent list.
1615 * RC_NODE_CREATING_CHILD
1616 * A child is being created -- locks out other creations, to
1617 * prevent insert-insert races.
1619 * RC_NODE_IN_TX
1620 * This object is running a transaction.
1622 * RC_NODE_DYING
1623 * This node might be dying. Always set as a set, using
1624 * RC_NODE_DYING_FLAGS (which is everything but
1625 * RC_NODE_USING_PARENT)
1627 static int
1628 rc_node_hold_flag(rc_node_t *np, uint32_t flag)
1630 assert(MUTEX_HELD(&np->rn_lock));
1631 assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
1633 while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag)) {
1634 (void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1636 if (np->rn_flags & RC_NODE_DEAD)
1637 return (0);
1639 np->rn_flags |= flag;
1640 return (1);
1643 static void
1644 rc_node_rele_flag(rc_node_t *np, uint32_t flag)
1646 assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
1647 assert(MUTEX_HELD(&np->rn_lock));
1648 assert((np->rn_flags & flag) == flag);
1649 np->rn_flags &= ~flag;
1650 (void) pthread_cond_broadcast(&np->rn_cv);
1654 * wait until a particular flag has cleared. Fails if the object dies.
1656 static int
1657 rc_node_wait_flag(rc_node_t *np, uint32_t flag)
1659 assert(MUTEX_HELD(&np->rn_lock));
1660 while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag))
1661 (void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1663 return (!(np->rn_flags & RC_NODE_DEAD));
1667 * On entry, np's lock must be held, and this thread must be holding
1668 * RC_NODE_USING_PARENT. On return, both of them are released.
1670 * If the return value is NULL, np either does not have a parent, or
1671 * the parent has been marked DEAD.
1673 * If the return value is non-NULL, it is the parent of np, and both
1674 * its lock and the requested flags are held.
1676 static rc_node_t *
1677 rc_node_hold_parent_flag(rc_node_t *np, uint32_t flag)
1679 rc_node_t *pp;
1681 assert(MUTEX_HELD(&np->rn_lock));
1682 assert(np->rn_flags & RC_NODE_USING_PARENT);
1684 if ((pp = np->rn_parent) == NULL) {
1685 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1686 (void) pthread_mutex_unlock(&np->rn_lock);
1687 return (NULL);
1689 (void) pthread_mutex_unlock(&np->rn_lock);
1691 (void) pthread_mutex_lock(&pp->rn_lock);
1692 (void) pthread_mutex_lock(&np->rn_lock);
1693 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1694 (void) pthread_mutex_unlock(&np->rn_lock);
1696 if (!rc_node_hold_flag(pp, flag)) {
1697 (void) pthread_mutex_unlock(&pp->rn_lock);
1698 return (NULL);
1700 return (pp);
1703 rc_node_t *
1704 rc_node_alloc(void)
1706 rc_node_t *np = uu_zalloc(sizeof (*np));
1708 if (np == NULL)
1709 return (NULL);
1711 (void) pthread_mutex_init(&np->rn_lock, NULL);
1712 (void) pthread_cond_init(&np->rn_cv, NULL);
1714 np->rn_children = uu_list_create(rc_children_pool, np, 0);
1715 np->rn_pg_notify_list = uu_list_create(rc_pg_notify_pool, np, 0);
1717 uu_list_node_init(np, &np->rn_sibling_node, rc_children_pool);
1719 uu_list_node_init(&np->rn_notify, &np->rn_notify.rcn_list_node,
1720 rc_notify_pool);
1722 return (np);
1725 void
1726 rc_node_destroy(rc_node_t *np)
1728 int i;
1730 if (np->rn_flags & RC_NODE_UNREFED)
1731 return; /* being handled elsewhere */
1733 assert(np->rn_refs == 0 && np->rn_other_refs == 0);
1734 assert(np->rn_former == NULL);
1736 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
1737 /* Release the holds from rc_iter_next(). */
1738 for (i = 0; i < COMPOSITION_DEPTH; ++i) {
1739 /* rn_cchain[i] may be NULL for empty snapshots. */
1740 if (np->rn_cchain[i] != NULL)
1741 rc_node_rele(np->rn_cchain[i]);
1745 if (np->rn_name != NULL)
1746 free((void *)np->rn_name);
1747 np->rn_name = NULL;
1748 if (np->rn_type != NULL)
1749 free((void *)np->rn_type);
1750 np->rn_type = NULL;
1751 if (np->rn_values != NULL)
1752 object_free_values(np->rn_values, np->rn_valtype,
1753 np->rn_values_count, np->rn_values_size);
1754 np->rn_values = NULL;
1755 rc_node_free_fmri(np);
1757 if (np->rn_snaplevel != NULL)
1758 rc_snaplevel_rele(np->rn_snaplevel);
1759 np->rn_snaplevel = NULL;
1761 uu_list_node_fini(np, &np->rn_sibling_node, rc_children_pool);
1763 uu_list_node_fini(&np->rn_notify, &np->rn_notify.rcn_list_node,
1764 rc_notify_pool);
1766 assert(uu_list_first(np->rn_children) == NULL);
1767 uu_list_destroy(np->rn_children);
1768 uu_list_destroy(np->rn_pg_notify_list);
1770 (void) pthread_mutex_destroy(&np->rn_lock);
1771 (void) pthread_cond_destroy(&np->rn_cv);
1773 uu_free(np);
1777 * Link in a child node.
1779 * Because of the lock ordering, cp has to already be in the hash table with
1780 * its lock dropped before we get it. To prevent anyone from noticing that
1781 * it is parentless, the creation code sets the RC_NODE_USING_PARENT flag. Once
1782 * we've linked it in, we release the flag.
1784 static void
1785 rc_node_link_child(rc_node_t *np, rc_node_t *cp)
1787 assert(!MUTEX_HELD(&np->rn_lock));
1788 assert(!MUTEX_HELD(&cp->rn_lock));
1790 (void) pthread_mutex_lock(&np->rn_lock);
1791 (void) pthread_mutex_lock(&cp->rn_lock);
1792 assert(!(cp->rn_flags & RC_NODE_IN_PARENT) &&
1793 (cp->rn_flags & RC_NODE_USING_PARENT));
1795 assert(rc_check_parent_child(np->rn_id.rl_type, cp->rn_id.rl_type) ==
1796 REP_PROTOCOL_SUCCESS);
1798 cp->rn_parent = np;
1799 cp->rn_flags |= RC_NODE_IN_PARENT;
1800 (void) uu_list_insert_before(np->rn_children, NULL, cp);
1801 (void) rc_node_build_fmri(cp);
1803 (void) pthread_mutex_unlock(&np->rn_lock);
1805 rc_node_rele_flag(cp, RC_NODE_USING_PARENT);
1806 (void) pthread_mutex_unlock(&cp->rn_lock);
1810 * Sets the rn_parent_ref field of all the children of np to pp -- always
1811 * initially invoked as rc_node_setup_parent_ref(np, np), we then recurse.
1813 * This is used when we mark a node RC_NODE_OLD, so that when the object and
1814 * its children are no longer referenced, they will all be deleted as a unit.
1816 static void
1817 rc_node_setup_parent_ref(rc_node_t *np, rc_node_t *pp)
1819 rc_node_t *cp;
1821 assert(MUTEX_HELD(&np->rn_lock));
1823 for (cp = uu_list_first(np->rn_children); cp != NULL;
1824 cp = uu_list_next(np->rn_children, cp)) {
1825 (void) pthread_mutex_lock(&cp->rn_lock);
1826 if (cp->rn_flags & RC_NODE_PARENT_REF) {
1827 assert(cp->rn_parent_ref == pp);
1828 } else {
1829 assert(cp->rn_parent_ref == NULL);
1831 cp->rn_flags |= RC_NODE_PARENT_REF;
1832 cp->rn_parent_ref = pp;
1833 if (cp->rn_refs != 0)
1834 rc_node_hold_other(pp);
1836 rc_node_setup_parent_ref(cp, pp); /* recurse */
1837 (void) pthread_mutex_unlock(&cp->rn_lock);
1842 * Atomically replace 'np' with 'newp', with a parent of 'pp'.
1844 * Requirements:
1845 * *no* node locks may be held.
1846 * pp must be held with RC_NODE_CHILDREN_CHANGING
1847 * newp and np must be held with RC_NODE_IN_TX
1848 * np must be marked RC_NODE_IN_PARENT, newp must not be
1849 * np must be marked RC_NODE_OLD
1851 * Afterwards:
1852 * pp's RC_NODE_CHILDREN_CHANGING is dropped
1853 * newp and np's RC_NODE_IN_TX is dropped
1854 * newp->rn_former = np;
1855 * newp is RC_NODE_IN_PARENT, np is not.
1856 * interested notify subscribers have been notified of newp's new status.
1858 static void
1859 rc_node_relink_child(rc_node_t *pp, rc_node_t *np, rc_node_t *newp)
1861 cache_bucket_t *bp;
1863 * First, swap np and newp in the cache. newp's RC_NODE_IN_TX flag
1864 * keeps rc_node_update() from seeing it until we are done.
1866 bp = cache_hold(newp->rn_hash);
1867 cache_remove_unlocked(bp, np);
1868 cache_insert_unlocked(bp, newp);
1869 cache_release(bp);
1872 * replace np with newp in pp's list, and attach it to newp's rn_former
1873 * link.
1875 (void) pthread_mutex_lock(&pp->rn_lock);
1876 assert(pp->rn_flags & RC_NODE_CHILDREN_CHANGING);
1878 (void) pthread_mutex_lock(&newp->rn_lock);
1879 assert(!(newp->rn_flags & RC_NODE_IN_PARENT));
1880 assert(newp->rn_flags & RC_NODE_IN_TX);
1882 (void) pthread_mutex_lock(&np->rn_lock);
1883 assert(np->rn_flags & RC_NODE_IN_PARENT);
1884 assert(np->rn_flags & RC_NODE_OLD);
1885 assert(np->rn_flags & RC_NODE_IN_TX);
1887 newp->rn_parent = pp;
1888 newp->rn_flags |= RC_NODE_IN_PARENT;
1891 * Note that we carefully add newp before removing np -- this
1892 * keeps iterators on the list from missing us.
1894 (void) uu_list_insert_after(pp->rn_children, np, newp);
1895 (void) rc_node_build_fmri(newp);
1896 (void) uu_list_remove(pp->rn_children, np);
1899 * detach np -- it becomes newp's rn_former and drops out of the tree
1901 newp->rn_former = np;
1902 np->rn_parent = NULL;
1903 np->rn_flags &= ~RC_NODE_IN_PARENT;
1904 np->rn_flags |= RC_NODE_ON_FORMER;
1906 rc_notify_insert_node(newp);
1908 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
1909 (void) pthread_mutex_unlock(&pp->rn_lock);
1910 rc_node_rele_flag(newp, RC_NODE_USING_PARENT | RC_NODE_IN_TX);
1911 (void) pthread_mutex_unlock(&newp->rn_lock);
1912 rc_node_setup_parent_ref(np, np);
1913 rc_node_rele_flag(np, RC_NODE_IN_TX);
1914 (void) pthread_mutex_unlock(&np->rn_lock);
1918 * makes sure a node with lookup 'nip', name 'name', and parent 'pp' exists.
1919 * 'cp' is used (and returned) if the node does not yet exist. If it does
1920 * exist, 'cp' is freed, and the existent node is returned instead.
1922 rc_node_t *
1923 rc_node_setup(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
1924 rc_node_t *pp)
1926 rc_node_t *np;
1927 cache_bucket_t *bp;
1928 uint32_t h = rc_node_hash(nip);
1930 assert(cp->rn_refs == 0);
1932 bp = cache_hold(h);
1933 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
1934 cache_release(bp);
1937 * make sure it matches our expectations
1939 (void) pthread_mutex_lock(&np->rn_lock);
1940 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
1941 assert(np->rn_parent == pp);
1942 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
1943 assert(strcmp(np->rn_name, name) == 0);
1944 assert(np->rn_type == NULL);
1945 assert(np->rn_flags & RC_NODE_IN_PARENT);
1946 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1948 (void) pthread_mutex_unlock(&np->rn_lock);
1950 rc_node_destroy(cp);
1951 return (np);
1955 * No one is there -- set up & install the new node.
1957 np = cp;
1958 rc_node_hold(np);
1959 np->rn_id = *nip;
1960 np->rn_hash = h;
1961 np->rn_name = strdup(name);
1963 np->rn_flags |= RC_NODE_USING_PARENT;
1965 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE) {
1966 #if COMPOSITION_DEPTH == 2
1967 np->rn_cchain[0] = np;
1968 np->rn_cchain[1] = pp;
1969 #else
1970 #error This code must be updated.
1971 #endif
1974 cache_insert_unlocked(bp, np);
1975 cache_release(bp); /* we are now visible */
1977 rc_node_link_child(pp, np);
1979 return (np);
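/*
 * Illustrative sketch (not part of the original code): the intended
 * lookup-or-install usage of rc_node_setup().  The caller allocates a
 * candidate node up front; if a matching node is already in the cache,
 * the candidate is destroyed and the existing node is returned instead.
 * 'lookup', 'name' and 'parent' stand for values the real callers obtain
 * from the object layer:
 *
 *	rc_node_t *cp, *np;
 *
 *	if ((cp = rc_node_alloc()) == NULL)
 *		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
 *	np = rc_node_setup(cp, &lookup, name, parent);
 *	... np is either cp, now linked under parent and visible in the
 *	    cache, or the node that was already installed ...
 */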
1983 * makes sure a snapshot with lookup 'nip', name 'name', and parent 'pp' exists.
1984 * 'cp' is used (and returned) if the node does not yet exist. If it does
1985 * exist, 'cp' is freed, and the existent node is returned instead.
1987 rc_node_t *
1988 rc_node_setup_snapshot(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
1989 uint32_t snap_id, rc_node_t *pp)
1991 rc_node_t *np;
1992 cache_bucket_t *bp;
1993 uint32_t h = rc_node_hash(nip);
1995 assert(cp->rn_refs == 0);
1997 bp = cache_hold(h);
1998 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
1999 cache_release(bp);
2002 * make sure it matches our expectations
2004 (void) pthread_mutex_lock(&np->rn_lock);
2005 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2006 assert(np->rn_parent == pp);
2007 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2008 assert(strcmp(np->rn_name, name) == 0);
2009 assert(np->rn_type == NULL);
2010 assert(np->rn_flags & RC_NODE_IN_PARENT);
2011 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2013 (void) pthread_mutex_unlock(&np->rn_lock);
2015 rc_node_destroy(cp);
2016 return (np);
2020 * No one is there -- create a new node.
2022 np = cp;
2023 rc_node_hold(np);
2024 np->rn_id = *nip;
2025 np->rn_hash = h;
2026 np->rn_name = strdup(name);
2027 np->rn_snapshot_id = snap_id;
2029 np->rn_flags |= RC_NODE_USING_PARENT;
2031 cache_insert_unlocked(bp, np);
2032 cache_release(bp); /* we are now visible */
2034 rc_node_link_child(pp, np);
2036 return (np);
2040 * makes sure a snaplevel with lookup 'nip' and parent 'pp' exists. 'cp' is
2041 * used (and returned) if the node does not yet exist. If it does exist, 'cp'
2042 * is freed, and the existent node is returned instead.
2044 rc_node_t *
2045 rc_node_setup_snaplevel(rc_node_t *cp, rc_node_lookup_t *nip,
2046 rc_snaplevel_t *lvl, rc_node_t *pp)
2048 rc_node_t *np;
2049 cache_bucket_t *bp;
2050 uint32_t h = rc_node_hash(nip);
2052 assert(cp->rn_refs == 0);
2054 bp = cache_hold(h);
2055 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2056 cache_release(bp);
2059 * make sure it matches our expectations
2061 (void) pthread_mutex_lock(&np->rn_lock);
2062 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2063 assert(np->rn_parent == pp);
2064 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2065 assert(np->rn_name == NULL);
2066 assert(np->rn_type == NULL);
2067 assert(np->rn_flags & RC_NODE_IN_PARENT);
2068 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2070 (void) pthread_mutex_unlock(&np->rn_lock);
2072 rc_node_destroy(cp);
2073 return (np);
2077 * No one is there -- create a new node.
2079 np = cp;
2080 rc_node_hold(np); /* released in snapshot_fill_children() */
2081 np->rn_id = *nip;
2082 np->rn_hash = h;
2084 rc_snaplevel_hold(lvl);
2085 np->rn_snaplevel = lvl;
2087 np->rn_flags |= RC_NODE_USING_PARENT;
2089 cache_insert_unlocked(bp, np);
2090 cache_release(bp); /* we are now visible */
2092 /* Add this snaplevel to the snapshot's composition chain. */
2093 assert(pp->rn_cchain[lvl->rsl_level_num - 1] == NULL);
2094 pp->rn_cchain[lvl->rsl_level_num - 1] = np;
2096 rc_node_link_child(pp, np);
2098 return (np);
2102 * Returns NULL if strdup() fails.
2104 rc_node_t *
2105 rc_node_setup_pg(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
2106 const char *type, uint32_t flags, uint32_t gen_id, rc_node_t *pp)
2108 rc_node_t *np;
2109 cache_bucket_t *bp;
2111 uint32_t h = rc_node_hash(nip);
2112 bp = cache_hold(h);
2113 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2114 cache_release(bp);
2117 * make sure it matches our expectations (don't check
2118 * the generation number or parent, since someone could
2119 * have gotten a transaction through while we weren't
2120 * looking)
2122 (void) pthread_mutex_lock(&np->rn_lock);
2123 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2124 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2125 assert(strcmp(np->rn_name, name) == 0);
2126 assert(strcmp(np->rn_type, type) == 0);
2127 assert(np->rn_pgflags == flags);
2128 assert(np->rn_flags & RC_NODE_IN_PARENT);
2129 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2131 (void) pthread_mutex_unlock(&np->rn_lock);
2133 rc_node_destroy(cp);
2134 return (np);
2137 np = cp;
2138 rc_node_hold(np); /* released in fill_pg_callback() */
2139 np->rn_id = *nip;
2140 np->rn_hash = h;
2141 np->rn_name = strdup(name);
2142 if (np->rn_name == NULL) {
2143 rc_node_rele(np);
2144 return (NULL);
2146 np->rn_type = strdup(type);
2147 if (np->rn_type == NULL) {
2148 free((void *)np->rn_name);
2149 rc_node_rele(np);
2150 return (NULL);
2152 np->rn_pgflags = flags;
2153 np->rn_gen_id = gen_id;
2155 np->rn_flags |= RC_NODE_USING_PARENT;
2157 cache_insert_unlocked(bp, np);
2158 cache_release(bp); /* we are now visible */
2160 rc_node_link_child(pp, np);
2162 return (np);
2165 #if COMPOSITION_DEPTH == 2
2167 * Initialize a "composed property group" which represents the composition of
2168 * property groups pg1 & pg2. It is ephemeral: once created & returned for an
2169 * ITER_READ request, keeping it out of cache_hash and any child lists
2170 * prevents it from being looked up. Operations besides iteration are passed
2171 * through to pg1.
2173 * pg1 & pg2 should be held before entering this function. They will be
2174 * released in rc_node_destroy().
2176 static int
2177 rc_node_setup_cpg(rc_node_t *cpg, rc_node_t *pg1, rc_node_t *pg2)
2179 if (strcmp(pg1->rn_type, pg2->rn_type) != 0)
2180 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2182 cpg->rn_id.rl_type = REP_PROTOCOL_ENTITY_CPROPERTYGRP;
2183 cpg->rn_name = strdup(pg1->rn_name);
2184 if (cpg->rn_name == NULL)
2185 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2187 cpg->rn_cchain[0] = pg1;
2188 cpg->rn_cchain[1] = pg2;
2190 return (REP_PROTOCOL_SUCCESS);
2192 #else
2193 #error This code must be updated.
2194 #endif
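/*
 * Illustrative sketch (not part of the original code): with
 * COMPOSITION_DEPTH == 2 a composed property group simply records the
 * two underlying property groups, and non-iteration operations are
 * passed through to rn_cchain[0].  'inst_pg' and 'svc_pg' stand for the
 * instance- and service-level property groups being composed; both must
 * already be held, and rc_node_destroy() releases them:
 *
 *	rc_node_t *cpg = rc_node_alloc();
 *
 *	if (cpg != NULL &&
 *	    rc_node_setup_cpg(cpg, inst_pg, svc_pg) == REP_PROTOCOL_SUCCESS) {
 *		... cpg->rn_cchain[0] == inst_pg, cpg->rn_cchain[1] == svc_pg ...
 *	}
 */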
2197 * Fails with _NO_RESOURCES.
2200 rc_node_create_property(rc_node_t *pp, rc_node_lookup_t *nip,
2201 const char *name, rep_protocol_value_type_t type,
2202 const char *vals, size_t count, size_t size)
2204 rc_node_t *np;
2205 cache_bucket_t *bp;
2207 uint32_t h = rc_node_hash(nip);
2208 bp = cache_hold(h);
2209 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2210 cache_release(bp);
2212 * make sure it matches our expectations
2214 (void) pthread_mutex_lock(&np->rn_lock);
2215 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2216 assert(np->rn_parent == pp);
2217 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2218 assert(strcmp(np->rn_name, name) == 0);
2219 assert(np->rn_valtype == type);
2220 assert(np->rn_values_count == count);
2221 assert(np->rn_values_size == size);
2222 assert(vals == NULL ||
2223 memcmp(np->rn_values, vals, size) == 0);
2224 assert(np->rn_flags & RC_NODE_IN_PARENT);
2225 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2227 rc_node_rele_locked(np);
2228 object_free_values(vals, type, count, size);
2229 return (REP_PROTOCOL_SUCCESS);
2233 * No one is there -- create a new node.
2235 np = rc_node_alloc();
2236 if (np == NULL) {
2237 cache_release(bp);
2238 object_free_values(vals, type, count, size);
2239 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2241 np->rn_id = *nip;
2242 np->rn_hash = h;
2243 np->rn_name = strdup(name);
2244 if (np->rn_name == NULL) {
2245 cache_release(bp);
2246 object_free_values(vals, type, count, size);
2247 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2250 np->rn_valtype = type;
2251 np->rn_values = vals;
2252 np->rn_values_count = count;
2253 np->rn_values_size = size;
2255 np->rn_flags |= RC_NODE_USING_PARENT;
2257 cache_insert_unlocked(bp, np);
2258 cache_release(bp); /* we are now visible */
2260 rc_node_link_child(pp, np);
2262 return (REP_PROTOCOL_SUCCESS);
2266 * This function implements a decision table to determine the event ID for
2267 * changes to the enabled (SCF_PROPERTY_ENABLED) property. The event ID is
2268 * determined by the value of the first property in the command specified
2269 * by cmd_no and the name of the property group. Here is the decision
2270 * table:
2272 * Property Group Name
2273 * Property ------------------------------------------
2274 * Value SCF_PG_GENERAL SCF_PG_GENERAL_OVR
2275 * -------- -------------- ------------------
2276 * "0" ADT_smf_disable ADT_smf_tmp_disable
2277 * "1" ADT_smf_enable ADT_smf_tmp_enable
2279 * This function is called by special_property_event through a function
2280 * pointer in the special_props_list array.
2282 * Since the ADT_smf_* symbols may not be defined in the build machine's
2283 * include files, this function is not compiled when doing native builds.
2285 #ifndef NATIVE_BUILD
2286 static int
2287 general_enable_id(tx_commit_data_t *tx_data, size_t cmd_no, const char *pg,
2288 au_event_t *event_id)
2290 const char *value;
2291 uint32_t nvalues;
2292 int enable;
2295 * First, check property value.
2297 if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
2298 return (-1);
2299 if (nvalues == 0)
2300 return (-1);
2301 if (tx_cmd_value(tx_data, cmd_no, 0, &value) != REP_PROTOCOL_SUCCESS)
2302 return (-1);
2303 if (strcmp(value, "0") == 0) {
2304 enable = 0;
2305 } else if (strcmp(value, "1") == 0) {
2306 enable = 1;
2307 } else {
2308 return (-1);
2312 * Now check property group name.
2314 if (strcmp(pg, SCF_PG_GENERAL) == 0) {
2315 *event_id = enable ? ADT_smf_enable : ADT_smf_disable;
2316 return (0);
2317 } else if (strcmp(pg, SCF_PG_GENERAL_OVR) == 0) {
2318 *event_id = enable ? ADT_smf_tmp_enable : ADT_smf_tmp_disable;
2319 return (0);
2321 return (-1);
2323 #endif /* NATIVE_BUILD */
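/*
 * Illustrative example (not part of the original code): applying the
 * decision table above.  For a transaction command whose first value of
 * SCF_PROPERTY_ENABLED is "0" in the SCF_PG_GENERAL_OVR property group,
 * general_enable_id() selects the temporary-disable event:
 *
 *	au_event_t id;
 *
 *	if (general_enable_id(tx_data, cmd_no, SCF_PG_GENERAL_OVR, &id) == 0)
 *		assert(id == ADT_smf_tmp_disable);
 *
 * where tx_data/cmd_no are assumed to describe such a command.
 */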
2326 * This function compares two audit_special_prop_item_t structures
2327 * represented by item1 and item2. It returns an integer greater than 0 if
2328 * item1 is greater than item2. It returns 0 if they are equal and an
2329 * integer less than 0 if item1 is less than item2. api_prop_name and
2330 * api_pg_name are the key fields for sorting.
2332 * This function is suitable for calls to bsearch(3C) and qsort(3C).
2334 static int
2335 special_prop_compare(const void *item1, const void *item2)
2337 const audit_special_prop_item_t *a = (audit_special_prop_item_t *)item1;
2338 const audit_special_prop_item_t *b = (audit_special_prop_item_t *)item2;
2339 int r;
2341 r = strcmp(a->api_prop_name, b->api_prop_name);
2342 if (r == 0) {
2344 * Primary keys are the same, so check the secondary key.
2346 r = strcmp(a->api_pg_name, b->api_pg_name);
2348 return (r);
2352 rc_node_init(void)
2354 rc_node_t *np;
2355 cache_bucket_t *bp;
2357 rc_children_pool = uu_list_pool_create("rc_children_pool",
2358 sizeof (rc_node_t), offsetof(rc_node_t, rn_sibling_node),
2359 NULL, UU_LIST_POOL_DEBUG);
2361 rc_pg_notify_pool = uu_list_pool_create("rc_pg_notify_pool",
2362 sizeof (rc_node_pg_notify_t),
2363 offsetof(rc_node_pg_notify_t, rnpn_node),
2364 NULL, UU_LIST_POOL_DEBUG);
2366 rc_notify_pool = uu_list_pool_create("rc_notify_pool",
2367 sizeof (rc_notify_t), offsetof(rc_notify_t, rcn_list_node),
2368 NULL, UU_LIST_POOL_DEBUG);
2370 rc_notify_info_pool = uu_list_pool_create("rc_notify_info_pool",
2371 sizeof (rc_notify_info_t),
2372 offsetof(rc_notify_info_t, rni_list_node),
2373 NULL, UU_LIST_POOL_DEBUG);
2375 if (rc_children_pool == NULL || rc_pg_notify_pool == NULL ||
2376 rc_notify_pool == NULL || rc_notify_info_pool == NULL)
2377 uu_die("out of memory");
2379 rc_notify_list = uu_list_create(rc_notify_pool,
2380 &rc_notify_list, 0);
2382 rc_notify_info_list = uu_list_create(rc_notify_info_pool,
2383 &rc_notify_info_list, 0);
2385 if (rc_notify_list == NULL || rc_notify_info_list == NULL)
2386 uu_die("out of memory");
2389 * Sort the special_props_list array so that it can be searched
2390 * with bsearch(3C).
2392 * The special_props_list array is not compiled into the native
2393 * build code, so there is no need to call qsort if NATIVE_BUILD is
2394 * defined.
2396 #ifndef NATIVE_BUILD
2397 qsort(special_props_list, SPECIAL_PROP_COUNT,
2398 sizeof (special_props_list[0]), special_prop_compare);
2399 #endif /* NATIVE_BUILD */
2401 if ((np = rc_node_alloc()) == NULL)
2402 uu_die("out of memory");
2404 rc_node_hold(np);
2405 np->rn_id.rl_type = REP_PROTOCOL_ENTITY_SCOPE;
2406 np->rn_id.rl_backend = BACKEND_TYPE_NORMAL;
2407 np->rn_hash = rc_node_hash(&np->rn_id);
2408 np->rn_name = "localhost";
2410 bp = cache_hold(np->rn_hash);
2411 cache_insert_unlocked(bp, np);
2412 cache_release(bp);
2414 rc_scope = np;
2415 return (1);
2419 * Fails with
2420 * _INVALID_TYPE - type is invalid
2421 * _TYPE_MISMATCH - np doesn't carry children of type type
2422 * _DELETED - np has been deleted
2423 * _NO_RESOURCES
2425 static int
2426 rc_node_fill_children(rc_node_t *np, uint32_t type)
2428 int rc;
2430 assert(MUTEX_HELD(&np->rn_lock));
2432 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
2433 REP_PROTOCOL_SUCCESS)
2434 return (rc);
2436 if (!rc_node_hold_flag(np, RC_NODE_CHILDREN_CHANGING))
2437 return (REP_PROTOCOL_FAIL_DELETED);
2439 if (np->rn_flags & RC_NODE_HAS_CHILDREN) {
2440 rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
2441 return (REP_PROTOCOL_SUCCESS);
2444 (void) pthread_mutex_unlock(&np->rn_lock);
2445 rc = object_fill_children(np);
2446 (void) pthread_mutex_lock(&np->rn_lock);
2448 if (rc == REP_PROTOCOL_SUCCESS) {
2449 np->rn_flags |= RC_NODE_HAS_CHILDREN;
2451 rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
2453 return (rc);
2457 * Returns
2458 * _INVALID_TYPE - type is invalid
2459 * _TYPE_MISMATCH - np doesn't carry children of type type
2460 * _DELETED - np has been deleted
2461 * _NO_RESOURCES
2462 * _SUCCESS - if *cpp is not NULL, it is held
2464 static int
2465 rc_node_find_named_child(rc_node_t *np, const char *name, uint32_t type,
2466 rc_node_t **cpp)
2468 int ret;
2469 rc_node_t *cp;
2471 assert(MUTEX_HELD(&np->rn_lock));
2472 assert(np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP);
2474 ret = rc_node_fill_children(np, type);
2475 if (ret != REP_PROTOCOL_SUCCESS)
2476 return (ret);
2478 for (cp = uu_list_first(np->rn_children);
2479 cp != NULL;
2480 cp = uu_list_next(np->rn_children, cp)) {
2481 if (cp->rn_id.rl_type == type && strcmp(cp->rn_name, name) == 0)
2482 break;
2485 if (cp != NULL)
2486 rc_node_hold(cp);
2487 *cpp = cp;
2489 return (REP_PROTOCOL_SUCCESS);
2492 static int rc_node_parent(rc_node_t *, rc_node_t **);
2495 * Returns
2496 * _INVALID_TYPE - type is invalid
2497 * _DELETED - np or an ancestor has been deleted
2498 * _NOT_FOUND - no ancestor of specified type exists
2499 * _SUCCESS - *app is held
2501 static int
2502 rc_node_find_ancestor(rc_node_t *np, uint32_t type, rc_node_t **app)
2504 int ret;
2505 rc_node_t *parent, *np_orig;
2507 if (type >= REP_PROTOCOL_ENTITY_MAX)
2508 return (REP_PROTOCOL_FAIL_INVALID_TYPE);
2510 np_orig = np;
2512 while (np->rn_id.rl_type > type) {
2513 ret = rc_node_parent(np, &parent);
2514 if (np != np_orig)
2515 rc_node_rele(np);
2516 if (ret != REP_PROTOCOL_SUCCESS)
2517 return (ret);
2518 np = parent;
2521 if (np->rn_id.rl_type == type) {
2522 *app = parent;
2523 return (REP_PROTOCOL_SUCCESS);
2526 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2529 #ifndef NATIVE_BUILD
2531 * If the propname property exists in pg, and it is of type string, add its
2532 * values as authorizations to pcp. pg must not be locked on entry, and it is
2533 * returned unlocked. Returns
2534 * _DELETED - pg was deleted
2535 * _NO_RESOURCES
2536 * _NOT_FOUND - pg has no property named propname
2537 * _SUCCESS
2539 static int
2540 perm_add_pg_prop_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
2542 rc_node_t *prop;
2543 int result;
2545 uint_t count;
2546 const char *cp;
2548 assert(!MUTEX_HELD(&pg->rn_lock));
2549 assert(pg->rn_id.rl_type == REP_PROTOCOL_ENTITY_PROPERTYGRP);
2551 (void) pthread_mutex_lock(&pg->rn_lock);
2552 result = rc_node_find_named_child(pg, propname,
2553 REP_PROTOCOL_ENTITY_PROPERTY, &prop);
2554 (void) pthread_mutex_unlock(&pg->rn_lock);
2555 if (result != REP_PROTOCOL_SUCCESS) {
2556 switch (result) {
2557 case REP_PROTOCOL_FAIL_DELETED:
2558 case REP_PROTOCOL_FAIL_NO_RESOURCES:
2559 return (result);
2561 case REP_PROTOCOL_FAIL_INVALID_TYPE:
2562 case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
2563 default:
2564 bad_error("rc_node_find_named_child", result);
2568 if (prop == NULL)
2569 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2571 /* rn_valtype is immutable, so no locking. */
2572 if (prop->rn_valtype != REP_PROTOCOL_TYPE_STRING) {
2573 rc_node_rele(prop);
2574 return (REP_PROTOCOL_SUCCESS);
2577 (void) pthread_mutex_lock(&prop->rn_lock);
2578 for (count = prop->rn_values_count, cp = prop->rn_values;
2579 count > 0;
2580 --count) {
2581 result = perm_add_enabling_type(pcp, cp,
2582 (pg->rn_id.rl_ids[ID_INSTANCE]) ? PC_AUTH_INST :
2583 PC_AUTH_SVC);
2584 if (result != REP_PROTOCOL_SUCCESS)
2585 break;
2587 cp = strchr(cp, '\0') + 1;
2590 rc_node_rele_locked(prop);
2592 return (result);
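/*
 * Illustrative sketch (not part of the original code): the loop above
 * relies on rn_values holding the property's values as consecutive
 * NUL-terminated strings, so each value is reached by skipping past the
 * previous terminator.  For example, two authorization values would be
 * laid out as
 *
 *	"solaris.smf.manage\0solaris.smf.modify\0"
 *
 * and visited as:
 *
 *	for (count = prop->rn_values_count, cp = prop->rn_values;
 *	    count > 0; --count) {
 *		... cp points at one value ...
 *		cp = strchr(cp, '\0') + 1;
 *	}
 */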
2596 * Assuming that ent is a service or instance node, if the pgname property
2597 * group has type pgtype, and it has a propname property with string type, add
2598 * its values as authorizations to pcp. If pgtype is NULL, it is not checked.
2599 * Returns
2600 * _SUCCESS
2601 * _DELETED - ent was deleted
2602 * _NO_RESOURCES - no resources
2603 * _NOT_FOUND - ent does not have pgname pg or propname property
2605 static int
2606 perm_add_ent_prop_values(permcheck_t *pcp, rc_node_t *ent, const char *pgname,
2607 const char *pgtype, const char *propname)
2609 int r;
2610 rc_node_t *pg;
2612 assert(!MUTEX_HELD(&ent->rn_lock));
2614 (void) pthread_mutex_lock(&ent->rn_lock);
2615 r = rc_node_find_named_child(ent, pgname,
2616 REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
2617 (void) pthread_mutex_unlock(&ent->rn_lock);
2619 switch (r) {
2620 case REP_PROTOCOL_SUCCESS:
2621 break;
2623 case REP_PROTOCOL_FAIL_DELETED:
2624 case REP_PROTOCOL_FAIL_NO_RESOURCES:
2625 return (r);
2627 default:
2628 bad_error("rc_node_find_named_child", r);
2631 if (pg == NULL)
2632 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2634 if (pgtype == NULL || strcmp(pg->rn_type, pgtype) == 0) {
2635 r = perm_add_pg_prop_values(pcp, pg, propname);
2636 switch (r) {
2637 case REP_PROTOCOL_FAIL_DELETED:
2638 r = REP_PROTOCOL_FAIL_NOT_FOUND;
2639 break;
2641 case REP_PROTOCOL_FAIL_NO_RESOURCES:
2642 case REP_PROTOCOL_SUCCESS:
2643 case REP_PROTOCOL_FAIL_NOT_FOUND:
2644 break;
2646 default:
2647 bad_error("perm_add_pg_prop_values", r);
2651 rc_node_rele(pg);
2653 return (r);
2657 * If pg has a property named propname, and is string typed, add its values as
2658 * authorizations to pcp. If pg has no such property, and its parent is an
2659 * instance, walk up to the service and try doing the same with the property
2660 * of the same name from the property group of the same name. Returns
2661 * _SUCCESS
2662 * _NO_RESOURCES
2663 * _DELETED - pg (or an ancestor) was deleted
2665 static int
2666 perm_add_enabling_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
2668 int r;
2669 char pgname[REP_PROTOCOL_NAME_LEN + 1];
2670 rc_node_t *svc;
2671 size_t sz;
2673 r = perm_add_pg_prop_values(pcp, pg, propname);
2675 if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
2676 return (r);
2678 assert(!MUTEX_HELD(&pg->rn_lock));
2680 if (pg->rn_id.rl_ids[ID_INSTANCE] == 0)
2681 return (REP_PROTOCOL_SUCCESS);
2683 sz = strlcpy(pgname, pg->rn_name, sizeof (pgname));
2684 assert(sz < sizeof (pgname));
2687 * If pg is a child of an instance or snapshot, we want to compose the
2688 * authorization property with the service's (if it exists). The
2689 * snapshot case applies only to read_authorization. In all other
2690 * cases, the pg's parent will be the instance.
2692 r = rc_node_find_ancestor(pg, REP_PROTOCOL_ENTITY_SERVICE, &svc);
2693 if (r != REP_PROTOCOL_SUCCESS) {
2694 assert(r == REP_PROTOCOL_FAIL_DELETED);
2695 return (r);
2697 assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);
2699 r = perm_add_ent_prop_values(pcp, svc, pgname, NULL, propname);
2701 rc_node_rele(svc);
2703 if (r == REP_PROTOCOL_FAIL_NOT_FOUND)
2704 r = REP_PROTOCOL_SUCCESS;
2706 return (r);
2710 * Call perm_add_enabling_values() for the "action_authorization" property of
2711 * the "general" property group of inst. Returns
2712 * _DELETED - inst (or an ancestor) was deleted
2713 * _NO_RESOURCES
2714 * _SUCCESS
2716 static int
2717 perm_add_inst_action_auth(permcheck_t *pcp, rc_node_t *inst)
2719 int r;
2720 rc_node_t *svc;
2722 assert(inst->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
2724 r = perm_add_ent_prop_values(pcp, inst, AUTH_PG_GENERAL,
2725 AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2727 if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
2728 return (r);
2730 r = rc_node_parent(inst, &svc);
2731 if (r != REP_PROTOCOL_SUCCESS) {
2732 assert(r == REP_PROTOCOL_FAIL_DELETED);
2733 return (r);
2736 r = perm_add_ent_prop_values(pcp, svc, AUTH_PG_GENERAL,
2737 AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2739 return (r == REP_PROTOCOL_FAIL_NOT_FOUND ? REP_PROTOCOL_SUCCESS : r);
2741 #endif /* NATIVE_BUILD */
2743 void
2744 rc_node_ptr_init(rc_node_ptr_t *out)
2746 out->rnp_node = NULL;
2747 out->rnp_auth_string = NULL;
2748 out->rnp_authorized = RC_AUTH_UNKNOWN;
2749 out->rnp_deleted = 0;
2752 void
2753 rc_node_ptr_free_mem(rc_node_ptr_t *npp)
2755 if (npp->rnp_auth_string != NULL) {
2756 free((void *)npp->rnp_auth_string);
2757 npp->rnp_auth_string = NULL;
2761 static void
2762 rc_node_assign(rc_node_ptr_t *out, rc_node_t *val)
2764 rc_node_t *cur = out->rnp_node;
2765 if (val != NULL)
2766 rc_node_hold(val);
2767 out->rnp_node = val;
2768 if (cur != NULL) {
2769 NODE_LOCK(cur);
2772 * Register the ephemeral reference created by reading
2773 * out->rnp_node into cur. Note that the persistent
2774 * reference we're destroying is locked by the client
2775 * layer.
2777 rc_node_hold_ephemeral_locked(cur);
2779 rc_node_rele_locked(cur);
2781 out->rnp_authorized = RC_AUTH_UNKNOWN;
2782 rc_node_ptr_free_mem(out);
2783 out->rnp_deleted = 0;
2786 void
2787 rc_node_clear(rc_node_ptr_t *out, int deleted)
2789 rc_node_assign(out, NULL);
2790 out->rnp_deleted = deleted;
2793 void
2794 rc_node_ptr_assign(rc_node_ptr_t *out, const rc_node_ptr_t *val)
2796 rc_node_assign(out, val->rnp_node);
2800 * rc_node_check()/RC_NODE_CHECK()
2801 * generic "entry" checks, run before the use of an rc_node pointer.
2803 * Fails with
2804 * _NOT_SET
2805 * _DELETED
2807 static int
2808 rc_node_check_and_lock(rc_node_t *np)
2810 int result = REP_PROTOCOL_SUCCESS;
2811 if (np == NULL)
2812 return (REP_PROTOCOL_FAIL_NOT_SET);
2814 (void) pthread_mutex_lock(&np->rn_lock);
2815 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2816 result = REP_PROTOCOL_FAIL_DELETED;
2817 (void) pthread_mutex_unlock(&np->rn_lock);
2820 return (result);
2824 * Fails with
2825 * _NOT_SET - ptr is reset
2826 * _DELETED - node has been deleted
2828 static rc_node_t *
2829 rc_node_ptr_check_and_lock(rc_node_ptr_t *npp, int *res)
2831 rc_node_t *np = npp->rnp_node;
2832 if (np == NULL) {
2833 if (npp->rnp_deleted)
2834 *res = REP_PROTOCOL_FAIL_DELETED;
2835 else
2836 *res = REP_PROTOCOL_FAIL_NOT_SET;
2837 return (NULL);
2840 (void) pthread_mutex_lock(&np->rn_lock);
2841 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2842 (void) pthread_mutex_unlock(&np->rn_lock);
2843 rc_node_clear(npp, 1);
2844 *res = REP_PROTOCOL_FAIL_DELETED;
2845 return (NULL);
2847 return (np);
2850 #define RC_NODE_CHECK_AND_LOCK(n) { \
2851 int rc__res; \
2852 if ((rc__res = rc_node_check_and_lock(n)) != REP_PROTOCOL_SUCCESS) \
2853 return (rc__res); \
2856 #define RC_NODE_CHECK(n) { \
2857 RC_NODE_CHECK_AND_LOCK(n); \
2858 (void) pthread_mutex_unlock(&(n)->rn_lock); \
2861 #define RC_NODE_CHECK_AND_HOLD(n) { \
2862 RC_NODE_CHECK_AND_LOCK(n); \
2863 rc_node_hold_locked(n); \
2864 (void) pthread_mutex_unlock(&(n)->rn_lock); \
2867 #define RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp) { \
2868 int rc__res; \
2869 if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == NULL) \
2870 return (rc__res); \
2873 #define RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, mem) { \
2874 int rc__res; \
2875 if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == \
2876 NULL) { \
2877 if ((mem) != NULL) \
2878 free((mem)); \
2879 return (rc__res); \
2883 #define RC_NODE_PTR_GET_CHECK(np, npp) { \
2884 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); \
2885 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2888 #define RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp) { \
2889 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); \
2890 rc_node_hold_locked(np); \
2891 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2894 #define HOLD_FLAG_OR_RETURN(np, flag) { \
2895 assert(MUTEX_HELD(&(np)->rn_lock)); \
2896 assert(!((np)->rn_flags & RC_NODE_DEAD)); \
2897 if (!rc_node_hold_flag((np), flag)) { \
2898 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2899 return (REP_PROTOCOL_FAIL_DELETED); \
2903 #define HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, flag, mem) { \
2904 assert(MUTEX_HELD(&(np)->rn_lock)); \
2905 if (!rc_node_hold_flag((np), flag)) { \
2906 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2907 assert((np) == (npp)->rnp_node); \
2908 rc_node_clear(npp, 1); \
2909 if ((mem) != NULL) \
2910 free((mem)); \
2911 return (REP_PROTOCOL_FAIL_DELETED); \
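/*
 * Illustrative sketch (not part of the original code): a typical protocol
 * entry point uses the macros above to validate the client's node pointer
 * before touching the node.  rc_node_example_op() below is hypothetical;
 * rc_node_name() further down is a real caller of RC_NODE_PTR_GET_CHECK():
 *
 *	int
 *	rc_node_example_op(rc_node_ptr_t *npp)
 *	{
 *		rc_node_t *np;
 *
 *		RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
 *		... np is valid and held; its rn_lock has been dropped ...
 *		rc_node_rele(np);
 *		return (REP_PROTOCOL_SUCCESS);
 *	}
 */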
2916 rc_local_scope(uint32_t type, rc_node_ptr_t *out)
2918 if (type != REP_PROTOCOL_ENTITY_SCOPE) {
2919 rc_node_clear(out, 0);
2920 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2924 * the main scope never gets destroyed
2926 rc_node_assign(out, rc_scope);
2928 return (REP_PROTOCOL_SUCCESS);
2932 * Fails with
2933 * _NOT_SET - npp is not set
2934 * _DELETED - the node npp pointed at has been deleted
2935 * _TYPE_MISMATCH - type is not _SCOPE
2936 * _NOT_FOUND - scope has no parent
2938 static int
2939 rc_scope_parent_scope(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
2941 rc_node_t *np;
2943 rc_node_clear(out, 0);
2945 RC_NODE_PTR_GET_CHECK(np, npp);
2947 if (type != REP_PROTOCOL_ENTITY_SCOPE)
2948 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2950 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2953 static int rc_node_pg_check_read_protect(rc_node_t *);
2956 * Fails with
2957 * _NOT_SET
2958 * _DELETED
2959 * _NOT_APPLICABLE
2960 * _NOT_FOUND
2961 * _BAD_REQUEST
2962 * _TRUNCATED
2963 * _NO_RESOURCES
2966 rc_node_name(rc_node_ptr_t *npp, char *buf, size_t sz, uint32_t answertype,
2967 size_t *sz_out)
2969 size_t actual;
2970 rc_node_t *np;
2972 assert(sz == *sz_out);
2974 RC_NODE_PTR_GET_CHECK(np, npp);
2976 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
2977 np = np->rn_cchain[0];
2978 RC_NODE_CHECK(np);
2981 switch (answertype) {
2982 case RP_ENTITY_NAME_NAME:
2983 if (np->rn_name == NULL)
2984 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
2985 actual = strlcpy(buf, np->rn_name, sz);
2986 break;
2987 case RP_ENTITY_NAME_PGTYPE:
2988 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
2989 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
2990 actual = strlcpy(buf, np->rn_type, sz);
2991 break;
2992 case RP_ENTITY_NAME_PGFLAGS:
2993 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
2994 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
2995 actual = snprintf(buf, sz, "%d", np->rn_pgflags);
2996 break;
2997 case RP_ENTITY_NAME_SNAPLEVEL_SCOPE:
2998 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
2999 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3000 actual = strlcpy(buf, np->rn_snaplevel->rsl_scope, sz);
3001 break;
3002 case RP_ENTITY_NAME_SNAPLEVEL_SERVICE:
3003 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
3004 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3005 actual = strlcpy(buf, np->rn_snaplevel->rsl_service, sz);
3006 break;
3007 case RP_ENTITY_NAME_SNAPLEVEL_INSTANCE:
3008 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
3009 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3010 if (np->rn_snaplevel->rsl_instance == NULL)
3011 return (REP_PROTOCOL_FAIL_NOT_FOUND);
3012 actual = strlcpy(buf, np->rn_snaplevel->rsl_instance, sz);
3013 break;
3014 case RP_ENTITY_NAME_PGREADPROT:
3016 int ret;
3018 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
3019 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3020 ret = rc_node_pg_check_read_protect(np);
3021 assert(ret != REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3022 switch (ret) {
3023 case REP_PROTOCOL_FAIL_PERMISSION_DENIED:
3024 actual = snprintf(buf, sz, "1");
3025 break;
3026 case REP_PROTOCOL_SUCCESS:
3027 actual = snprintf(buf, sz, "0");
3028 break;
3029 default:
3030 return (ret);
3032 break;
3034 default:
3035 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
3037 if (actual >= sz)
3038 return (REP_PROTOCOL_FAIL_TRUNCATED);
3040 *sz_out = actual;
3041 return (REP_PROTOCOL_SUCCESS);
3045 rc_node_get_property_type(rc_node_ptr_t *npp, rep_protocol_value_type_t *out)
3047 rc_node_t *np;
3049 RC_NODE_PTR_GET_CHECK(np, npp);
3051 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
3052 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3054 *out = np->rn_valtype;
3056 return (REP_PROTOCOL_SUCCESS);
3060 * Get np's parent. If np is deleted, returns _DELETED. Otherwise puts a hold
3061 * on the parent, returns a pointer to it in *out, and returns _SUCCESS.
3063 static int
3064 rc_node_parent(rc_node_t *np, rc_node_t **out)
3066 rc_node_t *pnp;
3067 rc_node_t *np_orig;
3069 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3070 RC_NODE_CHECK_AND_LOCK(np);
3071 } else {
3072 np = np->rn_cchain[0];
3073 RC_NODE_CHECK_AND_LOCK(np);
3076 np_orig = np;
3077 rc_node_hold_locked(np); /* simplifies the remainder */
3079 for (;;) {
3080 if (!rc_node_wait_flag(np,
3081 RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
3082 rc_node_rele_locked(np);
3083 return (REP_PROTOCOL_FAIL_DELETED);
3086 if (!(np->rn_flags & RC_NODE_OLD))
3087 break;
3089 rc_node_rele_locked(np);
3090 np = cache_lookup(&np_orig->rn_id);
3091 assert(np != np_orig);
3093 if (np == NULL)
3094 goto deleted;
3095 (void) pthread_mutex_lock(&np->rn_lock);
3098 /* guaranteed to succeed without dropping the lock */
3099 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
3100 (void) pthread_mutex_unlock(&np->rn_lock);
3101 *out = NULL;
3102 rc_node_rele(np);
3103 return (REP_PROTOCOL_FAIL_DELETED);
3106 assert(np->rn_parent != NULL);
3107 pnp = np->rn_parent;
3108 (void) pthread_mutex_unlock(&np->rn_lock);
3110 (void) pthread_mutex_lock(&pnp->rn_lock);
3111 (void) pthread_mutex_lock(&np->rn_lock);
3112 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
3113 (void) pthread_mutex_unlock(&np->rn_lock);
3115 rc_node_hold_locked(pnp);
3117 (void) pthread_mutex_unlock(&pnp->rn_lock);
3119 rc_node_rele(np);
3120 *out = pnp;
3121 return (REP_PROTOCOL_SUCCESS);
3123 deleted:
3124 rc_node_rele(np);
3125 return (REP_PROTOCOL_FAIL_DELETED);
3129 * Fails with
3130 * _NOT_SET
3131 * _DELETED
3133 static int
3134 rc_node_ptr_parent(rc_node_ptr_t *npp, rc_node_t **out)
3136 rc_node_t *np;
3138 RC_NODE_PTR_GET_CHECK(np, npp);
3140 return (rc_node_parent(np, out));
3144 * Fails with
3145 * _NOT_SET - npp is not set
3146 * _DELETED - the node npp pointed at has been deleted
3147 * _TYPE_MISMATCH - npp's node's parent is not of type type
3149 * If npp points to a scope, can also fail with
3150 * _NOT_FOUND - scope has no parent
3153 rc_node_get_parent(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
3155 rc_node_t *pnp;
3156 int rc;
3158 if (npp->rnp_node != NULL &&
3159 npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE)
3160 return (rc_scope_parent_scope(npp, type, out));
3162 if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS) {
3163 rc_node_clear(out, 0);
3164 return (rc);
3167 if (type != pnp->rn_id.rl_type) {
3168 rc_node_rele(pnp);
3169 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3172 rc_node_assign(out, pnp);
3173 rc_node_rele(pnp);
3175 return (REP_PROTOCOL_SUCCESS);
3179 rc_node_parent_type(rc_node_ptr_t *npp, uint32_t *type_out)
3181 rc_node_t *pnp;
3182 int rc;
3184 if (npp->rnp_node != NULL &&
3185 npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE) {
3186 *type_out = REP_PROTOCOL_ENTITY_SCOPE;
3187 return (REP_PROTOCOL_SUCCESS);
3190 if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS)
3191 return (rc);
3193 *type_out = pnp->rn_id.rl_type;
3195 rc_node_rele(pnp);
3197 return (REP_PROTOCOL_SUCCESS);
3201 * Fails with
3202 * _INVALID_TYPE - type is invalid
3203 * _TYPE_MISMATCH - np doesn't carry children of type type
3204 * _DELETED - np has been deleted
3205 * _NOT_FOUND - no child with that name/type combo found
3206 * _NO_RESOURCES
3207 * _BACKEND_ACCESS
3210 rc_node_get_child(rc_node_ptr_t *npp, const char *name, uint32_t type,
3211 rc_node_ptr_t *outp)
3213 rc_node_t *np, *cp;
3214 rc_node_t *child = NULL;
3215 int ret, idx;
3217 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
3218 if ((ret = rc_check_type_name(type, name)) == REP_PROTOCOL_SUCCESS) {
3219 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3220 ret = rc_node_find_named_child(np, name, type, &child);
3221 } else {
3222 (void) pthread_mutex_unlock(&np->rn_lock);
3223 ret = REP_PROTOCOL_SUCCESS;
3224 for (idx = 0; idx < COMPOSITION_DEPTH; idx++) {
3225 cp = np->rn_cchain[idx];
3226 if (cp == NULL)
3227 break;
3228 RC_NODE_CHECK_AND_LOCK(cp);
3229 ret = rc_node_find_named_child(cp, name, type,
3230 &child);
3231 (void) pthread_mutex_unlock(&cp->rn_lock);
3233 * loop only if we succeeded, but no child of
3234 * the correct name was found.
3236 if (ret != REP_PROTOCOL_SUCCESS ||
3237 child != NULL)
3238 break;
3240 (void) pthread_mutex_lock(&np->rn_lock);
3243 (void) pthread_mutex_unlock(&np->rn_lock);
3245 if (ret == REP_PROTOCOL_SUCCESS) {
3246 rc_node_assign(outp, child);
3247 if (child != NULL)
3248 rc_node_rele(child);
3249 else
3250 ret = REP_PROTOCOL_FAIL_NOT_FOUND;
3251 } else {
3252 rc_node_assign(outp, NULL);
3254 return (ret);
3258 rc_node_update(rc_node_ptr_t *npp)
3260 cache_bucket_t *bp;
3261 rc_node_t *np = npp->rnp_node;
3262 rc_node_t *nnp;
3263 rc_node_t *cpg = NULL;
3265 if (np != NULL &&
3266 np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3268 * If we're updating a composed property group, actually
3269 * update the top-level property group & return the
3270 * appropriate value. But leave *npp pointing at us.
3272 cpg = np;
3273 np = np->rn_cchain[0];
3276 RC_NODE_CHECK(np);
3278 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP &&
3279 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT)
3280 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
3282 for (;;) {
3283 bp = cache_hold(np->rn_hash);
3284 nnp = cache_lookup_unlocked(bp, &np->rn_id);
3285 if (nnp == NULL) {
3286 cache_release(bp);
3287 rc_node_clear(npp, 1);
3288 return (REP_PROTOCOL_FAIL_DELETED);
3291 * grab the lock before dropping the cache bucket, so
3292 * that no one else can sneak in
3294 (void) pthread_mutex_lock(&nnp->rn_lock);
3295 cache_release(bp);
3297 if (!(nnp->rn_flags & RC_NODE_IN_TX) ||
3298 !rc_node_wait_flag(nnp, RC_NODE_IN_TX))
3299 break;
3301 rc_node_rele_locked(nnp);
3305 * If it is dead, we want to update it so that it will continue to
3306 * report being dead.
3308 if (nnp->rn_flags & RC_NODE_DEAD) {
3309 (void) pthread_mutex_unlock(&nnp->rn_lock);
3310 if (nnp != np && cpg == NULL)
3311 rc_node_assign(npp, nnp); /* updated */
3312 rc_node_rele(nnp);
3313 return (REP_PROTOCOL_FAIL_DELETED);
3316 assert(!(nnp->rn_flags & RC_NODE_OLD));
3317 (void) pthread_mutex_unlock(&nnp->rn_lock);
3319 if (nnp != np && cpg == NULL)
3320 rc_node_assign(npp, nnp); /* updated */
3322 rc_node_rele(nnp);
3324 return ((nnp == np)? REP_PROTOCOL_SUCCESS : REP_PROTOCOL_DONE);
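/*
 * Illustrative sketch (not part of the original code): callers of
 * rc_node_update() can distinguish "already current" from "moved to a
 * newer generation" by the return value:
 *
 *	switch (rc_node_update(npp)) {
 *	case REP_PROTOCOL_SUCCESS:
 *		... npp already pointed at the latest node ...
 *		break;
 *	case REP_PROTOCOL_DONE:
 *		... npp now points at the updated node ...
 *		break;
 *	case REP_PROTOCOL_FAIL_DELETED:
 *		... the entity has been deleted ...
 *		break;
 *	}
 */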
3328 * does a generic modification check, for creation, deletion, and snapshot
3329 * management only. Property group transactions have different checks.
3331 * The string returned in *match_auth must be freed by the caller.
3333 static perm_status_t
3334 rc_node_modify_permission_check(char **match_auth)
3336 permcheck_t *pcp;
3337 perm_status_t granted = PERM_GRANTED;
3338 int rc;
3340 *match_auth = NULL;
3341 #ifdef NATIVE_BUILD
3342 if (!client_is_privileged()) {
3343 granted = PERM_DENIED;
3345 return (granted);
3346 #else
3347 if (is_main_repository == 0)
3348 return (PERM_GRANTED);
3349 pcp = pc_create();
3350 if (pcp != NULL) {
3351 rc = perm_add_enabling(pcp, AUTH_MODIFY);
3353 if (rc == REP_PROTOCOL_SUCCESS) {
3354 granted = perm_granted(pcp);
3356 if ((granted == PERM_GRANTED) ||
3357 (granted == PERM_DENIED)) {
3359 * Copy off the authorization
3360 * string before freeing pcp.
3362 *match_auth =
3363 strdup(pcp->pc_auth_string);
3364 if (*match_auth == NULL)
3365 granted = PERM_FAIL;
3367 } else {
3368 granted = PERM_FAIL;
3371 pc_free(pcp);
3372 } else {
3373 granted = PERM_FAIL;
3376 return (granted);
3377 #endif /* NATIVE_BUILD */
3381 * Native builds are done to create svc.configd-native. This program runs
3382 * only on the Solaris build machines to create the seed repository, and it
3383 * is compiled against the build machine's header files. The ADT_smf_*
3384 * symbols may not be defined in these header files. For this reason
3385 * smf_annotation_event(), smf_audit_event() and special_property_event()
3386 * are not compiled for native builds.
3388 #ifndef NATIVE_BUILD
3391 * This function generates an annotation audit event if one has been setup.
3392 * Annotation events should only be generated immediately before the audit
3393 * record from the first attempt to modify the repository from a client
3394 * which has requested an annotation.
3396 static void
3397 smf_annotation_event(int status, int return_val)
3399 adt_session_data_t *session;
3400 adt_event_data_t *event = NULL;
3401 char file[MAXPATHLEN];
3402 char operation[REP_PROTOCOL_NAME_LEN];
3404 /* Don't audit if we're using an alternate repository. */
3405 if (is_main_repository == 0)
3406 return;
3408 if (client_annotation_needed(operation, sizeof (operation), file,
3409 sizeof (file)) == 0) {
3410 return;
3412 if (file[0] == 0) {
3413 (void) strlcpy(file, "NO FILE", sizeof (file));
3415 if (operation[0] == 0) {
3416 (void) strlcpy(operation, "NO OPERATION",
3417 sizeof (operation));
3419 if ((session = get_audit_session()) == NULL)
3420 return;
3421 if ((event = adt_alloc_event(session, ADT_smf_annotation)) == NULL) {
3422 uu_warn("smf_annotation_event cannot allocate event "
3423 "data. %s\n", strerror(errno));
3424 return;
3426 event->adt_smf_annotation.operation = operation;
3427 event->adt_smf_annotation.file = file;
3428 if (adt_put_event(event, status, return_val) == 0) {
3429 client_annotation_finished();
3430 } else {
3431 uu_warn("smf_annotation_event failed to put event. "
3432 "%s\n", strerror(errno));
3434 adt_free_event(event);
3436 #endif
3439 * smf_audit_event interacts with the security auditing system to generate
3440 * an audit event structure. It establishes an audit session and allocates
3441 * an audit event. The event is filled in from the audit data, and
3442 * adt_put_event is called to generate the event.
3444 static void
3445 smf_audit_event(au_event_t event_id, int status, int return_val,
3446 audit_event_data_t *data)
3448 #ifndef NATIVE_BUILD
3449 char *auth_used;
3450 char *fmri;
3451 char *prop_value;
3452 adt_session_data_t *session;
3453 adt_event_data_t *event = NULL;
3455 /* Don't audit if we're using an alternate repository */
3456 if (is_main_repository == 0)
3457 return;
3459 smf_annotation_event(status, return_val);
3460 if ((session = get_audit_session()) == NULL)
3461 return;
3462 if ((event = adt_alloc_event(session, event_id)) == NULL) {
3463 uu_warn("smf_audit_event cannot allocate event "
3464 "data. %s\n", strerror(errno));
3465 return;
3469 * Handle possibility of NULL authorization strings, FMRIs and
3470 * property values.
3472 if (data->ed_auth == NULL) {
3473 auth_used = "PRIVILEGED";
3474 } else {
3475 auth_used = data->ed_auth;
3477 if (data->ed_fmri == NULL) {
3478 syslog(LOG_WARNING, "smf_audit_event called with "
3479 "empty FMRI string");
3480 fmri = "UNKNOWN FMRI";
3481 } else {
3482 fmri = data->ed_fmri;
3484 if (data->ed_prop_value == NULL) {
3485 prop_value = "";
3486 } else {
3487 prop_value = data->ed_prop_value;
3490 /* Fill in the event data. */
3491 switch (event_id) {
3492 case ADT_smf_attach_snap:
3493 event->adt_smf_attach_snap.auth_used = auth_used;
3494 event->adt_smf_attach_snap.old_fmri = data->ed_old_fmri;
3495 event->adt_smf_attach_snap.old_name = data->ed_old_name;
3496 event->adt_smf_attach_snap.new_fmri = fmri;
3497 event->adt_smf_attach_snap.new_name = data->ed_snapname;
3498 break;
3499 case ADT_smf_change_prop:
3500 event->adt_smf_change_prop.auth_used = auth_used;
3501 event->adt_smf_change_prop.fmri = fmri;
3502 event->adt_smf_change_prop.type = data->ed_type;
3503 event->adt_smf_change_prop.value = prop_value;
3504 break;
3505 case ADT_smf_clear:
3506 event->adt_smf_clear.auth_used = auth_used;
3507 event->adt_smf_clear.fmri = fmri;
3508 break;
3509 case ADT_smf_create:
3510 event->adt_smf_create.fmri = fmri;
3511 event->adt_smf_create.auth_used = auth_used;
3512 break;
3513 case ADT_smf_create_npg:
3514 event->adt_smf_create_npg.auth_used = auth_used;
3515 event->adt_smf_create_npg.fmri = fmri;
3516 event->adt_smf_create_npg.type = data->ed_type;
3517 break;
3518 case ADT_smf_create_pg:
3519 event->adt_smf_create_pg.auth_used = auth_used;
3520 event->adt_smf_create_pg.fmri = fmri;
3521 event->adt_smf_create_pg.type = data->ed_type;
3522 break;
3523 case ADT_smf_create_prop:
3524 event->adt_smf_create_prop.auth_used = auth_used;
3525 event->adt_smf_create_prop.fmri = fmri;
3526 event->adt_smf_create_prop.type = data->ed_type;
3527 event->adt_smf_create_prop.value = prop_value;
3528 break;
3529 case ADT_smf_create_snap:
3530 event->adt_smf_create_snap.auth_used = auth_used;
3531 event->adt_smf_create_snap.fmri = fmri;
3532 event->adt_smf_create_snap.name = data->ed_snapname;
3533 break;
3534 case ADT_smf_degrade:
3535 event->adt_smf_degrade.auth_used = auth_used;
3536 event->adt_smf_degrade.fmri = fmri;
3537 break;
3538 case ADT_smf_delete:
3539 event->adt_smf_delete.fmri = fmri;
3540 event->adt_smf_delete.auth_used = auth_used;
3541 break;
3542 case ADT_smf_delete_npg:
3543 event->adt_smf_delete_npg.auth_used = auth_used;
3544 event->adt_smf_delete_npg.fmri = fmri;
3545 event->adt_smf_delete_npg.type = data->ed_type;
3546 break;
3547 case ADT_smf_delete_pg:
3548 event->adt_smf_delete_pg.auth_used = auth_used;
3549 event->adt_smf_delete_pg.fmri = fmri;
3550 event->adt_smf_delete_pg.type = data->ed_type;
3551 break;
3552 case ADT_smf_delete_prop:
3553 event->adt_smf_delete_prop.auth_used = auth_used;
3554 event->adt_smf_delete_prop.fmri = fmri;
3555 break;
3556 case ADT_smf_delete_snap:
3557 event->adt_smf_delete_snap.auth_used = auth_used;
3558 event->adt_smf_delete_snap.fmri = fmri;
3559 event->adt_smf_delete_snap.name = data->ed_snapname;
3560 break;
3561 case ADT_smf_disable:
3562 event->adt_smf_disable.auth_used = auth_used;
3563 event->adt_smf_disable.fmri = fmri;
3564 break;
3565 case ADT_smf_enable:
3566 event->adt_smf_enable.auth_used = auth_used;
3567 event->adt_smf_enable.fmri = fmri;
3568 break;
3569 case ADT_smf_immediate_degrade:
3570 event->adt_smf_immediate_degrade.auth_used = auth_used;
3571 event->adt_smf_immediate_degrade.fmri = fmri;
3572 break;
3573 case ADT_smf_immediate_maintenance:
3574 event->adt_smf_immediate_maintenance.auth_used = auth_used;
3575 event->adt_smf_immediate_maintenance.fmri = fmri;
3576 break;
3577 case ADT_smf_immtmp_maintenance:
3578 event->adt_smf_immtmp_maintenance.auth_used = auth_used;
3579 event->adt_smf_immtmp_maintenance.fmri = fmri;
3580 break;
3581 case ADT_smf_maintenance:
3582 event->adt_smf_maintenance.auth_used = auth_used;
3583 event->adt_smf_maintenance.fmri = fmri;
3584 break;
3585 case ADT_smf_milestone:
3586 event->adt_smf_milestone.auth_used = auth_used;
3587 event->adt_smf_milestone.fmri = fmri;
3588 break;
3589 case ADT_smf_read_prop:
3590 event->adt_smf_read_prop.auth_used = auth_used;
3591 event->adt_smf_read_prop.fmri = fmri;
3592 break;
3593 case ADT_smf_refresh:
3594 event->adt_smf_refresh.auth_used = auth_used;
3595 event->adt_smf_refresh.fmri = fmri;
3596 break;
3597 case ADT_smf_restart:
3598 event->adt_smf_restart.auth_used = auth_used;
3599 event->adt_smf_restart.fmri = fmri;
3600 break;
3601 case ADT_smf_tmp_disable:
3602 event->adt_smf_tmp_disable.auth_used = auth_used;
3603 event->adt_smf_tmp_disable.fmri = fmri;
3604 break;
3605 case ADT_smf_tmp_enable:
3606 event->adt_smf_tmp_enable.auth_used = auth_used;
3607 event->adt_smf_tmp_enable.fmri = fmri;
3608 break;
3609 case ADT_smf_tmp_maintenance:
3610 event->adt_smf_tmp_maintenance.auth_used = auth_used;
3611 event->adt_smf_tmp_maintenance.fmri = fmri;
3612 break;
3613 default:
3614 abort(); /* Need to cover all SMF event IDs */
3617 if (adt_put_event(event, status, return_val) != 0) {
3618 uu_warn("smf_audit_event failed to put event. %s\n",
3619 strerror(errno));
3621 adt_free_event(event);
3622 #endif
3625 #ifndef NATIVE_BUILD
3627 * Determine if the combination of the property group at pg_name and the
3628 * property at prop_name are in the set of special startd properties. If
3629 * they are, a special audit event will be generated.
3631 static void
3632 special_property_event(audit_event_data_t *evdp, const char *prop_name,
3633 char *pg_name, int status, int return_val, tx_commit_data_t *tx_data,
3634 size_t cmd_no)
3636 au_event_t event_id;
3637 audit_special_prop_item_t search_key;
3638 audit_special_prop_item_t *found;
3640 /* Use bsearch to find the special property information. */
3641 search_key.api_prop_name = prop_name;
3642 search_key.api_pg_name = pg_name;
3643 found = (audit_special_prop_item_t *)bsearch(&search_key,
3644 special_props_list, SPECIAL_PROP_COUNT,
3645 sizeof (special_props_list[0]), special_prop_compare);
3646 if (found == NULL) {
3647 /* Not a special property. */
3648 return;
3651 /* Get the event id */
3652 if (found->api_event_func == NULL) {
3653 event_id = found->api_event_id;
3654 } else {
3655 if ((*found->api_event_func)(tx_data, cmd_no,
3656 found->api_pg_name, &event_id) < 0)
3657 return;
3660 /* Generate the event. */
3661 smf_audit_event(event_id, status, return_val, evdp);
3663 #endif /* NATIVE_BUILD */
3666 * Return a pointer to a string containing all the values of the command
3667 * specified by cmd_no with each value enclosed in quotes. It is up to the
3668 * caller to free the memory at the returned pointer.
3670 static char *
3671 generate_value_list(tx_commit_data_t *tx_data, size_t cmd_no)
3673 const char *cp;
3674 const char *cur_value;
3675 size_t byte_count = 0;
3676 uint32_t i;
3677 uint32_t nvalues;
3678 size_t str_size = 0;
3679 char *values = NULL;
3680 char *vp;
3682 if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
3683 return (NULL);
3685 * First determine the size of the buffer that we will need. We
3686 * will represent each property value surrounded by quotes with a
3687 * space separating the values. Thus, we need to find the total
3688 * size of all the value strings and add 3 for each value.
3690 * There is one catch, though. We need to escape any internal
3691 * quote marks in the values. So for each quote in the value we
3692 * need to add another byte to the buffer size.
3694 for (i = 0; i < nvalues; i++) {
3695 if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
3696 REP_PROTOCOL_SUCCESS)
3697 return (NULL);
3698 for (cp = cur_value; *cp != 0; cp++) {
3699 byte_count += (*cp == '"') ? 2 : 1;
3701 byte_count += 3; /* surrounding quotes & space */
3703 byte_count++; /* nul terminator */
3704 values = malloc(byte_count);
3705 if (values == NULL)
3706 return (NULL);
3707 *values = 0;
3709 /* Now build up the string of values. */
3710 for (i = 0; i < nvalues; i++) {
3711 if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
3712 REP_PROTOCOL_SUCCESS) {
3713 free(values);
3714 return (NULL);
3716 (void) strlcat(values, "\"", byte_count);
3717 for (cp = cur_value, vp = values + strlen(values);
3718 *cp != 0; cp++) {
3719 if (*cp == '"') {
3720 *vp++ = '\\';
3721 *vp++ = '"';
3722 } else {
3723 *vp++ = *cp;
3726 *vp = 0;
3727 str_size = strlcat(values, "\" ", byte_count);
3728 assert(str_size < byte_count);
3730 if (str_size > 0)
3731 values[str_size - 1] = 0; /* get rid of trailing space */
3732 return (values);
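/*
 * Illustrative example (not part of the original code): for a command
 * whose two values are
 *
 *	foo
 *	say "hi"
 *
 * generate_value_list() returns the malloc()ed string
 *
 *	"foo" "say \"hi\""
 *
 * i.e. each value quoted, embedded quotes escaped, values separated by a
 * single space, and no trailing space.  The caller must free() the result.
 */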
3736 * generate_property_events takes the transaction commit data at tx_data
3737 * and generates an audit event for each command.
3739 * Native builds are done to create svc.configd-native. This program runs
3740 * only on the Solaris build machines to create the seed repository. Thus,
3741 * no audit events should be generated when running svc.configd-native.
3743 static void
3744 generate_property_events(
3745 tx_commit_data_t *tx_data,
3746 char *pg_fmri, /* FMRI of property group */
3747 char *auth_string,
3748 int auth_status,
3749 int auth_ret_value)
3751 #ifndef NATIVE_BUILD
3752 enum rep_protocol_transaction_action action;
3753 audit_event_data_t audit_data;
3754 size_t count;
3755 size_t cmd_no;
3756 char *cp;
3757 au_event_t event_id;
3758 char fmri[REP_PROTOCOL_FMRI_LEN];
3759 char pg_name[REP_PROTOCOL_NAME_LEN];
3760 char *pg_end; /* End of prop. group fmri */
3761 const char *prop_name;
3762 uint32_t ptype;
3763 char prop_type[3];
3764 enum rep_protocol_responseid rc;
3765 size_t sz_out;
3767 /* Make sure we have something to do. */
3768 if (tx_data == NULL)
3769 return;
3770 if ((count = tx_cmd_count(tx_data)) == 0)
3771 return;
3773 /* Copy the property group fmri */
3774 pg_end = fmri;
3775 pg_end += strlcpy(fmri, pg_fmri, sizeof (fmri));
3778 * Get the property group name. It is the first component after
3779 * the last occurrence of SCF_FMRI_PROPERTYGRP_PREFIX in the fmri.
3781 cp = strstr(pg_fmri, SCF_FMRI_PROPERTYGRP_PREFIX);
3782 if (cp == NULL) {
3783 pg_name[0] = 0;
3784 } else {
3785 cp += strlen(SCF_FMRI_PROPERTYGRP_PREFIX);
3786 (void) strlcpy(pg_name, cp, sizeof (pg_name));
3789 audit_data.ed_auth = auth_string;
3790 audit_data.ed_fmri = fmri;
3791 audit_data.ed_type = prop_type;
3794 * Property type is two characters (see
3795 * rep_protocol_value_type_t), so terminate the string.
3797 prop_type[2] = 0;
3799 for (cmd_no = 0; cmd_no < count; cmd_no++) {
3800 /* Construct FMRI of the property */
3801 *pg_end = 0;
3802 if (tx_cmd_prop(tx_data, cmd_no, &prop_name) !=
3803 REP_PROTOCOL_SUCCESS) {
3804 continue;
3806 rc = rc_concat_fmri_element(fmri, sizeof (fmri), &sz_out,
3807 prop_name, REP_PROTOCOL_ENTITY_PROPERTY);
3808 if (rc != REP_PROTOCOL_SUCCESS) {
3810 * If we can't get the FMRI, we'll abandon this
3811 * command
3813 continue;
3816 /* Generate special property event if necessary. */
3817 special_property_event(&audit_data, prop_name, pg_name,
3818 auth_status, auth_ret_value, tx_data, cmd_no);
3820 /* Capture rest of audit data. */
3821 if (tx_cmd_prop_type(tx_data, cmd_no, &ptype) !=
3822 REP_PROTOCOL_SUCCESS) {
3823 continue;
3825 prop_type[0] = REP_PROTOCOL_BASE_TYPE(ptype);
3826 prop_type[1] = REP_PROTOCOL_SUBTYPE(ptype);
3827 audit_data.ed_prop_value = generate_value_list(tx_data, cmd_no);
3829 /* Determine the event type. */
3830 if (tx_cmd_action(tx_data, cmd_no, &action) !=
3831 REP_PROTOCOL_SUCCESS) {
3832 free(audit_data.ed_prop_value);
3833 continue;
3835 switch (action) {
3836 case REP_PROTOCOL_TX_ENTRY_NEW:
3837 event_id = ADT_smf_create_prop;
3838 break;
3839 case REP_PROTOCOL_TX_ENTRY_CLEAR:
3840 event_id = ADT_smf_change_prop;
3841 break;
3842 case REP_PROTOCOL_TX_ENTRY_REPLACE:
3843 event_id = ADT_smf_change_prop;
3844 break;
3845 case REP_PROTOCOL_TX_ENTRY_DELETE:
3846 event_id = ADT_smf_delete_prop;
3847 break;
3848 default:
3849 assert(0); /* Missing a case */
3850 free(audit_data.ed_prop_value);
3851 continue;
3854 /* Generate the event. */
3855 smf_audit_event(event_id, auth_status, auth_ret_value,
3856 &audit_data);
3857 free(audit_data.ed_prop_value);
3859 #endif /* NATIVE_BUILD */
3863 * Fails with
3864 * _DELETED - node has been deleted
3865 * _NOT_SET - npp is reset
3866 * _NOT_APPLICABLE - type is _PROPERTYGRP
3867 * _INVALID_TYPE - node is corrupt or type is invalid
3868 * _TYPE_MISMATCH - node cannot have children of type type
3869 * _BAD_REQUEST - name is invalid
3870 * cannot create children for this type of node
3871 * _NO_RESOURCES - out of memory, or could not allocate new id
3872 * _PERMISSION_DENIED
3873 * _BACKEND_ACCESS
3874 * _BACKEND_READONLY
3875 * _EXISTS - child already exists
3876 * _TRUNCATED - truncated FMRI for the audit record
3879 rc_node_create_child(rc_node_ptr_t *npp, uint32_t type, const char *name,
3880 rc_node_ptr_t *cpp)
3882 rc_node_t *np;
3883 rc_node_t *cp = NULL;
3884 int rc;
3885 perm_status_t perm_rc;
3886 size_t sz_out;
3887 char fmri[REP_PROTOCOL_FMRI_LEN];
3888 audit_event_data_t audit_data;
3890 rc_node_clear(cpp, 0);
3893 * rc_node_modify_permission_check() must be called before the node
3894 * is locked. This is because the library functions that check
3895 * authorizations can trigger calls back into configd.
3897 perm_rc = rc_node_modify_permission_check(&audit_data.ed_auth);
3898 switch (perm_rc) {
3899 case PERM_DENIED:
3901 * We continue in this case, so that an audit event can be
3902 * generated later in the function.
3904 break;
3905 case PERM_GRANTED:
3906 break;
3907 case PERM_GONE:
3908 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
3909 case PERM_FAIL:
3910 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
3911 default:
3912 bad_error(rc_node_modify_permission_check, perm_rc);
3915 RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, audit_data.ed_auth);
3917 audit_data.ed_fmri = fmri;
3920 * there is a separate interface for creating property groups
3922 if (type == REP_PROTOCOL_ENTITY_PROPERTYGRP) {
3923 (void) pthread_mutex_unlock(&np->rn_lock);
3924 free(audit_data.ed_auth);
3925 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3928 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3929 (void) pthread_mutex_unlock(&np->rn_lock);
3930 np = np->rn_cchain[0];
3931 if ((rc = rc_node_check_and_lock(np)) != REP_PROTOCOL_SUCCESS) {
3932 free(audit_data.ed_auth);
3933 return (rc);
3937 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
3938 REP_PROTOCOL_SUCCESS) {
3939 (void) pthread_mutex_unlock(&np->rn_lock);
3940 free(audit_data.ed_auth);
3941 return (rc);
3943 if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS) {
3944 (void) pthread_mutex_unlock(&np->rn_lock);
3945 free(audit_data.ed_auth);
3946 return (rc);
3949 if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
3950 name, type)) != REP_PROTOCOL_SUCCESS) {
3951 (void) pthread_mutex_unlock(&np->rn_lock);
3952 free(audit_data.ed_auth);
3953 return (rc);
3955 if (perm_rc == PERM_DENIED) {
3956 (void) pthread_mutex_unlock(&np->rn_lock);
3957 smf_audit_event(ADT_smf_create, ADT_FAILURE,
3958 ADT_FAIL_VALUE_AUTH, &audit_data);
3959 free(audit_data.ed_auth);
3960 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
3963 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
3964 audit_data.ed_auth);
3965 (void) pthread_mutex_unlock(&np->rn_lock);
3967 rc = object_create(np, type, name, &cp);
3968 assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3970 if (rc == REP_PROTOCOL_SUCCESS) {
3971 rc_node_assign(cpp, cp);
3972 rc_node_rele(cp);
3975 (void) pthread_mutex_lock(&np->rn_lock);
3976 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
3977 (void) pthread_mutex_unlock(&np->rn_lock);
3979 if (rc == REP_PROTOCOL_SUCCESS) {
3980 smf_audit_event(ADT_smf_create, ADT_SUCCESS, ADT_SUCCESS,
3981 &audit_data);
3984 free(audit_data.ed_auth);
3986 return (rc);
3990 rc_node_create_child_pg(rc_node_ptr_t *npp, uint32_t type, const char *name,
3991 const char *pgtype, uint32_t flags, rc_node_ptr_t *cpp)
3993 rc_node_t *np;
3994 rc_node_t *cp;
3995 int rc;
3996 permcheck_t *pcp;
3997 perm_status_t granted;
3998 char fmri[REP_PROTOCOL_FMRI_LEN];
3999 audit_event_data_t audit_data;
4000 au_event_t event_id;
4001 size_t sz_out;
4003 audit_data.ed_auth = NULL;
4004 audit_data.ed_fmri = fmri;
4005 audit_data.ed_type = (char *)pgtype;
4007 rc_node_clear(cpp, 0);
4009 /* verify flags is valid */
4010 if (flags & ~SCF_PG_FLAG_NONPERSISTENT)
4011 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4013 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
4015 if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
4016 rc_node_rele(np);
4017 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
4020 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
4021 REP_PROTOCOL_SUCCESS) {
4022 rc_node_rele(np);
4023 return (rc);
4025 if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS ||
4026 (rc = rc_check_pgtype_name(pgtype)) != REP_PROTOCOL_SUCCESS) {
4027 rc_node_rele(np);
4028 return (rc);
4031 #ifdef NATIVE_BUILD
4032 if (!client_is_privileged()) {
4033 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4035 #else
4036 if (flags & SCF_PG_FLAG_NONPERSISTENT) {
4037 event_id = ADT_smf_create_npg;
4038 } else {
4039 event_id = ADT_smf_create_pg;
4041 if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
4042 name, REP_PROTOCOL_ENTITY_PROPERTYGRP)) != REP_PROTOCOL_SUCCESS) {
4043 rc_node_rele(np);
4044 return (rc);
4047 if (is_main_repository) {
4048 /* Must have .smf.modify or smf.modify.<type> authorization */
4049 pcp = pc_create();
4050 if (pcp != NULL) {
4051 rc = perm_add_enabling(pcp, AUTH_MODIFY);
4053 if (rc == REP_PROTOCOL_SUCCESS) {
4054 const char * const auth =
4055 perm_auth_for_pgtype(pgtype);
4057 if (auth != NULL)
4058 rc = perm_add_enabling(pcp, auth);
4062 * .manage or $action_authorization can be used to
4063 * create the actions pg and the general_ovr pg.
4065 if (rc == REP_PROTOCOL_SUCCESS &&
4066 (flags & SCF_PG_FLAG_NONPERSISTENT) != 0 &&
4067 np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE &&
4068 ((strcmp(name, AUTH_PG_ACTIONS) == 0 &&
4069 strcmp(pgtype, AUTH_PG_ACTIONS_TYPE) == 0) ||
4070 (strcmp(name, AUTH_PG_GENERAL_OVR) == 0 &&
4071 strcmp(pgtype, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
4072 rc = perm_add_enabling(pcp, AUTH_MANAGE);
4074 if (rc == REP_PROTOCOL_SUCCESS)
4075 rc = perm_add_inst_action_auth(pcp, np);
4078 if (rc == REP_PROTOCOL_SUCCESS) {
4079 granted = perm_granted(pcp);
4081 rc = map_granted_status(granted, pcp,
4082 &audit_data.ed_auth);
4083 if (granted == PERM_GONE) {
4084 /* No auditing if client gone. */
4085 pc_free(pcp);
4086 rc_node_rele(np);
4087 return (rc);
4091 pc_free(pcp);
4092 } else {
4093 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4096 } else {
4097 rc = REP_PROTOCOL_SUCCESS;
4099 #endif /* NATIVE_BUILD */
4102 if (rc != REP_PROTOCOL_SUCCESS) {
4103 rc_node_rele(np);
4104 if (rc != REP_PROTOCOL_FAIL_NO_RESOURCES) {
4105 smf_audit_event(event_id, ADT_FAILURE,
4106 ADT_FAIL_VALUE_AUTH, &audit_data);
4108 if (audit_data.ed_auth != NULL)
4109 free(audit_data.ed_auth);
4110 return (rc);
4113 (void) pthread_mutex_lock(&np->rn_lock);
4114 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
4115 audit_data.ed_auth);
4116 (void) pthread_mutex_unlock(&np->rn_lock);
4118 rc = object_create_pg(np, type, name, pgtype, flags, &cp);
4120 if (rc == REP_PROTOCOL_SUCCESS) {
4121 rc_node_assign(cpp, cp);
4122 rc_node_rele(cp);
4125 (void) pthread_mutex_lock(&np->rn_lock);
4126 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
4127 (void) pthread_mutex_unlock(&np->rn_lock);
4129 if (rc == REP_PROTOCOL_SUCCESS) {
4130 smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
4131 &audit_data);
4133 if (audit_data.ed_auth != NULL)
4134 free(audit_data.ed_auth);
4136 return (rc);
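/*
 * Detach a pending property group notification from its property group
 * (if it is still attached) and close the associated file descriptor.
 * Must be called with rc_pg_notify_lock held (see the assert below).
 */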
4139 static void
4140 rc_pg_notify_fire(rc_node_pg_notify_t *pnp)
4142 assert(MUTEX_HELD(&rc_pg_notify_lock));
4144 if (pnp->rnpn_pg != NULL) {
4145 uu_list_remove(pnp->rnpn_pg->rn_pg_notify_list, pnp);
4146 (void) close(pnp->rnpn_fd);
4148 pnp->rnpn_pg = NULL;
4149 pnp->rnpn_fd = -1;
4150 } else {
4151 assert(pnp->rnpn_fd == -1);
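/*
 * Walk up from np_arg, collecting the enclosing property group, instance,
 * and service nodes, and hand their names (along with ndp) to
 * rc_notify_deletion().  If the walk fails, ndp is simply freed.  The
 * holds taken during the walk are dropped in the cleanup loop.
 */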
4155 static void
4156 rc_notify_node_delete(rc_notify_delete_t *ndp, rc_node_t *np_arg)
4158 rc_node_t *svc = NULL;
4159 rc_node_t *inst = NULL;
4160 rc_node_t *pg = NULL;
4161 rc_node_t *np = np_arg;
4162 rc_node_t *nnp;
4164 while (svc == NULL) {
4165 (void) pthread_mutex_lock(&np->rn_lock);
4166 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
4167 (void) pthread_mutex_unlock(&np->rn_lock);
4168 goto cleanup;
4170 nnp = np->rn_parent;
4171 rc_node_hold_locked(np); /* hold it in place */
4173 switch (np->rn_id.rl_type) {
4174 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4175 assert(pg == NULL);
4176 pg = np;
4177 break;
4178 case REP_PROTOCOL_ENTITY_INSTANCE:
4179 assert(inst == NULL);
4180 inst = np;
4181 break;
4182 case REP_PROTOCOL_ENTITY_SERVICE:
4183 assert(svc == NULL);
4184 svc = np;
4185 break;
4186 default:
4187 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
4188 rc_node_rele_locked(np);
4189 goto cleanup;
4192 (void) pthread_mutex_unlock(&np->rn_lock);
4194 np = nnp;
4195 if (np == NULL)
4196 goto cleanup;
4199 rc_notify_deletion(ndp,
4200 svc->rn_name,
4201 inst != NULL ? inst->rn_name : NULL,
4202 pg != NULL ? pg->rn_name : NULL);
4204 ndp = NULL;
4206 cleanup:
4207 if (ndp != NULL)
4208 uu_free(ndp);
4210 for (;;) {
4211 if (svc != NULL) {
4212 np = svc;
4213 svc = NULL;
4214 } else if (inst != NULL) {
4215 np = inst;
4216 inst = NULL;
4217 } else if (pg != NULL) {
4218 np = pg;
4219 pg = NULL;
4220 } else
4221 break;
4223 (void) pthread_mutex_lock(&np->rn_lock);
4224 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
4225 rc_node_rele_locked(np);
4230 * Hold RC_NODE_DYING_FLAGS on np's descendants. If andformer is true, do
4231 * the same down the rn_former chain.
4233 static void
4234 rc_node_delete_hold(rc_node_t *np, int andformer)
4236 rc_node_t *cp;
4238 again:
4239 assert(MUTEX_HELD(&np->rn_lock));
4240 assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);
4242 for (cp = uu_list_first(np->rn_children); cp != NULL;
4243 cp = uu_list_next(np->rn_children, cp)) {
4244 (void) pthread_mutex_lock(&cp->rn_lock);
4245 (void) pthread_mutex_unlock(&np->rn_lock);
4246 if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS)) {
4248 * already marked as dead -- can't happen, since that
4249 * would require setting RC_NODE_CHILDREN_CHANGING
4250 * in np, and we're holding that...
4252 abort();
4254 rc_node_delete_hold(cp, andformer); /* recurse, drop lock */
4256 (void) pthread_mutex_lock(&np->rn_lock);
4258 if (andformer && (cp = np->rn_former) != NULL) {
4259 (void) pthread_mutex_lock(&cp->rn_lock);
4260 (void) pthread_mutex_unlock(&np->rn_lock);
4261 if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS))
4262 abort(); /* can't happen, see above */
4263 np = cp;
4264 goto again; /* tail-recurse down rn_former */
4266 (void) pthread_mutex_unlock(&np->rn_lock);
4270 * N.B.: this function drops np->rn_lock on the way out.
4272 static void
4273 rc_node_delete_rele(rc_node_t *np, int andformer)
4275 rc_node_t *cp;
4277 again:
4278 assert(MUTEX_HELD(&np->rn_lock));
4279 assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);
4281 for (cp = uu_list_first(np->rn_children); cp != NULL;
4282 cp = uu_list_next(np->rn_children, cp)) {
4283 (void) pthread_mutex_lock(&cp->rn_lock);
4284 (void) pthread_mutex_unlock(&np->rn_lock);
4285 rc_node_delete_rele(cp, andformer); /* recurse, drop lock */
4286 (void) pthread_mutex_lock(&np->rn_lock);
4288 if (andformer && (cp = np->rn_former) != NULL) {
4289 (void) pthread_mutex_lock(&cp->rn_lock);
4290 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4291 (void) pthread_mutex_unlock(&np->rn_lock);
4293 np = cp;
4294 goto again; /* tail-recurse down rn_former */
4296 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4297 (void) pthread_mutex_unlock(&np->rn_lock);
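/*
 * Mark cp RC_NODE_DEAD.  If cp is not an OLD version, also clear its
 * parent linkage, fire any pending pg notifications, and remove it from
 * the notify list and the cache hash table.  Called and returns with
 * cp->rn_lock held.
 */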
4300 static void
4301 rc_node_finish_delete(rc_node_t *cp)
4303 cache_bucket_t *bp;
4304 rc_node_pg_notify_t *pnp;
4306 assert(MUTEX_HELD(&cp->rn_lock));
4308 if (!(cp->rn_flags & RC_NODE_OLD)) {
4309 assert(cp->rn_flags & RC_NODE_IN_PARENT);
4310 if (!rc_node_wait_flag(cp, RC_NODE_USING_PARENT)) {
4311 abort(); /* can't happen, see above */
4313 cp->rn_flags &= ~RC_NODE_IN_PARENT;
4314 cp->rn_parent = NULL;
4315 rc_node_free_fmri(cp);
4318 cp->rn_flags |= RC_NODE_DEAD;
4321 * If this node is not outdated, we need to remove it from
4322 * the notify list and cache hash table.
4324 if (!(cp->rn_flags & RC_NODE_OLD)) {
4325 assert(cp->rn_refs > 0); /* can't go away yet */
4326 (void) pthread_mutex_unlock(&cp->rn_lock);
4328 (void) pthread_mutex_lock(&rc_pg_notify_lock);
4329 while ((pnp = uu_list_first(cp->rn_pg_notify_list)) != NULL)
4330 rc_pg_notify_fire(pnp);
4331 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
4332 rc_notify_remove_node(cp);
4334 bp = cache_hold(cp->rn_hash);
4335 (void) pthread_mutex_lock(&cp->rn_lock);
4336 cache_remove_unlocked(bp, cp);
4337 cache_release(bp);
4342 * For each child, call rc_node_finish_delete() and recurse. If andformer
4343 * is set, also recurse down rn_former. Finally release np, which might
4344 * free it.
4346 static void
4347 rc_node_delete_children(rc_node_t *np, int andformer)
4349 rc_node_t *cp;
4351 again:
4352 assert(np->rn_refs > 0);
4353 assert(MUTEX_HELD(&np->rn_lock));
4354 assert(np->rn_flags & RC_NODE_DEAD);
4356 while ((cp = uu_list_first(np->rn_children)) != NULL) {
4357 uu_list_remove(np->rn_children, cp);
4358 (void) pthread_mutex_lock(&cp->rn_lock);
4359 (void) pthread_mutex_unlock(&np->rn_lock);
4360 rc_node_hold_locked(cp); /* hold while we recurse */
4361 rc_node_finish_delete(cp);
4362 rc_node_delete_children(cp, andformer); /* drops lock + ref */
4363 (void) pthread_mutex_lock(&np->rn_lock);
4367 * When we drop cp's lock, all the children will be gone, so we
4368 * can release DYING_FLAGS.
4370 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4371 if (andformer && (cp = np->rn_former) != NULL) {
4372 np->rn_former = NULL; /* unlink */
4373 (void) pthread_mutex_lock(&cp->rn_lock);
4376 * Register the ephemeral reference created by reading
4377 * np->rn_former into cp. Note that the persistent
4378 * reference (np->rn_former) is locked because we haven't
4379 * dropped np's lock since we dropped its RC_NODE_IN_TX
4380 * (via RC_NODE_DYING_FLAGS).
4382 rc_node_hold_ephemeral_locked(cp);
4384 (void) pthread_mutex_unlock(&np->rn_lock);
4385 cp->rn_flags &= ~RC_NODE_ON_FORMER;
4387 rc_node_hold_locked(cp); /* hold while we loop */
4389 rc_node_finish_delete(cp);
4391 rc_node_rele(np); /* drop the old reference */
4393 np = cp;
4394 goto again; /* tail-recurse down rn_former */
4396 rc_node_rele_locked(np);
4400 * The last client or child reference to np, which must be either
4401 * RC_NODE_OLD or RC_NODE_DEAD, has been destroyed. We'll destroy any
4402 * remaining references (e.g., rn_former) and call rc_node_destroy() to
4403 * free np.
4405 static void
4406 rc_node_no_client_refs(rc_node_t *np)
4408 int unrefed;
4409 rc_node_t *current, *cur;
4411 assert(MUTEX_HELD(&np->rn_lock));
4412 assert(np->rn_refs == 0);
4413 assert(np->rn_other_refs == 0);
4414 assert(np->rn_other_refs_held == 0);
4416 if (np->rn_flags & RC_NODE_DEAD) {
4418 * The node is DEAD, so the deletion code should have
4419 * destroyed all rn_children or rn_former references.
4420 * Since the last client or child reference has been
4421 * destroyed, we're free to destroy np. Unless another
4422 * thread has an ephemeral reference, in which case we'll
4423 * pass the buck.
4425 if (np->rn_erefs > 1) {
4426 --np->rn_erefs;
4427 NODE_UNLOCK(np);
4428 return;
4431 (void) pthread_mutex_unlock(&np->rn_lock);
4432 rc_node_destroy(np);
4433 return;
4436 /* We only collect DEAD and OLD nodes, thank you. */
4437 assert(np->rn_flags & RC_NODE_OLD);
4440 * RC_NODE_UNREFED keeps multiple threads from processing OLD
4441 * nodes. But it's vulnerable to unfriendly scheduling, so full
4442 * use of rn_erefs should supersede it someday.
4444 if (np->rn_flags & RC_NODE_UNREFED) {
4445 (void) pthread_mutex_unlock(&np->rn_lock);
4446 return;
4448 np->rn_flags |= RC_NODE_UNREFED;
4451 * Now we'll remove the node from the rn_former chain and take its
4452 * DYING_FLAGS.
4456 * Since this node is OLD, it should be on an rn_former chain. To
4457 * remove it, we must find the current in-hash object and grab its
4458 * RC_NODE_IN_TX flag to protect the entire rn_former chain.
4461 (void) pthread_mutex_unlock(&np->rn_lock);
4463 for (;;) {
4464 current = cache_lookup(&np->rn_id);
4466 if (current == NULL) {
4467 (void) pthread_mutex_lock(&np->rn_lock);
4469 if (np->rn_flags & RC_NODE_DEAD)
4470 goto died;
4473 * We are trying to unreference this node, but the
4474 * owner of the former list does not exist. It must
4475 * be the case that another thread is deleting this
4476 * entire sub-branch, but has not yet reached us.
4477 * We will in short order be deleted.
4479 np->rn_flags &= ~RC_NODE_UNREFED;
4480 (void) pthread_mutex_unlock(&np->rn_lock);
4481 return;
4484 if (current == np) {
4486 * no longer unreferenced
4488 (void) pthread_mutex_lock(&np->rn_lock);
4489 np->rn_flags &= ~RC_NODE_UNREFED;
4490 /* held in cache_lookup() */
4491 rc_node_rele_locked(np);
4492 return;
4495 (void) pthread_mutex_lock(&current->rn_lock);
4496 if (current->rn_flags & RC_NODE_OLD) {
4498 * current has been replaced since we looked it
4499 * up. Try again.
4501 /* held in cache_lookup() */
4502 rc_node_rele_locked(current);
4503 continue;
4506 if (!rc_node_hold_flag(current, RC_NODE_IN_TX)) {
4508 * current has been deleted since we looked it up. Try
4509 * again.
4511 /* held in cache_lookup() */
4512 rc_node_rele_locked(current);
4513 continue;
4517 * rc_node_hold_flag() might have dropped current's lock, so
4518 * check OLD again.
4520 if (!(current->rn_flags & RC_NODE_OLD)) {
4521 /* Not old. Stop looping. */
4522 (void) pthread_mutex_unlock(&current->rn_lock);
4523 break;
4526 rc_node_rele_flag(current, RC_NODE_IN_TX);
4527 rc_node_rele_locked(current);
4530 /* To take np's RC_NODE_DYING_FLAGS, we need its lock. */
4531 (void) pthread_mutex_lock(&np->rn_lock);
4534 * While we didn't have the lock, a thread may have added
4535 * a reference or changed the flags.
4537 if (!(np->rn_flags & (RC_NODE_OLD | RC_NODE_DEAD)) ||
4538 np->rn_refs != 0 || np->rn_other_refs != 0 ||
4539 np->rn_other_refs_held != 0) {
4540 np->rn_flags &= ~RC_NODE_UNREFED;
4542 (void) pthread_mutex_lock(&current->rn_lock);
4543 rc_node_rele_flag(current, RC_NODE_IN_TX);
4544 /* held by cache_lookup() */
4545 rc_node_rele_locked(current);
4546 return;
4549 if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
4551 * Someone deleted the node while we were waiting for
4552 * DYING_FLAGS. Undo the modifications to current.
4554 (void) pthread_mutex_unlock(&np->rn_lock);
4556 rc_node_rele_flag(current, RC_NODE_IN_TX);
4557 /* held by cache_lookup() */
4558 rc_node_rele_locked(current);
4560 (void) pthread_mutex_lock(&np->rn_lock);
4561 goto died;
4564 /* Take RC_NODE_DYING_FLAGS on np's descendants. */
4565 rc_node_delete_hold(np, 0); /* drops np->rn_lock */
4567 /* Mark np DEAD. This requires the lock. */
4568 (void) pthread_mutex_lock(&np->rn_lock);
4570 /* Recheck for new references. */
4571 if (!(np->rn_flags & RC_NODE_OLD) ||
4572 np->rn_refs != 0 || np->rn_other_refs != 0 ||
4573 np->rn_other_refs_held != 0) {
4574 np->rn_flags &= ~RC_NODE_UNREFED;
4575 rc_node_delete_rele(np, 0); /* drops np's lock */
4577 (void) pthread_mutex_lock(&current->rn_lock);
4578 rc_node_rele_flag(current, RC_NODE_IN_TX);
4579 /* held by cache_lookup() */
4580 rc_node_rele_locked(current);
4581 return;
4584 np->rn_flags |= RC_NODE_DEAD;
4587 * Delete the children. This calls rc_node_rele_locked() on np at
4588 * the end, so add a reference to keep the count from going
4589 * negative. It will recurse with RC_NODE_DEAD set, so we'll call
4590 * rc_node_destroy() above, but RC_NODE_UNREFED is also set, so it
4591 * shouldn't actually free() np.
4593 rc_node_hold_locked(np);
4594 rc_node_delete_children(np, 0); /* unlocks np */
4596 /* Remove np from current's rn_former chain. */
4597 (void) pthread_mutex_lock(&current->rn_lock);
4598 for (cur = current; cur != NULL && cur->rn_former != np;
4599 cur = cur->rn_former)
4601 assert(cur != NULL && cur != np);
4603 cur->rn_former = np->rn_former;
4604 np->rn_former = NULL;
4606 rc_node_rele_flag(current, RC_NODE_IN_TX);
4607 /* held by cache_lookup() */
4608 rc_node_rele_locked(current);
4610 /* Clear ON_FORMER and UNREFED, and destroy. */
4611 (void) pthread_mutex_lock(&np->rn_lock);
4612 assert(np->rn_flags & RC_NODE_ON_FORMER);
4613 np->rn_flags &= ~(RC_NODE_UNREFED | RC_NODE_ON_FORMER);
4615 if (np->rn_erefs > 1) {
4616 /* Still referenced. Stay execution. */
4617 --np->rn_erefs;
4618 NODE_UNLOCK(np);
4619 return;
4622 (void) pthread_mutex_unlock(&np->rn_lock);
4623 rc_node_destroy(np);
4624 return;
4626 died:
4628 * Another thread marked np DEAD. If there still aren't any
4629 * persistent references, destroy the node.
4631 np->rn_flags &= ~RC_NODE_UNREFED;
4633 unrefed = (np->rn_refs == 0 && np->rn_other_refs == 0 &&
4634 np->rn_other_refs_held == 0);
4636 if (np->rn_erefs > 0)
4637 --np->rn_erefs;
4639 if (unrefed && np->rn_erefs > 0) {
4640 NODE_UNLOCK(np);
4641 return;
4644 (void) pthread_mutex_unlock(&np->rn_lock);
4646 if (unrefed)
4647 rc_node_destroy(np);
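/*
 * Map the type of the entity being deleted (and, for property groups,
 * its flags) to the audit event id used for the deletion.  Native builds
 * generate no audit events, so 0 is returned in that case.
 */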
4650 static au_event_t
4651 get_delete_event_id(rep_protocol_entity_t entity, uint32_t pgflags)
4653 au_event_t id = 0;
4655 #ifndef NATIVE_BUILD
4656 switch (entity) {
4657 case REP_PROTOCOL_ENTITY_SERVICE:
4658 case REP_PROTOCOL_ENTITY_INSTANCE:
4659 id = ADT_smf_delete;
4660 break;
4661 case REP_PROTOCOL_ENTITY_SNAPSHOT:
4662 id = ADT_smf_delete_snap;
4663 break;
4664 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4665 case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
4666 if (pgflags & SCF_PG_FLAG_NONPERSISTENT) {
4667 id = ADT_smf_delete_npg;
4668 } else {
4669 id = ADT_smf_delete_pg;
4671 break;
4672 default:
4673 abort();
4675 #endif /* NATIVE_BUILD */
4676 return (id);
4680 * Fails with
4681 * _NOT_SET
4682 * _DELETED
4683 * _BAD_REQUEST
4684 * _PERMISSION_DENIED
4685 * _NO_RESOURCES
4686 * _TRUNCATED
4687 * and whatever object_delete() fails with.
4690 rc_node_delete(rc_node_ptr_t *npp)
4692 rc_node_t *np, *np_orig;
4693 rc_node_t *pp = NULL;
4694 int rc;
4695 rc_node_pg_notify_t *pnp;
4696 cache_bucket_t *bp;
4697 rc_notify_delete_t *ndp;
4698 permcheck_t *pcp;
4699 int granted;
4700 au_event_t event_id = 0;
4701 size_t sz_out;
4702 audit_event_data_t audit_data;
4703 int audit_failure = 0;
4705 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
4707 audit_data.ed_fmri = NULL;
4708 audit_data.ed_auth = NULL;
4709 audit_data.ed_snapname = NULL;
4710 audit_data.ed_type = NULL;
4712 switch (np->rn_id.rl_type) {
4713 case REP_PROTOCOL_ENTITY_SERVICE:
4714 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SERVICE,
4715 np->rn_pgflags);
4716 break;
4717 case REP_PROTOCOL_ENTITY_INSTANCE:
4718 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_INSTANCE,
4719 np->rn_pgflags);
4720 break;
4721 case REP_PROTOCOL_ENTITY_SNAPSHOT:
4722 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SNAPSHOT,
4723 np->rn_pgflags);
4724 audit_data.ed_snapname = strdup(np->rn_name);
4725 if (audit_data.ed_snapname == NULL) {
4726 (void) pthread_mutex_unlock(&np->rn_lock);
4727 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4729 break; /* deletable */
4731 case REP_PROTOCOL_ENTITY_SCOPE:
4732 case REP_PROTOCOL_ENTITY_SNAPLEVEL:
4733 /* Scopes and snaplevels are indelible. */
4734 (void) pthread_mutex_unlock(&np->rn_lock);
4735 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4737 case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
4738 (void) pthread_mutex_unlock(&np->rn_lock);
4739 np = np->rn_cchain[0];
4740 RC_NODE_CHECK_AND_LOCK(np);
4741 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_CPROPERTYGRP,
4742 np->rn_pgflags);
4743 break;
4745 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4746 if (np->rn_id.rl_ids[ID_SNAPSHOT] == 0) {
4747 event_id =
4748 get_delete_event_id(REP_PROTOCOL_ENTITY_PROPERTYGRP,
4749 np->rn_pgflags);
4750 audit_data.ed_type = strdup(np->rn_type);
4751 if (audit_data.ed_type == NULL) {
4752 (void) pthread_mutex_unlock(&np->rn_lock);
4753 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4755 break;
4758 /* Snapshot property groups are indelible. */
4759 (void) pthread_mutex_unlock(&np->rn_lock);
4760 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
4762 case REP_PROTOCOL_ENTITY_PROPERTY:
4763 (void) pthread_mutex_unlock(&np->rn_lock);
4764 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4766 default:
4767 assert(0);
4768 abort();
4769 break;
4772 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
4773 if (audit_data.ed_fmri == NULL) {
4774 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4775 goto cleanout;
4777 np_orig = np;
4778 rc_node_hold_locked(np); /* simplifies rest of the code */
4780 again:
4782 * The following loop is to deal with the fact that snapshots and
4783 * property groups are moving targets -- changes to them result
4784 * in a new "child" node. Since we can only delete from the top node,
4785 * we have to loop until we have a non-RC_NODE_OLD version.
4787 for (;;) {
4788 if (!rc_node_wait_flag(np,
4789 RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
4790 rc_node_rele_locked(np);
4791 rc = REP_PROTOCOL_FAIL_DELETED;
4792 goto cleanout;
4795 if (np->rn_flags & RC_NODE_OLD) {
4796 rc_node_rele_locked(np);
4797 np = cache_lookup(&np_orig->rn_id);
4798 assert(np != np_orig);
4800 if (np == NULL) {
4801 rc = REP_PROTOCOL_FAIL_DELETED;
4802 goto fail;
4804 (void) pthread_mutex_lock(&np->rn_lock);
4805 continue;
4808 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
4809 rc_node_rele_locked(np);
4810 rc_node_clear(npp, 1);
4811 rc = REP_PROTOCOL_FAIL_DELETED;
4815 * Mark our parent as children changing. This call drops our
4816 * lock and the RC_NODE_USING_PARENT flag, and returns with
4817 * pp's lock held
4819 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
4820 if (pp == NULL) {
4821 /* our parent is gone, we're going next... */
4822 rc_node_rele(np);
4824 rc_node_clear(npp, 1);
4825 rc = REP_PROTOCOL_FAIL_DELETED;
4826 goto cleanout;
4829 rc_node_hold_locked(pp); /* hold for later */
4830 (void) pthread_mutex_unlock(&pp->rn_lock);
4832 (void) pthread_mutex_lock(&np->rn_lock);
4833 if (!(np->rn_flags & RC_NODE_OLD))
4834 break; /* not old -- we're done */
4836 (void) pthread_mutex_unlock(&np->rn_lock);
4837 (void) pthread_mutex_lock(&pp->rn_lock);
4838 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4839 rc_node_rele_locked(pp);
4840 (void) pthread_mutex_lock(&np->rn_lock);
4841 continue; /* loop around and try again */
4844 * Everyone out of the pool -- we grab everything but
4845 * RC_NODE_USING_PARENT (including RC_NODE_DYING) to keep
4846 * any changes from occurring while we are attempting to
4847 * delete the node.
4849 if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
4850 (void) pthread_mutex_unlock(&np->rn_lock);
4851 rc = REP_PROTOCOL_FAIL_DELETED;
4852 goto fail;
4855 assert(!(np->rn_flags & RC_NODE_OLD));
4857 if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
4858 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
4859 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4860 (void) pthread_mutex_unlock(&np->rn_lock);
4861 goto fail;
4864 #ifdef NATIVE_BUILD
4865 if (!client_is_privileged()) {
4866 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4868 #else
4869 if (is_main_repository) {
4870 /* permission check */
4871 (void) pthread_mutex_unlock(&np->rn_lock);
4872 pcp = pc_create();
4873 if (pcp != NULL) {
4874 rc = perm_add_enabling(pcp, AUTH_MODIFY);
4876 /* add .smf.modify.<type> for pgs. */
4877 if (rc == REP_PROTOCOL_SUCCESS && np->rn_id.rl_type ==
4878 REP_PROTOCOL_ENTITY_PROPERTYGRP) {
4879 const char * const auth =
4880 perm_auth_for_pgtype(np->rn_type);
4882 if (auth != NULL)
4883 rc = perm_add_enabling(pcp, auth);
4886 if (rc == REP_PROTOCOL_SUCCESS) {
4887 granted = perm_granted(pcp);
4889 rc = map_granted_status(granted, pcp,
4890 &audit_data.ed_auth);
4891 if (granted == PERM_GONE) {
4892 /* No need to audit if client gone. */
4893 pc_free(pcp);
4894 rc_node_rele_flag(np,
4895 RC_NODE_DYING_FLAGS);
4896 return (rc);
4898 if (granted == PERM_DENIED)
4899 audit_failure = 1;
4902 pc_free(pcp);
4903 } else {
4904 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4907 (void) pthread_mutex_lock(&np->rn_lock);
4908 } else {
4909 rc = REP_PROTOCOL_SUCCESS;
4911 #endif /* NATIVE_BUILD */
4913 if (rc != REP_PROTOCOL_SUCCESS) {
4914 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4915 (void) pthread_mutex_unlock(&np->rn_lock);
4916 goto fail;
4919 ndp = uu_zalloc(sizeof (*ndp));
4920 if (ndp == NULL) {
4921 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4922 (void) pthread_mutex_unlock(&np->rn_lock);
4923 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4924 goto fail;
4927 rc_node_delete_hold(np, 1); /* hold entire subgraph, drop lock */
4929 rc = object_delete(np);
4931 if (rc != REP_PROTOCOL_SUCCESS) {
4932 (void) pthread_mutex_lock(&np->rn_lock);
4933 rc_node_delete_rele(np, 1); /* drops lock */
4934 uu_free(ndp);
4935 goto fail;
4939 * Now, delicately unlink and delete the object.
4941 * Create the delete notification, atomically remove
4942 * from the hash table and set the NODE_DEAD flag, and
4943 * remove from the parent's children list.
4945 rc_notify_node_delete(ndp, np); /* frees or uses ndp */
4947 bp = cache_hold(np->rn_hash);
4949 (void) pthread_mutex_lock(&np->rn_lock);
4950 cache_remove_unlocked(bp, np);
4951 cache_release(bp);
4953 np->rn_flags |= RC_NODE_DEAD;
4955 if (pp != NULL) {
4957 * Remove from pp's rn_children. This requires pp's lock,
4958 * so we must drop np's lock to respect lock order.
4960 (void) pthread_mutex_unlock(&np->rn_lock);
4961 (void) pthread_mutex_lock(&pp->rn_lock);
4962 (void) pthread_mutex_lock(&np->rn_lock);
4964 uu_list_remove(pp->rn_children, np);
4966 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4968 (void) pthread_mutex_unlock(&pp->rn_lock);
4970 np->rn_flags &= ~RC_NODE_IN_PARENT;
4974 * finally, propagate death to our children (including marking
4975 * them DEAD), handle notifications, and release our hold.
4977 rc_node_hold_locked(np); /* hold for delete */
4978 rc_node_delete_children(np, 1); /* drops DYING_FLAGS, lock, ref */
4980 rc_node_clear(npp, 1);
4982 (void) pthread_mutex_lock(&rc_pg_notify_lock);
4983 while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
4984 rc_pg_notify_fire(pnp);
4985 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
4986 rc_notify_remove_node(np);
4988 rc_node_rele(np);
4990 smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
4991 &audit_data);
4992 free(audit_data.ed_auth);
4993 free(audit_data.ed_snapname);
4994 free(audit_data.ed_type);
4995 free(audit_data.ed_fmri);
4996 return (rc);
4998 fail:
4999 rc_node_rele(np);
5000 if (rc == REP_PROTOCOL_FAIL_DELETED)
5001 rc_node_clear(npp, 1);
5002 if (pp != NULL) {
5003 (void) pthread_mutex_lock(&pp->rn_lock);
5004 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5005 rc_node_rele_locked(pp); /* drop ref and lock */
5007 if (audit_failure) {
5008 smf_audit_event(event_id, ADT_FAILURE,
5009 ADT_FAIL_VALUE_AUTH, &audit_data);
5011 cleanout:
5012 free(audit_data.ed_auth);
5013 free(audit_data.ed_snapname);
5014 free(audit_data.ed_type);
5015 free(audit_data.ed_fmri);
5016 return (rc);
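/*
 * Find the next snaplevel: for a snapshot, return its first snaplevel
 * child in cpp; for a snaplevel, return the next one in the parent's
 * child list (which must match the rn_snaplevel->rsl_next chain).
 * Returns _NOT_FOUND when there is none.
 */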
5020 rc_node_next_snaplevel(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
5022 rc_node_t *np;
5023 rc_node_t *cp, *pp;
5024 int res;
5026 rc_node_clear(cpp, 0);
5028 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
5030 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT &&
5031 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL) {
5032 (void) pthread_mutex_unlock(&np->rn_lock);
5033 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
5036 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
5037 if ((res = rc_node_fill_children(np,
5038 REP_PROTOCOL_ENTITY_SNAPLEVEL)) != REP_PROTOCOL_SUCCESS) {
5039 (void) pthread_mutex_unlock(&np->rn_lock);
5040 return (res);
5043 for (cp = uu_list_first(np->rn_children);
5044 cp != NULL;
5045 cp = uu_list_next(np->rn_children, cp)) {
5046 if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
5047 continue;
5048 rc_node_hold(cp);
5049 break;
5052 (void) pthread_mutex_unlock(&np->rn_lock);
5053 } else {
5054 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
5055 (void) pthread_mutex_unlock(&np->rn_lock);
5056 rc_node_clear(npp, 1);
5057 return (REP_PROTOCOL_FAIL_DELETED);
5061 * mark our parent as children changing. This call drops our
5062 * lock and the RC_NODE_USING_PARENT flag, and returns with
5063 * pp's lock held
5065 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
5066 if (pp == NULL) {
5067 /* our parent is gone, we're going next... */
5069 rc_node_clear(npp, 1);
5070 return (REP_PROTOCOL_FAIL_DELETED);
5074 * find the next snaplevel
5076 cp = np;
5077 while ((cp = uu_list_next(pp->rn_children, cp)) != NULL &&
5078 cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
5081 /* it must match the snaplevel list */
5082 assert((cp == NULL && np->rn_snaplevel->rsl_next == NULL) ||
5083 (cp != NULL && np->rn_snaplevel->rsl_next ==
5084 cp->rn_snaplevel));
5086 if (cp != NULL)
5087 rc_node_hold(cp);
5089 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5091 (void) pthread_mutex_unlock(&pp->rn_lock);
5094 rc_node_assign(cpp, cp);
5095 if (cp != NULL) {
5096 rc_node_rele(cp);
5098 return (REP_PROTOCOL_SUCCESS);
5100 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5104 * This call takes a snapshot (np) and either:
5105 * an existing snapid (to be associated with np), or
5106 * a non-NULL parentp (from which a new snapshot is taken, and associated
5107 * with np)
5109 * To do the association, np is duplicated, the duplicate is made to
5110 * represent the new snapid, and np is replaced with the new rc_node_t on
5111 * np's parent's child list. np is placed on the new node's rn_former list,
5112 * and replaces np in cache_hash (so rc_node_update() will find the new one).
5114 * old_fmri and old_name point to the original snapshot's FMRI and name.
5115 * These values are used when generating audit events.
5117 * Fails with
5118 * _BAD_REQUEST
5119 * _BACKEND_READONLY
5120 * _DELETED
5121 * _NO_RESOURCES
5122 * _TRUNCATED
5123 * _TYPE_MISMATCH
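 *
 * Schematically, the replacement described above is:
 *
 *	before:  parent --children--> np --rn_former--> ...
 *	after:   parent --children--> nnp --rn_former--> np --rn_former--> ...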
5125 static int
5126 rc_attach_snapshot(
5127 rc_node_t *np,
5128 uint32_t snapid,
5129 rc_node_t *parentp,
5130 char *old_fmri,
5131 char *old_name)
5133 rc_node_t *np_orig;
5134 rc_node_t *nnp, *prev;
5135 rc_node_t *pp;
5136 int rc;
5137 size_t sz_out;
5138 perm_status_t granted;
5139 au_event_t event_id;
5140 audit_event_data_t audit_data;
5142 if (parentp == NULL) {
5143 assert(old_fmri != NULL);
5144 } else {
5145 assert(snapid == 0);
5147 assert(MUTEX_HELD(&np->rn_lock));
5149 /* Gather the audit data. */
5151 * ADT_smf_* symbols may not be defined in the /usr/include header
5152 * files on the build machine. Thus, the following if-else will
5153 * not be compiled when doing native builds.
5155 #ifndef NATIVE_BUILD
5156 if (parentp == NULL) {
5157 event_id = ADT_smf_attach_snap;
5158 } else {
5159 event_id = ADT_smf_create_snap;
5161 #endif /* NATIVE_BUILD */
5162 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
5163 audit_data.ed_snapname = malloc(REP_PROTOCOL_NAME_LEN);
5164 if ((audit_data.ed_fmri == NULL) || (audit_data.ed_snapname == NULL)) {
5165 (void) pthread_mutex_unlock(&np->rn_lock);
5166 free(audit_data.ed_fmri);
5167 free(audit_data.ed_snapname);
5168 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5170 audit_data.ed_auth = NULL;
5171 if (strlcpy(audit_data.ed_snapname, np->rn_name,
5172 REP_PROTOCOL_NAME_LEN) >= REP_PROTOCOL_NAME_LEN) {
5173 abort();
5175 audit_data.ed_old_fmri = old_fmri;
5176 audit_data.ed_old_name = old_name ? old_name : "NO NAME";
5178 if (parentp == NULL) {
5180 * In the attach case, get the instance FMRIs of the
5181 * snapshots.
5183 if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
5184 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
5185 (void) pthread_mutex_unlock(&np->rn_lock);
5186 free(audit_data.ed_fmri);
5187 free(audit_data.ed_snapname);
5188 return (rc);
5190 } else {
5192 * Capture the FMRI of the parent if we're actually going
5193 * to take the snapshot.
5195 if ((rc = rc_node_get_fmri_or_fragment(parentp,
5196 audit_data.ed_fmri, REP_PROTOCOL_FMRI_LEN, &sz_out)) !=
5197 REP_PROTOCOL_SUCCESS) {
5198 (void) pthread_mutex_unlock(&np->rn_lock);
5199 free(audit_data.ed_fmri);
5200 free(audit_data.ed_snapname);
5201 return (rc);
5205 np_orig = np;
5206 rc_node_hold_locked(np); /* simplifies the remainder */
5208 (void) pthread_mutex_unlock(&np->rn_lock);
5209 granted = rc_node_modify_permission_check(&audit_data.ed_auth);
5210 switch (granted) {
5211 case PERM_DENIED:
5212 smf_audit_event(event_id, ADT_FAILURE, ADT_FAIL_VALUE_AUTH,
5213 &audit_data);
5214 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5215 rc_node_rele(np);
5216 goto cleanout;
5217 case PERM_GRANTED:
5218 break;
5219 case PERM_GONE:
5220 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5221 rc_node_rele(np);
5222 goto cleanout;
5223 case PERM_FAIL:
5224 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5225 rc_node_rele(np);
5226 goto cleanout;
5227 default:
5228 bad_error(rc_node_modify_permission_check, granted);
5230 (void) pthread_mutex_lock(&np->rn_lock);
5233 * get the latest node, holding RC_NODE_IN_TX to keep the rn_former
5234 * list from changing.
5236 for (;;) {
5237 if (!(np->rn_flags & RC_NODE_OLD)) {
5238 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
5239 goto again;
5241 pp = rc_node_hold_parent_flag(np,
5242 RC_NODE_CHILDREN_CHANGING);
5244 (void) pthread_mutex_lock(&np->rn_lock);
5245 if (pp == NULL) {
5246 goto again;
5248 if (np->rn_flags & RC_NODE_OLD) {
5249 rc_node_rele_flag(pp,
5250 RC_NODE_CHILDREN_CHANGING);
5251 (void) pthread_mutex_unlock(&pp->rn_lock);
5252 goto again;
5254 (void) pthread_mutex_unlock(&pp->rn_lock);
5256 if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
5258 * Can't happen, since we're holding our
5259 * parent's CHILDREN_CHANGING flag...
5261 abort();
5263 break; /* everything's ready */
5265 again:
5266 rc_node_rele_locked(np);
5267 np = cache_lookup(&np_orig->rn_id);
5269 if (np == NULL) {
5270 rc = REP_PROTOCOL_FAIL_DELETED;
5271 goto cleanout;
5274 (void) pthread_mutex_lock(&np->rn_lock);
5277 if (parentp != NULL) {
5278 if (pp != parentp) {
5279 rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
5280 goto fail;
5282 nnp = NULL;
5283 } else {
5285 * look for a former node with the snapid we need.
5287 if (np->rn_snapshot_id == snapid) {
5288 rc_node_rele_flag(np, RC_NODE_IN_TX);
5289 rc_node_rele_locked(np);
5291 (void) pthread_mutex_lock(&pp->rn_lock);
5292 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5293 (void) pthread_mutex_unlock(&pp->rn_lock);
5294 rc = REP_PROTOCOL_SUCCESS; /* nothing to do */
5295 goto cleanout;
5298 prev = np;
5299 while ((nnp = prev->rn_former) != NULL) {
5300 if (nnp->rn_snapshot_id == snapid) {
5301 rc_node_hold(nnp);
5302 break; /* existing node with that id */
5304 prev = nnp;
5308 if (nnp == NULL) {
5309 prev = NULL;
5310 nnp = rc_node_alloc();
5311 if (nnp == NULL) {
5312 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5313 goto fail;
5316 nnp->rn_id = np->rn_id; /* structure assignment */
5317 nnp->rn_hash = np->rn_hash;
5318 nnp->rn_name = strdup(np->rn_name);
5319 nnp->rn_snapshot_id = snapid;
5320 nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
5322 if (nnp->rn_name == NULL) {
5323 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5324 goto fail;
5328 (void) pthread_mutex_unlock(&np->rn_lock);
5330 rc = object_snapshot_attach(&np->rn_id, &snapid, (parentp != NULL));
5332 if (parentp != NULL)
5333 nnp->rn_snapshot_id = snapid; /* fill in new snapid */
5334 else
5335 assert(nnp->rn_snapshot_id == snapid);
5337 (void) pthread_mutex_lock(&np->rn_lock);
5338 if (rc != REP_PROTOCOL_SUCCESS)
5339 goto fail;
5342 * fix up the former chain
5344 if (prev != NULL) {
5345 prev->rn_former = nnp->rn_former;
5346 (void) pthread_mutex_lock(&nnp->rn_lock);
5347 nnp->rn_flags &= ~RC_NODE_ON_FORMER;
5348 nnp->rn_former = NULL;
5349 (void) pthread_mutex_unlock(&nnp->rn_lock);
5351 np->rn_flags |= RC_NODE_OLD;
5352 (void) pthread_mutex_unlock(&np->rn_lock);
5355 * replace np with nnp
5357 rc_node_relink_child(pp, np, nnp);
5359 rc_node_rele(np);
5360 smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS, &audit_data);
5361 rc = REP_PROTOCOL_SUCCESS;
5363 cleanout:
5364 free(audit_data.ed_auth);
5365 free(audit_data.ed_fmri);
5366 free(audit_data.ed_snapname);
5367 return (rc);
5369 fail:
5370 rc_node_rele_flag(np, RC_NODE_IN_TX);
5371 rc_node_rele_locked(np);
5372 (void) pthread_mutex_lock(&pp->rn_lock);
5373 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5374 (void) pthread_mutex_unlock(&pp->rn_lock);
5376 if (nnp != NULL) {
5377 if (prev == NULL)
5378 rc_node_destroy(nnp);
5379 else
5380 rc_node_rele(nnp);
5383 free(audit_data.ed_auth);
5384 free(audit_data.ed_fmri);
5385 free(audit_data.ed_snapname);
5386 return (rc);
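/*
 * Take a new snapshot, named name, of the instance referenced by npp and
 * return it in outpp.  svcname and instname, if non-NULL, are validated
 * and passed through to object_snapshot_take_new().  The attempt is
 * audited as an ADT_smf_create_snap event.
 */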
5390 rc_snapshot_take_new(rc_node_ptr_t *npp, const char *svcname,
5391 const char *instname, const char *name, rc_node_ptr_t *outpp)
5393 perm_status_t granted;
5394 rc_node_t *np;
5395 rc_node_t *outp = NULL;
5396 int rc, perm_rc;
5397 char fmri[REP_PROTOCOL_FMRI_LEN];
5398 audit_event_data_t audit_data;
5399 size_t sz_out;
5401 rc_node_clear(outpp, 0);
5404 * rc_node_modify_permission_check() must be called before the node
5405 * is locked. This is because the library functions that check
5406 * authorizations can trigger calls back into configd.
5408 granted = rc_node_modify_permission_check(&audit_data.ed_auth);
5409 switch (granted) {
5410 case PERM_DENIED:
5412 * We continue in this case, so that we can generate an
5413 * audit event later in this function.
5415 perm_rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5416 break;
5417 case PERM_GRANTED:
5418 perm_rc = REP_PROTOCOL_SUCCESS;
5419 break;
5420 case PERM_GONE:
5421 /* No need to produce audit event if client is gone. */
5422 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5423 case PERM_FAIL:
5424 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5425 default:
5426 bad_error("rc_node_modify_permission_check", granted);
5427 break;
5430 RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, audit_data.ed_auth);
5431 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
5432 (void) pthread_mutex_unlock(&np->rn_lock);
5433 free(audit_data.ed_auth);
5434 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5437 rc = rc_check_type_name(REP_PROTOCOL_ENTITY_SNAPSHOT, name);
5438 if (rc != REP_PROTOCOL_SUCCESS) {
5439 (void) pthread_mutex_unlock(&np->rn_lock);
5440 free(audit_data.ed_auth);
5441 return (rc);
5444 if (svcname != NULL && (rc =
5445 rc_check_type_name(REP_PROTOCOL_ENTITY_SERVICE, svcname)) !=
5446 REP_PROTOCOL_SUCCESS) {
5447 (void) pthread_mutex_unlock(&np->rn_lock);
5448 free(audit_data.ed_auth);
5449 return (rc);
5452 if (instname != NULL && (rc =
5453 rc_check_type_name(REP_PROTOCOL_ENTITY_INSTANCE, instname)) !=
5454 REP_PROTOCOL_SUCCESS) {
5455 (void) pthread_mutex_unlock(&np->rn_lock);
5456 free(audit_data.ed_auth);
5457 return (rc);
5460 audit_data.ed_fmri = fmri;
5461 audit_data.ed_snapname = (char *)name;
5463 if ((rc = rc_node_get_fmri_or_fragment(np, fmri, sizeof (fmri),
5464 &sz_out)) != REP_PROTOCOL_SUCCESS) {
5465 (void) pthread_mutex_unlock(&np->rn_lock);
5466 free(audit_data.ed_auth);
5467 return (rc);
5469 if (perm_rc != REP_PROTOCOL_SUCCESS) {
5470 (void) pthread_mutex_unlock(&np->rn_lock);
5471 smf_audit_event(ADT_smf_create_snap, ADT_FAILURE,
5472 ADT_FAIL_VALUE_AUTH, &audit_data);
5473 free(audit_data.ed_auth);
5474 return (perm_rc);
5477 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
5478 audit_data.ed_auth);
5479 (void) pthread_mutex_unlock(&np->rn_lock);
5481 rc = object_snapshot_take_new(np, svcname, instname, name, &outp);
5483 if (rc == REP_PROTOCOL_SUCCESS) {
5484 rc_node_assign(outpp, outp);
5485 rc_node_rele(outp);
5488 (void) pthread_mutex_lock(&np->rn_lock);
5489 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
5490 (void) pthread_mutex_unlock(&np->rn_lock);
5492 if (rc == REP_PROTOCOL_SUCCESS) {
5493 smf_audit_event(ADT_smf_create_snap, ADT_SUCCESS, ADT_SUCCESS,
5494 &audit_data);
5496 if (audit_data.ed_auth != NULL)
5497 free(audit_data.ed_auth);
5498 return (rc);
5502 rc_snapshot_take_attach(rc_node_ptr_t *npp, rc_node_ptr_t *outpp)
5504 rc_node_t *np, *outp;
5506 RC_NODE_PTR_GET_CHECK(np, npp);
5507 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
5508 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5511 RC_NODE_PTR_GET_CHECK_AND_LOCK(outp, outpp);
5512 if (outp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5513 (void) pthread_mutex_unlock(&outp->rn_lock);
5514 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5517 return (rc_attach_snapshot(outp, 0, np, NULL,
5518 NULL)); /* drops outp's lock */
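/*
 * Attach the snapid of the snapshot referenced by npp to the snapshot
 * referenced by cpp.  npp's FMRI and name are captured so they can be
 * recorded in the audit event as the old snapshot.
 */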
5522 rc_snapshot_attach(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
5524 rc_node_t *np;
5525 rc_node_t *cp;
5526 uint32_t snapid;
5527 char old_name[REP_PROTOCOL_NAME_LEN];
5528 int rc;
5529 size_t sz_out;
5530 char old_fmri[REP_PROTOCOL_FMRI_LEN];
5532 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
5533 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5534 (void) pthread_mutex_unlock(&np->rn_lock);
5535 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5537 snapid = np->rn_snapshot_id;
5538 rc = rc_node_get_fmri_or_fragment(np, old_fmri, sizeof (old_fmri),
5539 &sz_out);
5540 (void) pthread_mutex_unlock(&np->rn_lock);
5541 if (rc != REP_PROTOCOL_SUCCESS)
5542 return (rc);
5543 if (np->rn_name != NULL) {
5544 if (strlcpy(old_name, np->rn_name, sizeof (old_name)) >=
5545 sizeof (old_name)) {
5546 return (REP_PROTOCOL_FAIL_TRUNCATED);
5550 RC_NODE_PTR_GET_CHECK_AND_LOCK(cp, cpp);
5551 if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5552 (void) pthread_mutex_unlock(&cp->rn_lock);
5553 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5556 rc = rc_attach_snapshot(cp, snapid, NULL,
5557 old_fmri, old_name); /* drops cp's lock */
5558 return (rc);
5562 * If the pgname property group under ent has type pgtype, and it has a
5563 * propname property with type ptype, return _SUCCESS. If pgtype is NULL,
5564 * it is not checked. If ent is not a service node, we will return _SUCCESS if
5565 * a property meeting the requirements exists in either the instance or its
5566 * parent.
5568 * Returns
5569 * _SUCCESS - see above
5570 * _DELETED - ent or one of its ancestors was deleted
5571 * _NO_RESOURCES - no resources
5572 * _NOT_FOUND - no matching property was found
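 *
 * For example, rc_node_pg_check_read_protect() below uses this to ask
 * whether the property group (or its counterpart on the parent service)
 * defines an AUTH_PROP_READ property of string type.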
5574 static int
5575 rc_svc_prop_exists(rc_node_t *ent, const char *pgname, const char *pgtype,
5576 const char *propname, rep_protocol_value_type_t ptype)
5578 int ret;
5579 rc_node_t *pg = NULL, *spg = NULL, *svc, *prop;
5581 assert(!MUTEX_HELD(&ent->rn_lock));
5583 (void) pthread_mutex_lock(&ent->rn_lock);
5584 ret = rc_node_find_named_child(ent, pgname,
5585 REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
5586 (void) pthread_mutex_unlock(&ent->rn_lock);
5588 switch (ret) {
5589 case REP_PROTOCOL_SUCCESS:
5590 break;
5592 case REP_PROTOCOL_FAIL_DELETED:
5593 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5594 return (ret);
5596 default:
5597 bad_error("rc_node_find_named_child", ret);
5600 if (ent->rn_id.rl_type != REP_PROTOCOL_ENTITY_SERVICE) {
5601 ret = rc_node_find_ancestor(ent, REP_PROTOCOL_ENTITY_SERVICE,
5602 &svc);
5603 if (ret != REP_PROTOCOL_SUCCESS) {
5604 assert(ret == REP_PROTOCOL_FAIL_DELETED);
5605 if (pg != NULL)
5606 rc_node_rele(pg);
5607 return (ret);
5609 assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);
5611 (void) pthread_mutex_lock(&svc->rn_lock);
5612 ret = rc_node_find_named_child(svc, pgname,
5613 REP_PROTOCOL_ENTITY_PROPERTYGRP, &spg);
5614 (void) pthread_mutex_unlock(&svc->rn_lock);
5616 rc_node_rele(svc);
5618 switch (ret) {
5619 case REP_PROTOCOL_SUCCESS:
5620 break;
5622 case REP_PROTOCOL_FAIL_DELETED:
5623 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5624 if (pg != NULL)
5625 rc_node_rele(pg);
5626 return (ret);
5628 default:
5629 bad_error("rc_node_find_named_child", ret);
5633 if (pg != NULL &&
5634 pgtype != NULL && strcmp(pg->rn_type, pgtype) != 0) {
5635 rc_node_rele(pg);
5636 pg = NULL;
5639 if (spg != NULL &&
5640 pgtype != NULL && strcmp(spg->rn_type, pgtype) != 0) {
5641 rc_node_rele(spg);
5642 spg = NULL;
5645 if (pg == NULL) {
5646 if (spg == NULL)
5647 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5648 pg = spg;
5649 spg = NULL;
5653 * At this point, pg is non-NULL, and is a property group node of the
5654 * correct type. spg, if non-NULL, is also a property group node of
5655 * the correct type. Check for the property in pg first, then spg
5656 * (if applicable).
5658 (void) pthread_mutex_lock(&pg->rn_lock);
5659 ret = rc_node_find_named_child(pg, propname,
5660 REP_PROTOCOL_ENTITY_PROPERTY, &prop);
5661 (void) pthread_mutex_unlock(&pg->rn_lock);
5662 rc_node_rele(pg);
5663 switch (ret) {
5664 case REP_PROTOCOL_SUCCESS:
5665 if (prop != NULL) {
5666 if (prop->rn_valtype == ptype) {
5667 rc_node_rele(prop);
5668 if (spg != NULL)
5669 rc_node_rele(spg);
5670 return (REP_PROTOCOL_SUCCESS);
5672 rc_node_rele(prop);
5674 break;
5676 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5677 if (spg != NULL)
5678 rc_node_rele(spg);
5679 return (ret);
5681 case REP_PROTOCOL_FAIL_DELETED:
5682 break;
5684 default:
5685 bad_error("rc_node_find_named_child", ret);
5688 if (spg == NULL)
5689 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5691 pg = spg;
5693 (void) pthread_mutex_lock(&pg->rn_lock);
5694 ret = rc_node_find_named_child(pg, propname,
5695 REP_PROTOCOL_ENTITY_PROPERTY, &prop);
5696 (void) pthread_mutex_unlock(&pg->rn_lock);
5697 rc_node_rele(pg);
5698 switch (ret) {
5699 case REP_PROTOCOL_SUCCESS:
5700 if (prop != NULL) {
5701 if (prop->rn_valtype == ptype) {
5702 rc_node_rele(prop);
5703 return (REP_PROTOCOL_SUCCESS);
5705 rc_node_rele(prop);
5707 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5709 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5710 return (ret);
5712 case REP_PROTOCOL_FAIL_DELETED:
5713 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5715 default:
5716 bad_error("rc_node_find_named_child", ret);
5719 return (REP_PROTOCOL_SUCCESS);
5723 * Given a property group node, returns _SUCCESS if the property group may
5724 * be read without any special authorization.
5726 * Fails with:
5727 * _DELETED - np or an ancestor node was deleted
5728 * _TYPE_MISMATCH - np does not refer to a property group
5729 * _NO_RESOURCES - no resources
5730 * _PERMISSION_DENIED - authorization is required
5732 static int
5733 rc_node_pg_check_read_protect(rc_node_t *np)
5735 int ret;
5736 rc_node_t *ent;
5738 assert(!MUTEX_HELD(&np->rn_lock));
5740 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
5741 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5743 if (strcmp(np->rn_type, SCF_GROUP_FRAMEWORK) == 0 ||
5744 strcmp(np->rn_type, SCF_GROUP_DEPENDENCY) == 0 ||
5745 strcmp(np->rn_type, SCF_GROUP_METHOD) == 0)
5746 return (REP_PROTOCOL_SUCCESS);
5748 ret = rc_node_parent(np, &ent);
5750 if (ret != REP_PROTOCOL_SUCCESS)
5751 return (ret);
5753 ret = rc_svc_prop_exists(ent, np->rn_name, np->rn_type,
5754 AUTH_PROP_READ, REP_PROTOCOL_TYPE_STRING);
5756 rc_node_rele(ent);
5758 switch (ret) {
5759 case REP_PROTOCOL_FAIL_NOT_FOUND:
5760 return (REP_PROTOCOL_SUCCESS);
5761 case REP_PROTOCOL_SUCCESS:
5762 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5763 case REP_PROTOCOL_FAIL_DELETED:
5764 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5765 return (ret);
5766 default:
5767 bad_error("rc_svc_prop_exists", ret);
5770 return (REP_PROTOCOL_SUCCESS);
5774 * Fails with
5775 * _DELETED - np's node or parent has been deleted
5776 * _TYPE_MISMATCH - np's node is not a property
5777 * _NO_RESOURCES - out of memory
5778 * _PERMISSION_DENIED - no authorization to read this property's value(s)
5779 * _BAD_REQUEST - np's parent is not a property group
5781 static int
5782 rc_node_property_may_read(rc_node_t *np)
5784 int ret;
5785 perm_status_t granted = PERM_DENIED;
5786 rc_node_t *pgp;
5787 permcheck_t *pcp;
5788 audit_event_data_t audit_data;
5789 size_t sz_out;
5791 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
5792 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5794 if (client_is_privileged())
5795 return (REP_PROTOCOL_SUCCESS);
5797 #ifdef NATIVE_BUILD
5798 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5799 #else
5800 ret = rc_node_parent(np, &pgp);
5802 if (ret != REP_PROTOCOL_SUCCESS)
5803 return (ret);
5805 if (pgp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
5806 rc_node_rele(pgp);
5807 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5810 ret = rc_node_pg_check_read_protect(pgp);
5812 if (ret != REP_PROTOCOL_FAIL_PERMISSION_DENIED) {
5813 rc_node_rele(pgp);
5814 return (ret);
5817 pcp = pc_create();
5819 if (pcp == NULL) {
5820 rc_node_rele(pgp);
5821 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5824 ret = perm_add_enabling(pcp, AUTH_MODIFY);
5826 if (ret == REP_PROTOCOL_SUCCESS) {
5827 const char * const auth =
5828 perm_auth_for_pgtype(pgp->rn_type);
5830 if (auth != NULL)
5831 ret = perm_add_enabling(pcp, auth);
5835 * If you are permitted to modify the value, you may also
5836 * read it. This means that both the MODIFY and VALUE
5837 * authorizations are acceptable. We don't allow requests
5838 * for AUTH_PROP_MODIFY if all you have is $AUTH_PROP_VALUE,
5839 * however, to avoid leaking possibly valuable information
5840 * since such a user can't change the property anyway.
5842 if (ret == REP_PROTOCOL_SUCCESS)
5843 ret = perm_add_enabling_values(pcp, pgp,
5844 AUTH_PROP_MODIFY);
5846 if (ret == REP_PROTOCOL_SUCCESS &&
5847 strcmp(np->rn_name, AUTH_PROP_MODIFY) != 0)
5848 ret = perm_add_enabling_values(pcp, pgp,
5849 AUTH_PROP_VALUE);
5851 if (ret == REP_PROTOCOL_SUCCESS)
5852 ret = perm_add_enabling_values(pcp, pgp,
5853 AUTH_PROP_READ);
5855 rc_node_rele(pgp);
5857 if (ret == REP_PROTOCOL_SUCCESS) {
5858 granted = perm_granted(pcp);
5859 if (granted == PERM_FAIL)
5860 ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
5861 if (granted == PERM_GONE)
5862 ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5865 if (ret == REP_PROTOCOL_SUCCESS) {
5866 /* Generate a read_prop audit event. */
5867 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
5868 if (audit_data.ed_fmri == NULL)
5869 ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
5871 if (ret == REP_PROTOCOL_SUCCESS) {
5872 ret = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
5873 REP_PROTOCOL_FMRI_LEN, &sz_out);
5875 if (ret == REP_PROTOCOL_SUCCESS) {
5876 int status;
5877 int ret_value;
5879 if (granted == PERM_DENIED) {
5880 status = ADT_FAILURE;
5881 ret_value = ADT_FAIL_VALUE_AUTH;
5882 } else {
5883 status = ADT_SUCCESS;
5884 ret_value = ADT_SUCCESS;
5886 audit_data.ed_auth = pcp->pc_auth_string;
5887 smf_audit_event(ADT_smf_read_prop,
5888 status, ret_value, &audit_data);
5890 free(audit_data.ed_fmri);
5892 pc_free(pcp);
5894 if ((ret == REP_PROTOCOL_SUCCESS) && (granted == PERM_DENIED))
5895 ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5897 return (ret);
5898 #endif /* NATIVE_BUILD */
5902 * Iteration
5904 static int
5905 rc_iter_filter_name(rc_node_t *np, void *s)
5907 const char *name = s;
5909 return (strcmp(np->rn_name, name) == 0);
5912 static int
5913 rc_iter_filter_type(rc_node_t *np, void *s)
5915 const char *type = s;
5917 return (np->rn_type != NULL && strcmp(np->rn_type, type) == 0);
5920 /*ARGSUSED*/
5921 static int
5922 rc_iter_null_filter(rc_node_t *np, void *s)
5924 return (1);
5927 /*
5928 * Allocate & initialize an rc_node_iter_t structure. Essentially, ensure
5929 * np->rn_children is populated and call uu_list_walk_start(np->rn_children).
5930 * If successful, leaves a hold on np & increments np->rn_other_refs.
5931 *
5932 * If composed is true, then set up for iteration across the top level of np's
5933 * composition chain. If successful, leaves a hold on np and increments
5934 * rn_other_refs for the top level of np's composition chain.
5935 *
5936 * Fails with
5937 * _NO_RESOURCES
5938 * _INVALID_TYPE
5939 * _TYPE_MISMATCH - np cannot carry type children
5940 * _DELETED
5941 */
5942 static int
5943 rc_iter_create(rc_node_iter_t **resp, rc_node_t *np, uint32_t type,
5944 rc_iter_filter_func *filter, void *arg, boolean_t composed)
5946 rc_node_iter_t *nip;
5947 int res;
5949 assert(*resp == NULL);
5951 nip = uu_zalloc(sizeof (*nip));
5952 if (nip == NULL)
5953 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5955 /* np is held by the client's rc_node_ptr_t */
5956 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
5957 composed = 1;
5959 if (!composed) {
5960 (void) pthread_mutex_lock(&np->rn_lock);
5962 if ((res = rc_node_fill_children(np, type)) !=
5963 REP_PROTOCOL_SUCCESS) {
5964 (void) pthread_mutex_unlock(&np->rn_lock);
5965 uu_free(nip);
5966 return (res);
5969 nip->rni_clevel = -1;
5971 nip->rni_iter = uu_list_walk_start(np->rn_children,
5972 UU_WALK_ROBUST);
5973 if (nip->rni_iter != NULL) {
5974 nip->rni_iter_node = np;
5975 rc_node_hold_other(np);
5976 } else {
5977 (void) pthread_mutex_unlock(&np->rn_lock);
5978 uu_free(nip);
5979 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5981 (void) pthread_mutex_unlock(&np->rn_lock);
5982 } else {
5983 rc_node_t *ent;
5985 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
5986 /* rn_cchain isn't valid until children are loaded. */
5987 (void) pthread_mutex_lock(&np->rn_lock);
5988 res = rc_node_fill_children(np,
5989 REP_PROTOCOL_ENTITY_SNAPLEVEL);
5990 (void) pthread_mutex_unlock(&np->rn_lock);
5991 if (res != REP_PROTOCOL_SUCCESS) {
5992 uu_free(nip);
5993 return (res);
5996 /* Check for an empty snapshot. */
5997 if (np->rn_cchain[0] == NULL)
5998 goto empty;
6001 /* Start at the top of the composition chain. */
6002 for (nip->rni_clevel = 0; ; ++nip->rni_clevel) {
6003 if (nip->rni_clevel >= COMPOSITION_DEPTH) {
6004 /* Empty composition chain. */
6005 empty:
6006 nip->rni_clevel = -1;
6007 nip->rni_iter = NULL;
6008 /* It's ok, iter_next() will return _DONE. */
6009 goto out;
6012 ent = np->rn_cchain[nip->rni_clevel];
6013 assert(ent != NULL);
6015 if (rc_node_check_and_lock(ent) == REP_PROTOCOL_SUCCESS)
6016 break;
6018 /* Someone deleted it, so try the next one. */
6021 res = rc_node_fill_children(ent, type);
6023 if (res == REP_PROTOCOL_SUCCESS) {
6024 nip->rni_iter = uu_list_walk_start(ent->rn_children,
6025 UU_WALK_ROBUST);
6027 if (nip->rni_iter == NULL)
6028 res = REP_PROTOCOL_FAIL_NO_RESOURCES;
6029 else {
6030 nip->rni_iter_node = ent;
6031 rc_node_hold_other(ent);
6035 if (res != REP_PROTOCOL_SUCCESS) {
6036 (void) pthread_mutex_unlock(&ent->rn_lock);
6037 uu_free(nip);
6038 return (res);
6041 (void) pthread_mutex_unlock(&ent->rn_lock);
6044 out:
6045 rc_node_hold(np); /* released by rc_iter_end() */
6046 nip->rni_parent = np;
6047 nip->rni_type = type;
6048 nip->rni_filter = (filter != NULL)? filter : rc_iter_null_filter;
6049 nip->rni_filter_arg = arg;
6050 *resp = nip;
6051 return (REP_PROTOCOL_SUCCESS);
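/*
 * Illustrative usage sketch (not an actual caller; the pg name "general"
 * and the surrounding setup are made up for the example).  The iterator
 * contract established above is create, repeated rc_iter_next(), destroy:
 *
 *	rc_node_iter_t *iter = NULL;
 *	rc_node_ptr_t child;		(initialized, as client.c does)
 *	int r;
 *
 *	r = rc_iter_create(&iter, np, REP_PROTOCOL_ENTITY_PROPERTYGRP,
 *	    rc_iter_filter_name, (void *)"general", B_FALSE);
 *	while (r == REP_PROTOCOL_SUCCESS &&
 *	    (r = rc_iter_next(iter, &child,
 *	    REP_PROTOCOL_ENTITY_PROPERTYGRP)) == REP_PROTOCOL_SUCCESS) {
 *		... use the pg now referenced by child ...
 *	}
 *	if (iter != NULL)
 *		rc_iter_destroy(&iter);
 *
 * In this file the real entry point is rc_node_setup_iter() below, which
 * derives the filter and pattern from the client's ITER_START request.
 */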
6054 static void
6055 rc_iter_end(rc_node_iter_t *iter)
6057 rc_node_t *np = iter->rni_parent;
6059 if (iter->rni_clevel >= 0)
6060 np = np->rn_cchain[iter->rni_clevel];
6062 assert(MUTEX_HELD(&np->rn_lock));
6063 if (iter->rni_iter != NULL)
6064 uu_list_walk_end(iter->rni_iter);
6065 iter->rni_iter = NULL;
6067 (void) pthread_mutex_unlock(&np->rn_lock);
6068 rc_node_rele(iter->rni_parent);
6069 if (iter->rni_iter_node != NULL)
6070 rc_node_rele_other(iter->rni_iter_node);
6071 }
6073 /*
6074 * Fails with
6075 * _NOT_SET - npp is reset
6076 * _DELETED - npp's node has been deleted
6077 * _NOT_APPLICABLE - npp's node is not a property
6078 * _NO_RESOURCES - out of memory
6079 */
6080 static int
6081 rc_node_setup_value_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp)
6083 rc_node_t *np;
6085 rc_node_iter_t *nip;
6087 assert(*iterp == NULL);
6089 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
6091 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
6092 (void) pthread_mutex_unlock(&np->rn_lock);
6093 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
6096 nip = uu_zalloc(sizeof (*nip));
6097 if (nip == NULL) {
6098 (void) pthread_mutex_unlock(&np->rn_lock);
6099 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6102 nip->rni_parent = np;
6103 nip->rni_iter = NULL;
6104 nip->rni_clevel = -1;
6105 nip->rni_type = REP_PROTOCOL_ENTITY_VALUE;
6106 nip->rni_offset = 0;
6107 nip->rni_last_offset = 0;
6109 rc_node_hold_locked(np);
6111 *iterp = nip;
6112 (void) pthread_mutex_unlock(&np->rn_lock);
6114 return (REP_PROTOCOL_SUCCESS);
6115 }
6117 /*
6118 * Returns:
6119 * _NO_RESOURCES - out of memory
6120 * _NOT_SET - npp is reset
6121 * _DELETED - npp's node has been deleted
6122 * _TYPE_MISMATCH - npp's node is not a property
6123 * _NOT_FOUND - property has no values
6124 * _TRUNCATED - property has >1 values (first is written into out)
6125 * _SUCCESS - property has 1 value (which is written into out)
6126 * _PERMISSION_DENIED - no authorization to read property value(s)
6127 *
6128 * We shorten *sz_out to not include anything after the final '\0'.
6129 */
6130 int
6131 rc_node_get_property_value(rc_node_ptr_t *npp,
6132 struct rep_protocol_value_response *out, size_t *sz_out)
6134 rc_node_t *np;
6135 size_t w;
6136 int ret;
6138 assert(*sz_out == sizeof (*out));
6140 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
6141 ret = rc_node_property_may_read(np);
6142 rc_node_rele(np);
6144 if (ret != REP_PROTOCOL_SUCCESS)
6145 return (ret);
6147 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
6149 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
6150 (void) pthread_mutex_unlock(&np->rn_lock);
6151 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6154 if (np->rn_values_size == 0) {
6155 (void) pthread_mutex_unlock(&np->rn_lock);
6156 return (REP_PROTOCOL_FAIL_NOT_FOUND);
6158 out->rpr_type = np->rn_valtype;
6159 w = strlcpy(out->rpr_value, &np->rn_values[0],
6160 sizeof (out->rpr_value));
6162 if (w >= sizeof (out->rpr_value))
6163 backend_panic("value too large");
6165 *sz_out = offsetof(struct rep_protocol_value_response,
6166 rpr_value[w + 1]);
6168 ret = (np->rn_values_count != 1)? REP_PROTOCOL_FAIL_TRUNCATED :
6169 REP_PROTOCOL_SUCCESS;
6170 (void) pthread_mutex_unlock(&np->rn_lock);
6171 return (ret);
6172 }
6174 int
6175 rc_iter_next_value(rc_node_iter_t *iter,
6176 struct rep_protocol_value_response *out, size_t *sz_out, int repeat)
6177 {
6178 rc_node_t *np = iter->rni_parent;
6179 const char *vals;
6180 size_t len;
6182 size_t start;
6183 size_t w;
6184 int ret;
6186 rep_protocol_responseid_t result;
6188 assert(*sz_out == sizeof (*out));
6190 (void) memset(out, '\0', *sz_out);
6192 if (iter->rni_type != REP_PROTOCOL_ENTITY_VALUE)
6193 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6195 RC_NODE_CHECK(np);
6196 ret = rc_node_property_may_read(np);
6198 if (ret != REP_PROTOCOL_SUCCESS)
6199 return (ret);
6201 RC_NODE_CHECK_AND_LOCK(np);
6203 vals = np->rn_values;
6204 len = np->rn_values_size;
6206 out->rpr_type = np->rn_valtype;
6208 start = (repeat)? iter->rni_last_offset : iter->rni_offset;
6210 if (len == 0 || start >= len) {
6211 result = REP_PROTOCOL_DONE;
6212 *sz_out -= sizeof (out->rpr_value);
6213 } else {
6214 w = strlcpy(out->rpr_value, &vals[start],
6215 sizeof (out->rpr_value));
6217 if (w >= sizeof (out->rpr_value))
6218 backend_panic("value too large");
6220 *sz_out = offsetof(struct rep_protocol_value_response,
6221 rpr_value[w + 1]);
6223 /*
6224 * update the offsets if we're not repeating
6225 */
6226 if (!repeat) {
6227 iter->rni_last_offset = iter->rni_offset;
6228 iter->rni_offset += (w + 1);
6231 result = REP_PROTOCOL_SUCCESS;
6234 (void) pthread_mutex_unlock(&np->rn_lock);
6235 return (result);
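/*
 * A note on the value layout the two functions above rely on (descriptive
 * sketch; the concrete numbers are just an example): a property's values
 * are stored in one buffer, rn_values, as consecutive NUL-terminated
 * strings, with rn_values_size covering the whole buffer and
 * rn_values_count giving the number of strings.  Two values "true" and
 * "false" would look like
 *
 *	rn_values       = "true\0false\0"
 *	rn_values_size  = 11
 *	rn_values_count = 2
 *
 * which is why rc_iter_next_value() advances rni_offset by w + 1 (string
 * length plus terminator) per value, and why a "repeat" request simply
 * re-reads from rni_last_offset without moving either offset.
 */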
6238 /*
6239 * Entry point for ITER_START from client.c. Validate the arguments & call
6240 * rc_iter_create().
6241 *
6242 * Fails with
6243 * _NOT_SET
6244 * _DELETED
6245 * _TYPE_MISMATCH - np cannot carry type children
6246 * _BAD_REQUEST - flags is invalid
6247 * pattern is invalid
6248 * _NO_RESOURCES
6249 * _INVALID_TYPE
6250 * _TYPE_MISMATCH - *npp cannot have children of type
6251 * _BACKEND_ACCESS
6252 */
6253 int
6254 rc_node_setup_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp,
6255 uint32_t type, uint32_t flags, const char *pattern)
6257 rc_node_t *np;
6258 rc_iter_filter_func *f = NULL;
6259 int rc;
6261 RC_NODE_PTR_GET_CHECK(np, npp);
6263 if (pattern != NULL && pattern[0] == '\0')
6264 pattern = NULL;
6266 if (type == REP_PROTOCOL_ENTITY_VALUE) {
6267 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
6268 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6269 if (flags != RP_ITER_START_ALL || pattern != NULL)
6270 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6272 rc = rc_node_setup_value_iter(npp, iterp);
6273 assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
6274 return (rc);
6277 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
6278 REP_PROTOCOL_SUCCESS)
6279 return (rc);
6281 if (((flags & RP_ITER_START_FILT_MASK) == RP_ITER_START_ALL) ^
6282 (pattern == NULL))
6283 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6285 /* Composition only works for instances & snapshots. */
6286 if ((flags & RP_ITER_START_COMPOSED) &&
6287 (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE &&
6288 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT))
6289 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6291 if (pattern != NULL) {
6292 if ((rc = rc_check_type_name(type, pattern)) !=
6293 REP_PROTOCOL_SUCCESS)
6294 return (rc);
6295 pattern = strdup(pattern);
6296 if (pattern == NULL)
6297 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6300 switch (flags & RP_ITER_START_FILT_MASK) {
6301 case RP_ITER_START_ALL:
6302 f = NULL;
6303 break;
6304 case RP_ITER_START_EXACT:
6305 f = rc_iter_filter_name;
6306 break;
6307 case RP_ITER_START_PGTYPE:
6308 if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
6309 free((void *)pattern);
6310 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6312 f = rc_iter_filter_type;
6313 break;
6314 default:
6315 free((void *)pattern);
6316 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6319 rc = rc_iter_create(iterp, np, type, f, (void *)pattern,
6320 flags & RP_ITER_START_COMPOSED);
6321 if (rc != REP_PROTOCOL_SUCCESS && pattern != NULL)
6322 free((void *)pattern);
6324 return (rc);
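/*
 * Aside on the filter/pattern validation above (illustration only): the
 * XOR test
 *
 *	((flags & RP_ITER_START_FILT_MASK) == RP_ITER_START_ALL) ^
 *	    (pattern == NULL)
 *
 * rejects exactly the mismatched combinations:
 *
 *	filter flag		pattern		result
 *	RP_ITER_START_ALL	NULL		accepted (no filter)
 *	RP_ITER_START_ALL	non-NULL	_BAD_REQUEST
 *	EXACT or PGTYPE		NULL		_BAD_REQUEST
 *	EXACT or PGTYPE		non-NULL	accepted (filtered)
 */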
6327 /*
6328 * Do uu_list_walk_next(iter->rni_iter) until we find a child which matches
6329 * the filter.
6330 * For composed iterators, also check to see if there's an overlapping entity
6331 * (see embedded comments). If we reach the end of the list, start over at
6332 * the next level.
6333 *
6334 * Returns
6335 * _BAD_REQUEST - iter walks values
6336 * _TYPE_MISMATCH - iter does not walk type entities
6337 * _DELETED - parent was deleted
6338 * _NO_RESOURCES
6339 * _INVALID_TYPE - type is invalid
6340 * _DONE
6341 * _SUCCESS
6342 *
6343 * For composed property group iterators, can also return
6344 * _TYPE_MISMATCH - parent cannot have type children
6345 */
6346 int
6347 rc_iter_next(rc_node_iter_t *iter, rc_node_ptr_t *out, uint32_t type)
6349 rc_node_t *np = iter->rni_parent;
6350 rc_node_t *res;
6351 int rc;
6353 if (iter->rni_type == REP_PROTOCOL_ENTITY_VALUE)
6354 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6356 if (iter->rni_iter == NULL) {
6357 rc_node_clear(out, 0);
6358 return (REP_PROTOCOL_DONE);
6361 if (iter->rni_type != type) {
6362 rc_node_clear(out, 0);
6363 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6366 (void) pthread_mutex_lock(&np->rn_lock); /* held by _iter_create() */
6368 if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
6369 (void) pthread_mutex_unlock(&np->rn_lock);
6370 rc_node_clear(out, 1);
6371 return (REP_PROTOCOL_FAIL_DELETED);
6374 if (iter->rni_clevel >= 0) {
6375 /* Composed iterator. Iterate over appropriate level. */
6376 (void) pthread_mutex_unlock(&np->rn_lock);
6377 np = np->rn_cchain[iter->rni_clevel];
6378 /*
6379 * If iter->rni_parent is an instance or a snapshot, np must
6380 * be valid since iter holds iter->rni_parent & possible
6381 * levels (service, instance, snaplevel) cannot be destroyed
6382 * while rni_parent is held. If iter->rni_parent is
6383 * a composed property group then rc_node_setup_cpg() put
6384 * a hold on np.
6385 */
6387 (void) pthread_mutex_lock(&np->rn_lock);
6389 if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
6390 (void) pthread_mutex_unlock(&np->rn_lock);
6391 rc_node_clear(out, 1);
6392 return (REP_PROTOCOL_FAIL_DELETED);
6396 assert(np->rn_flags & RC_NODE_HAS_CHILDREN);
6398 for (;;) {
6399 res = uu_list_walk_next(iter->rni_iter);
6400 if (res == NULL) {
6401 rc_node_t *parent = iter->rni_parent;
6403 #if COMPOSITION_DEPTH == 2
6404 if (iter->rni_clevel < 0 || iter->rni_clevel == 1) {
6405 /* release walker and lock */
6406 rc_iter_end(iter);
6407 break;
6410 /* Stop walking current level. */
6411 uu_list_walk_end(iter->rni_iter);
6412 iter->rni_iter = NULL;
6413 (void) pthread_mutex_unlock(&np->rn_lock);
6414 rc_node_rele_other(iter->rni_iter_node);
6415 iter->rni_iter_node = NULL;
6417 /* Start walking next level. */
6418 ++iter->rni_clevel;
6419 np = parent->rn_cchain[iter->rni_clevel];
6420 assert(np != NULL);
6421 #else
6422 #error This code must be updated.
6423 #endif
6425 (void) pthread_mutex_lock(&np->rn_lock);
6427 rc = rc_node_fill_children(np, iter->rni_type);
6429 if (rc == REP_PROTOCOL_SUCCESS) {
6430 iter->rni_iter =
6431 uu_list_walk_start(np->rn_children,
6432 UU_WALK_ROBUST);
6434 if (iter->rni_iter == NULL)
6435 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
6436 else {
6437 iter->rni_iter_node = np;
6438 rc_node_hold_other(np);
6442 if (rc != REP_PROTOCOL_SUCCESS) {
6443 (void) pthread_mutex_unlock(&np->rn_lock);
6444 rc_node_clear(out, 0);
6445 return (rc);
6448 continue;
6451 if (res->rn_id.rl_type != type ||
6452 !iter->rni_filter(res, iter->rni_filter_arg))
6453 continue;
6455 /*
6456 * If we're composed and not at the top level, check to see if
6457 * there's an entity at a higher level with the same name. If
6458 * so, skip this one.
6459 */
6460 if (iter->rni_clevel > 0) {
6461 rc_node_t *ent = iter->rni_parent->rn_cchain[0];
6462 rc_node_t *pg;
6464 #if COMPOSITION_DEPTH == 2
6465 assert(iter->rni_clevel == 1);
6467 (void) pthread_mutex_unlock(&np->rn_lock);
6468 (void) pthread_mutex_lock(&ent->rn_lock);
6469 rc = rc_node_find_named_child(ent, res->rn_name, type,
6470 &pg);
6471 if (rc == REP_PROTOCOL_SUCCESS && pg != NULL)
6472 rc_node_rele(pg);
6473 (void) pthread_mutex_unlock(&ent->rn_lock);
6474 if (rc != REP_PROTOCOL_SUCCESS) {
6475 rc_node_clear(out, 0);
6476 return (rc);
6478 (void) pthread_mutex_lock(&np->rn_lock);
6480 /* Make sure np isn't being deleted all of a sudden. */
6481 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
6482 (void) pthread_mutex_unlock(&np->rn_lock);
6483 rc_node_clear(out, 1);
6484 return (REP_PROTOCOL_FAIL_DELETED);
6487 if (pg != NULL)
6488 /* Keep going. */
6489 continue;
6490 #else
6491 #error This code must be updated.
6492 #endif
6493 }
6495 /*
6496 * If we're composed, iterating over property groups, and not
6497 * at the bottom level, check to see if there's a pg at lower
6498 * level with the same name. If so, return a cpg.
6499 */
6500 if (iter->rni_clevel >= 0 &&
6501 type == REP_PROTOCOL_ENTITY_PROPERTYGRP &&
6502 iter->rni_clevel < COMPOSITION_DEPTH - 1) {
6503 #if COMPOSITION_DEPTH == 2
6504 rc_node_t *pg;
6505 rc_node_t *ent = iter->rni_parent->rn_cchain[1];
6507 rc_node_hold(res); /* While we drop np->rn_lock */
6509 (void) pthread_mutex_unlock(&np->rn_lock);
6510 (void) pthread_mutex_lock(&ent->rn_lock);
6511 rc = rc_node_find_named_child(ent, res->rn_name, type,
6512 &pg);
6513 /* holds pg if not NULL */
6514 (void) pthread_mutex_unlock(&ent->rn_lock);
6515 if (rc != REP_PROTOCOL_SUCCESS) {
6516 rc_node_rele(res);
6517 rc_node_clear(out, 0);
6518 return (rc);
6521 (void) pthread_mutex_lock(&np->rn_lock);
6522 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
6523 (void) pthread_mutex_unlock(&np->rn_lock);
6524 rc_node_rele(res);
6525 if (pg != NULL)
6526 rc_node_rele(pg);
6527 rc_node_clear(out, 1);
6528 return (REP_PROTOCOL_FAIL_DELETED);
6531 if (pg == NULL) {
6532 (void) pthread_mutex_unlock(&np->rn_lock);
6533 rc_node_rele(res);
6534 (void) pthread_mutex_lock(&np->rn_lock);
6535 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
6536 (void) pthread_mutex_unlock(&np->
6537 rn_lock);
6538 rc_node_clear(out, 1);
6539 return (REP_PROTOCOL_FAIL_DELETED);
6541 } else {
6542 rc_node_t *cpg;
6544 /* Keep res held for rc_node_setup_cpg(). */
6546 cpg = rc_node_alloc();
6547 if (cpg == NULL) {
6548 (void) pthread_mutex_unlock(
6549 &np->rn_lock);
6550 rc_node_rele(res);
6551 rc_node_rele(pg);
6552 rc_node_clear(out, 0);
6553 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6556 switch (rc_node_setup_cpg(cpg, res, pg)) {
6557 case REP_PROTOCOL_SUCCESS:
6558 res = cpg;
6559 break;
6561 case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
6562 /* Nevermind. */
6563 (void) pthread_mutex_unlock(&np->
6564 rn_lock);
6565 rc_node_destroy(cpg);
6566 rc_node_rele(pg);
6567 rc_node_rele(res);
6568 (void) pthread_mutex_lock(&np->
6569 rn_lock);
6570 if (!rc_node_wait_flag(np,
6571 RC_NODE_DYING)) {
6572 (void) pthread_mutex_unlock(&
6573 np->rn_lock);
6574 rc_node_clear(out, 1);
6575 return
6576 (REP_PROTOCOL_FAIL_DELETED);
6578 break;
6580 case REP_PROTOCOL_FAIL_NO_RESOURCES:
6581 rc_node_destroy(cpg);
6582 (void) pthread_mutex_unlock(
6583 &np->rn_lock);
6584 rc_node_rele(res);
6585 rc_node_rele(pg);
6586 rc_node_clear(out, 0);
6587 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6589 default:
6590 assert(0);
6591 abort();
6594 #else
6595 #error This code must be updated.
6596 #endif
6599 rc_node_hold(res);
6600 (void) pthread_mutex_unlock(&np->rn_lock);
6601 break;
6603 rc_node_assign(out, res);
6605 if (res == NULL)
6606 return (REP_PROTOCOL_DONE);
6607 rc_node_rele(res);
6608 return (REP_PROTOCOL_SUCCESS);
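/*
 * Worked example of the composed-iteration shadowing implemented above
 * (the pg names are made up; the cchain layout assumed here is
 * cchain[0] == instance, cchain[1] == service).  If a service defines pgs
 * "general" and "svc-only" and its instance defines "general" and
 * "inst-only", a composed pg iteration on the instance yields:
 *
 *	level 0: "general"	returned as a composed pg (the service also
 *				defines one of that name)
 *	level 0: "inst-only"	returned as-is
 *	level 1: "general"	skipped, shadowed by the instance's pg
 *	level 1: "svc-only"	returned as-is
 *
 * That is, instance-level entities win, and where both levels define a pg
 * of the same name the client sees a single composed view of the pair.
 */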
6611 void
6612 rc_iter_destroy(rc_node_iter_t **nipp)
6614 rc_node_iter_t *nip = *nipp;
6615 rc_node_t *np;
6617 if (nip == NULL)
6618 return; /* already freed */
6620 np = nip->rni_parent;
6622 if (nip->rni_filter_arg != NULL)
6623 free(nip->rni_filter_arg);
6624 nip->rni_filter_arg = NULL;
6626 if (nip->rni_type == REP_PROTOCOL_ENTITY_VALUE ||
6627 nip->rni_iter != NULL) {
6628 if (nip->rni_clevel < 0)
6629 (void) pthread_mutex_lock(&np->rn_lock);
6630 else
6631 (void) pthread_mutex_lock(
6632 &np->rn_cchain[nip->rni_clevel]->rn_lock);
6633 rc_iter_end(nip); /* release walker and lock */
6635 nip->rni_parent = NULL;
6637 uu_free(nip);
6638 *nipp = NULL;
6639 }
6641 int
6642 rc_node_setup_tx(rc_node_ptr_t *npp, rc_node_ptr_t *txp)
6643 {
6644 rc_node_t *np;
6645 permcheck_t *pcp;
6646 int ret;
6647 perm_status_t granted;
6648 rc_auth_state_t authorized = RC_AUTH_UNKNOWN;
6649 char *auth_string = NULL;
6651 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
6653 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
6654 rc_node_rele(np);
6655 np = np->rn_cchain[0];
6656 RC_NODE_CHECK_AND_HOLD(np);
6659 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
6660 rc_node_rele(np);
6661 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6664 if (np->rn_id.rl_ids[ID_SNAPSHOT] != 0) {
6665 rc_node_rele(np);
6666 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
6669 #ifdef NATIVE_BUILD
6670 if (client_is_privileged())
6671 goto skip_checks;
6672 rc_node_rele(np);
6673 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
6674 #else
6675 if (is_main_repository == 0)
6676 goto skip_checks;
6678 /* permission check */
6679 pcp = pc_create();
6680 if (pcp == NULL) {
6681 rc_node_rele(np);
6682 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6685 if (np->rn_id.rl_ids[ID_INSTANCE] != 0 && /* instance pg */
6686 ((strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0 &&
6687 strcmp(np->rn_type, AUTH_PG_ACTIONS_TYPE) == 0) ||
6688 (strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
6689 strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
6690 rc_node_t *instn;
6692 /* solaris.smf.modify can be used */
6693 ret = perm_add_enabling(pcp, AUTH_MODIFY);
6694 if (ret != REP_PROTOCOL_SUCCESS) {
6695 pc_free(pcp);
6696 rc_node_rele(np);
6697 return (ret);
6700 /* solaris.smf.manage can be used. */
6701 ret = perm_add_enabling(pcp, AUTH_MANAGE);
6703 if (ret != REP_PROTOCOL_SUCCESS) {
6704 pc_free(pcp);
6705 rc_node_rele(np);
6706 return (ret);
6709 /* general/action_authorization values can be used. */
6710 ret = rc_node_parent(np, &instn);
6711 if (ret != REP_PROTOCOL_SUCCESS) {
6712 assert(ret == REP_PROTOCOL_FAIL_DELETED);
6713 rc_node_rele(np);
6714 pc_free(pcp);
6715 return (REP_PROTOCOL_FAIL_DELETED);
6718 assert(instn->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
6720 ret = perm_add_inst_action_auth(pcp, instn);
6721 rc_node_rele(instn);
6722 switch (ret) {
6723 case REP_PROTOCOL_SUCCESS:
6724 break;
6726 case REP_PROTOCOL_FAIL_DELETED:
6727 case REP_PROTOCOL_FAIL_NO_RESOURCES:
6728 rc_node_rele(np);
6729 pc_free(pcp);
6730 return (ret);
6732 default:
6733 bad_error("perm_add_inst_action_auth", ret);
6736 if (strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0)
6737 authorized = RC_AUTH_PASSED; /* No check on commit. */
6738 } else {
6739 ret = perm_add_enabling(pcp, AUTH_MODIFY);
6741 if (ret == REP_PROTOCOL_SUCCESS) {
6742 /* propertygroup-type-specific authorization */
6743 /* no locking because rn_type won't change anyway */
6744 const char * const auth =
6745 perm_auth_for_pgtype(np->rn_type);
6747 if (auth != NULL)
6748 ret = perm_add_enabling(pcp, auth);
6751 if (ret == REP_PROTOCOL_SUCCESS)
6752 /* propertygroup/transaction-type-specific auths */
6753 ret =
6754 perm_add_enabling_values(pcp, np, AUTH_PROP_VALUE);
6756 if (ret == REP_PROTOCOL_SUCCESS)
6757 ret =
6758 perm_add_enabling_values(pcp, np, AUTH_PROP_MODIFY);
6760 /* AUTH_MANAGE can manipulate general/AUTH_PROP_ACTION */
6761 if (ret == REP_PROTOCOL_SUCCESS &&
6762 strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
6763 strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0)
6764 ret = perm_add_enabling(pcp, AUTH_MANAGE);
6766 if (ret != REP_PROTOCOL_SUCCESS) {
6767 pc_free(pcp);
6768 rc_node_rele(np);
6769 return (ret);
6773 granted = perm_granted(pcp);
6774 ret = map_granted_status(granted, pcp, &auth_string);
6775 pc_free(pcp);
6777 if ((granted == PERM_GONE) || (granted == PERM_FAIL) ||
6778 (ret == REP_PROTOCOL_FAIL_NO_RESOURCES)) {
6779 free(auth_string);
6780 rc_node_rele(np);
6781 return (ret);
6784 if (granted == PERM_DENIED) {
6785 /*
6786 * If we get here, the authorization failed.
6787 * Unfortunately, we don't have enough information at this
6788 * point to generate the security audit events. We'll only
6789 * get that information when the client tries to commit the
6790 * event. Thus, we'll remember the failed authorization,
6791 * so that we can generate the audit events later.
6792 */
6793 authorized = RC_AUTH_FAILED;
6795 #endif /* NATIVE_BUILD */
6797 skip_checks:
6798 rc_node_assign(txp, np);
6799 txp->rnp_authorized = authorized;
6800 if (authorized != RC_AUTH_UNKNOWN) {
6801 /* Save the authorization string. */
6802 if (txp->rnp_auth_string != NULL)
6803 free((void *)txp->rnp_auth_string);
6804 txp->rnp_auth_string = auth_string;
6805 auth_string = NULL; /* Don't free until done with txp. */
6808 rc_node_rele(np);
6809 if (auth_string != NULL)
6810 free(auth_string);
6811 return (REP_PROTOCOL_SUCCESS);
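/*
 * Summary sketch of the setup/commit handshake (added for illustration;
 * rc_tx_commit() below is the authoritative logic).  The rnp_authorized
 * value recorded here tells the commit path how much checking remains:
 *
 *	RC_AUTH_UNKNOWN	no decision yet; rc_tx_commit() performs the full,
 *			contents-dependent permission check.
 *	RC_AUTH_PASSED	already authorized (the actions pg case above);
 *			commit skips the permission check.
 *	RC_AUTH_FAILED	already denied; commit only generates the audit
 *			events and then fails with _PERMISSION_DENIED.
 *
 * The saved rnp_auth_string rides along so those audit events can name
 * the authorization that was evaluated.
 */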
6814 /*
6815 * Return 1 if the given transaction commands only modify the values of
6816 * properties other than "modify_authorization". Return -1 if any of the
6817 * commands are invalid, and 0 otherwise.
6818 */
6819 static int
6820 tx_allow_value(const void *cmds_arg, size_t cmds_sz, rc_node_t *pg)
6822 const struct rep_protocol_transaction_cmd *cmds;
6823 uintptr_t loc;
6824 uint32_t sz;
6825 rc_node_t *prop;
6826 boolean_t ok;
6828 assert(!MUTEX_HELD(&pg->rn_lock));
6830 loc = (uintptr_t)cmds_arg;
6832 while (cmds_sz > 0) {
6833 cmds = (struct rep_protocol_transaction_cmd *)loc;
6835 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6836 return (-1);
6838 sz = cmds->rptc_size;
6839 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6840 return (-1);
6842 sz = TX_SIZE(sz);
6843 if (sz > cmds_sz)
6844 return (-1);
6846 switch (cmds[0].rptc_action) {
6847 case REP_PROTOCOL_TX_ENTRY_CLEAR:
6848 break;
6850 case REP_PROTOCOL_TX_ENTRY_REPLACE:
6851 /* Check type */
6852 (void) pthread_mutex_lock(&pg->rn_lock);
6853 ok = B_FALSE;
6854 if (rc_node_find_named_child(pg,
6855 (const char *)cmds[0].rptc_data,
6856 REP_PROTOCOL_ENTITY_PROPERTY, &prop) ==
6857 REP_PROTOCOL_SUCCESS) {
6858 if (prop != NULL) {
6859 ok = prop->rn_valtype ==
6860 cmds[0].rptc_type;
6861 /*
6862 * rc_node_find_named_child()
6863 * places a hold on prop which we
6864 * do not need to hang on to.
6865 */
6866 rc_node_rele(prop);
6869 (void) pthread_mutex_unlock(&pg->rn_lock);
6870 if (ok)
6871 break;
6872 return (0);
6874 default:
6875 return (0);
6878 if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_MODIFY)
6879 == 0)
6880 return (0);
6882 loc += sz;
6883 cmds_sz -= sz;
6886 return (1);
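/*
 * tx_allow_value() above and the two helpers below share one
 * buffer-walking idiom.  A minimal sketch, using only the fields visible
 * in this file (rptc_size, rptc_action, rptc_data) and the TX_SIZE()
 * alignment macro:
 *
 *	const struct rep_protocol_transaction_cmd *cmd;
 *	uintptr_t loc = (uintptr_t)cmds_arg;
 *	uint32_t sz;
 *
 *	while (cmds_sz > 0) {
 *		cmd = (struct rep_protocol_transaction_cmd *)loc;
 *		if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
 *			return (-1);			truncated header
 *		sz = cmd->rptc_size;
 *		if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
 *			return (-1);			bogus size field
 *		sz = TX_SIZE(sz);			round to alignment
 *		if (sz > cmds_sz)
 *			return (-1);			runs off the end
 *		... inspect cmd->rptc_action and cmd->rptc_data ...
 *		loc += sz;
 *		cmds_sz -= sz;
 *	}
 *
 * Each entry is a self-describing (action, type, size, data) record, and
 * rptc_size can only be trusted after the three sanity checks above.
 */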
6889 /*
6890 * Return 1 if any of the given transaction commands affect
6891 * "action_authorization". Return -1 if any of the commands are invalid and
6892 * 0 in all other cases.
6893 */
6894 static int
6895 tx_modifies_action(const void *cmds_arg, size_t cmds_sz)
6897 const struct rep_protocol_transaction_cmd *cmds;
6898 uintptr_t loc;
6899 uint32_t sz;
6901 loc = (uintptr_t)cmds_arg;
6903 while (cmds_sz > 0) {
6904 cmds = (struct rep_protocol_transaction_cmd *)loc;
6906 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6907 return (-1);
6909 sz = cmds->rptc_size;
6910 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6911 return (-1);
6913 sz = TX_SIZE(sz);
6914 if (sz > cmds_sz)
6915 return (-1);
6917 if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_ACTION)
6918 == 0)
6919 return (1);
6921 loc += sz;
6922 cmds_sz -= sz;
6925 return (0);
6926 }
6928 /*
6929 * Returns 1 if the transaction commands only modify properties named
6930 * 'enabled'.
6931 */
6932 static int
6933 tx_only_enabled(const void *cmds_arg, size_t cmds_sz)
6935 const struct rep_protocol_transaction_cmd *cmd;
6936 uintptr_t loc;
6937 uint32_t sz;
6939 loc = (uintptr_t)cmds_arg;
6941 while (cmds_sz > 0) {
6942 cmd = (struct rep_protocol_transaction_cmd *)loc;
6944 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6945 return (-1);
6947 sz = cmd->rptc_size;
6948 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6949 return (-1);
6951 sz = TX_SIZE(sz);
6952 if (sz > cmds_sz)
6953 return (-1);
6955 if (strcmp((const char *)cmd->rptc_data, AUTH_PROP_ENABLED)
6956 != 0)
6957 return (0);
6959 loc += sz;
6960 cmds_sz -= sz;
6963 return (1);
6964 }
6966 int
6967 rc_tx_commit(rc_node_ptr_t *txp, const void *cmds, size_t cmds_sz)
6968 {
6969 rc_node_t *np = txp->rnp_node;
6970 rc_node_t *pp;
6971 rc_node_t *nnp;
6972 rc_node_pg_notify_t *pnp;
6973 int rc;
6974 permcheck_t *pcp;
6975 perm_status_t granted;
6976 int normal;
6977 char *pg_fmri = NULL;
6978 char *auth_string = NULL;
6979 int auth_status = ADT_SUCCESS;
6980 int auth_ret_value = ADT_SUCCESS;
6981 size_t sz_out;
6982 int tx_flag = 1;
6983 tx_commit_data_t *tx_data = NULL;
6985 RC_NODE_CHECK(np);
6987 if ((txp->rnp_authorized != RC_AUTH_UNKNOWN) &&
6988 (txp->rnp_auth_string != NULL)) {
6989 auth_string = strdup(txp->rnp_auth_string);
6990 if (auth_string == NULL)
6991 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6994 if ((txp->rnp_authorized == RC_AUTH_UNKNOWN) &&
6995 is_main_repository) {
6996 #ifdef NATIVE_BUILD
6997 if (!client_is_privileged()) {
6998 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
7000 #else
7001 /* permission check: depends on contents of transaction */
7002 pcp = pc_create();
7003 if (pcp == NULL)
7004 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7006 /* If normal is cleared, we won't do the normal checks. */
7007 normal = 1;
7008 rc = REP_PROTOCOL_SUCCESS;
7010 if (strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
7011 strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0) {
7012 /* Touching general[framework]/action_authorization? */
7013 rc = tx_modifies_action(cmds, cmds_sz);
7014 if (rc == -1) {
7015 pc_free(pcp);
7016 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7019 if (rc) {
7020 /*
7021 * Yes: only AUTH_MODIFY and AUTH_MANAGE
7022 * can be used.
7023 */
7024 rc = perm_add_enabling(pcp, AUTH_MODIFY);
7026 if (rc == REP_PROTOCOL_SUCCESS)
7027 rc = perm_add_enabling(pcp,
7028 AUTH_MANAGE);
7030 normal = 0;
7031 } else {
7032 rc = REP_PROTOCOL_SUCCESS;
7034 } else if (np->rn_id.rl_ids[ID_INSTANCE] != 0 &&
7035 strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
7036 strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0) {
7037 rc_node_t *instn;
7039 rc = tx_only_enabled(cmds, cmds_sz);
7040 if (rc == -1) {
7041 pc_free(pcp);
7042 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7045 if (rc) {
7046 rc = rc_node_parent(np, &instn);
7047 if (rc != REP_PROTOCOL_SUCCESS) {
7048 assert(rc == REP_PROTOCOL_FAIL_DELETED);
7049 pc_free(pcp);
7050 return (rc);
7053 assert(instn->rn_id.rl_type ==
7054 REP_PROTOCOL_ENTITY_INSTANCE);
7056 rc = perm_add_inst_action_auth(pcp, instn);
7057 rc_node_rele(instn);
7058 switch (rc) {
7059 case REP_PROTOCOL_SUCCESS:
7060 break;
7062 case REP_PROTOCOL_FAIL_DELETED:
7063 case REP_PROTOCOL_FAIL_NO_RESOURCES:
7064 pc_free(pcp);
7065 return (rc);
7067 default:
7068 bad_error("perm_add_inst_action_auth",
7069 rc);
7071 } else {
7072 rc = REP_PROTOCOL_SUCCESS;
7076 if (rc == REP_PROTOCOL_SUCCESS && normal) {
7077 rc = perm_add_enabling(pcp, AUTH_MODIFY);
7079 if (rc == REP_PROTOCOL_SUCCESS) {
7080 /* Add pgtype-specific authorization. */
7081 const char * const auth =
7082 perm_auth_for_pgtype(np->rn_type);
7084 if (auth != NULL)
7085 rc = perm_add_enabling(pcp, auth);
7088 /* Add pg-specific modify_authorization auths. */
7089 if (rc == REP_PROTOCOL_SUCCESS)
7090 rc = perm_add_enabling_values(pcp, np,
7091 AUTH_PROP_MODIFY);
7093 /* If value_authorization values are ok, add them. */
7094 if (rc == REP_PROTOCOL_SUCCESS) {
7095 rc = tx_allow_value(cmds, cmds_sz, np);
7096 if (rc == -1)
7097 rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
7098 else if (rc)
7099 rc = perm_add_enabling_values(pcp, np,
7100 AUTH_PROP_VALUE);
7104 if (rc == REP_PROTOCOL_SUCCESS) {
7105 granted = perm_granted(pcp);
7106 rc = map_granted_status(granted, pcp, &auth_string);
7107 if ((granted == PERM_DENIED) && auth_string) {
7108 /*
7109 * _PERMISSION_DENIED should not cause us
7110 * to exit at this point, because we still
7111 * want to generate an audit event.
7112 */
7113 rc = REP_PROTOCOL_SUCCESS;
7117 pc_free(pcp);
7119 if (rc != REP_PROTOCOL_SUCCESS)
7120 goto cleanout;
7122 if (granted == PERM_DENIED) {
7123 auth_status = ADT_FAILURE;
7124 auth_ret_value = ADT_FAIL_VALUE_AUTH;
7125 tx_flag = 0;
7127 #endif /* NATIVE_BUILD */
7128 } else if (txp->rnp_authorized == RC_AUTH_FAILED) {
7129 auth_status = ADT_FAILURE;
7130 auth_ret_value = ADT_FAIL_VALUE_AUTH;
7131 tx_flag = 0;
7134 pg_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
7135 if (pg_fmri == NULL) {
7136 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7137 goto cleanout;
7139 if ((rc = rc_node_get_fmri_or_fragment(np, pg_fmri,
7140 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
7141 goto cleanout;
7142 }
7144 /*
7145 * Parse the transaction commands into a useful form.
7146 */
7147 if ((rc = tx_commit_data_new(cmds, cmds_sz, &tx_data)) !=
7148 REP_PROTOCOL_SUCCESS) {
7149 goto cleanout;
7152 if (tx_flag == 0) {
7153 /* Authorization failed. Generate audit events. */
7154 generate_property_events(tx_data, pg_fmri, auth_string,
7155 auth_status, auth_ret_value);
7156 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
7157 goto cleanout;
7160 nnp = rc_node_alloc();
7161 if (nnp == NULL) {
7162 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7163 goto cleanout;
7166 nnp->rn_id = np->rn_id; /* structure assignment */
7167 nnp->rn_hash = np->rn_hash;
7168 nnp->rn_name = strdup(np->rn_name);
7169 nnp->rn_type = strdup(np->rn_type);
7170 nnp->rn_pgflags = np->rn_pgflags;
7172 nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
7174 if (nnp->rn_name == NULL || nnp->rn_type == NULL) {
7175 rc_node_destroy(nnp);
7176 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7177 goto cleanout;
7180 (void) pthread_mutex_lock(&np->rn_lock);
7182 /*
7183 * We must have all of the old properties in the cache, or the
7184 * database deletions could cause inconsistencies.
7185 */
7186 if ((rc = rc_node_fill_children(np, REP_PROTOCOL_ENTITY_PROPERTY)) !=
7187 REP_PROTOCOL_SUCCESS) {
7188 (void) pthread_mutex_unlock(&np->rn_lock);
7189 rc_node_destroy(nnp);
7190 goto cleanout;
7193 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
7194 (void) pthread_mutex_unlock(&np->rn_lock);
7195 rc_node_destroy(nnp);
7196 rc = REP_PROTOCOL_FAIL_DELETED;
7197 goto cleanout;
7200 if (np->rn_flags & RC_NODE_OLD) {
7201 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
7202 (void) pthread_mutex_unlock(&np->rn_lock);
7203 rc_node_destroy(nnp);
7204 rc = REP_PROTOCOL_FAIL_NOT_LATEST;
7205 goto cleanout;
7208 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
7209 if (pp == NULL) {
7210 /* our parent is gone, we're going next... */
7211 rc_node_destroy(nnp);
7212 (void) pthread_mutex_lock(&np->rn_lock);
7213 if (np->rn_flags & RC_NODE_OLD) {
7214 (void) pthread_mutex_unlock(&np->rn_lock);
7215 rc = REP_PROTOCOL_FAIL_NOT_LATEST;
7216 goto cleanout;
7218 (void) pthread_mutex_unlock(&np->rn_lock);
7219 rc = REP_PROTOCOL_FAIL_DELETED;
7220 goto cleanout;
7222 (void) pthread_mutex_unlock(&pp->rn_lock);
7224 /*
7225 * prepare for the transaction
7226 */
7227 (void) pthread_mutex_lock(&np->rn_lock);
7228 if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
7229 (void) pthread_mutex_unlock(&np->rn_lock);
7230 (void) pthread_mutex_lock(&pp->rn_lock);
7231 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
7232 (void) pthread_mutex_unlock(&pp->rn_lock);
7233 rc_node_destroy(nnp);
7234 rc = REP_PROTOCOL_FAIL_DELETED;
7235 goto cleanout;
7237 nnp->rn_gen_id = np->rn_gen_id;
7238 (void) pthread_mutex_unlock(&np->rn_lock);
7240 /* Sets nnp->rn_gen_id on success. */
7241 rc = object_tx_commit(&np->rn_id, tx_data, &nnp->rn_gen_id);
7243 (void) pthread_mutex_lock(&np->rn_lock);
7244 if (rc != REP_PROTOCOL_SUCCESS) {
7245 rc_node_rele_flag(np, RC_NODE_IN_TX);
7246 (void) pthread_mutex_unlock(&np->rn_lock);
7247 (void) pthread_mutex_lock(&pp->rn_lock);
7248 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
7249 (void) pthread_mutex_unlock(&pp->rn_lock);
7250 rc_node_destroy(nnp);
7251 rc_node_clear(txp, 0);
7252 if (rc == REP_PROTOCOL_DONE)
7253 rc = REP_PROTOCOL_SUCCESS; /* successful empty tx */
7254 goto cleanout;
7255 }
7257 /*
7258 * Notify waiters
7259 */
7260 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7261 while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
7262 rc_pg_notify_fire(pnp);
7263 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7265 np->rn_flags |= RC_NODE_OLD;
7266 (void) pthread_mutex_unlock(&np->rn_lock);
7268 rc_notify_remove_node(np);
7270 /*
7271 * replace np with nnp
7272 */
7273 rc_node_relink_child(pp, np, nnp);
7275 /*
7276 * all done -- clear the transaction.
7277 */
7278 rc_node_clear(txp, 0);
7279 generate_property_events(tx_data, pg_fmri, auth_string,
7280 auth_status, auth_ret_value);
7282 rc = REP_PROTOCOL_SUCCESS;
7284 cleanout:
7285 free(auth_string);
7286 free(pg_fmri);
7287 tx_commit_data_free(tx_data);
7288 return (rc);
7291 void
7292 rc_pg_notify_init(rc_node_pg_notify_t *pnp)
7294 uu_list_node_init(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
7295 pnp->rnpn_pg = NULL;
7296 pnp->rnpn_fd = -1;
7297 }
7299 int
7300 rc_pg_notify_setup(rc_node_pg_notify_t *pnp, rc_node_ptr_t *npp, int fd)
7301 {
7302 rc_node_t *np;
7304 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
7306 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
7307 (void) pthread_mutex_unlock(&np->rn_lock);
7308 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7309 }
7311 /*
7312 * wait for any transaction in progress to complete
7313 */
7314 if (!rc_node_wait_flag(np, RC_NODE_IN_TX)) {
7315 (void) pthread_mutex_unlock(&np->rn_lock);
7316 return (REP_PROTOCOL_FAIL_DELETED);
7319 if (np->rn_flags & RC_NODE_OLD) {
7320 (void) pthread_mutex_unlock(&np->rn_lock);
7321 return (REP_PROTOCOL_FAIL_NOT_LATEST);
7324 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7325 rc_pg_notify_fire(pnp);
7326 pnp->rnpn_pg = np;
7327 pnp->rnpn_fd = fd;
7328 (void) uu_list_insert_after(np->rn_pg_notify_list, NULL, pnp);
7329 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7331 (void) pthread_mutex_unlock(&np->rn_lock);
7332 return (REP_PROTOCOL_SUCCESS);
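/*
 * Usage sketch (illustrative; the real caller is the client.c protocol
 * handler, and pg_ptr/fd here are placeholders for whatever the client
 * supplies): a client arms a property-group notification by registering a
 * file descriptor against a pg, and rc_pg_notify_fire() later signals
 * through that descriptor when rc_tx_commit() replaces the pg:
 *
 *	rc_node_pg_notify_t pn;
 *
 *	rc_pg_notify_init(&pn);
 *	if (rc_pg_notify_setup(&pn, pg_ptr, fd) != REP_PROTOCOL_SUCCESS)
 *		... _BAD_REQUEST, _DELETED, or _NOT_LATEST ...
 *	...
 *	rc_pg_notify_fini(&pn);
 *
 * Note that setup fires any previous registration first, so a notify
 * structure only ever tracks one pg at a time.
 */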
7335 void
7336 rc_pg_notify_fini(rc_node_pg_notify_t *pnp)
7338 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7339 rc_pg_notify_fire(pnp);
7340 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7342 uu_list_node_fini(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
7345 void
7346 rc_notify_info_init(rc_notify_info_t *rnip)
7348 int i;
7350 uu_list_node_init(rnip, &rnip->rni_list_node, rc_notify_info_pool);
7351 uu_list_node_init(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
7352 rc_notify_pool);
7354 rnip->rni_notify.rcn_node = NULL;
7355 rnip->rni_notify.rcn_info = rnip;
7357 bzero(rnip->rni_namelist, sizeof (rnip->rni_namelist));
7358 bzero(rnip->rni_typelist, sizeof (rnip->rni_typelist));
7360 (void) pthread_cond_init(&rnip->rni_cv, NULL);
7362 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7363 rnip->rni_namelist[i] = NULL;
7364 rnip->rni_typelist[i] = NULL;
7368 static void
7369 rc_notify_info_insert_locked(rc_notify_info_t *rnip)
7371 assert(MUTEX_HELD(&rc_pg_notify_lock));
7373 assert(!(rnip->rni_flags & RC_NOTIFY_ACTIVE));
7375 rnip->rni_flags |= RC_NOTIFY_ACTIVE;
7376 (void) uu_list_insert_after(rc_notify_info_list, NULL, rnip);
7377 (void) uu_list_insert_before(rc_notify_list, NULL, &rnip->rni_notify);
7380 static void
7381 rc_notify_info_remove_locked(rc_notify_info_t *rnip)
7383 rc_notify_t *me = &rnip->rni_notify;
7384 rc_notify_t *np;
7386 assert(MUTEX_HELD(&rc_pg_notify_lock));
7388 assert(rnip->rni_flags & RC_NOTIFY_ACTIVE);
7390 assert(!(rnip->rni_flags & RC_NOTIFY_DRAIN));
7391 rnip->rni_flags |= RC_NOTIFY_DRAIN;
7392 (void) pthread_cond_broadcast(&rnip->rni_cv);
7394 (void) uu_list_remove(rc_notify_info_list, rnip);
7396 /*
7397 * clean up any notifications at the beginning of the list
7398 */
7399 if (uu_list_first(rc_notify_list) == me) {
7400 /*
7401 * We can't call rc_notify_remove_locked() unless
7402 * rc_notify_in_use is 0.
7403 */
7404 while (rc_notify_in_use) {
7405 (void) pthread_cond_wait(&rc_pg_notify_cv,
7406 &rc_pg_notify_lock);
7408 while ((np = uu_list_next(rc_notify_list, me)) != NULL &&
7409 np->rcn_info == NULL)
7410 rc_notify_remove_locked(np);
7412 (void) uu_list_remove(rc_notify_list, me);
7414 while (rnip->rni_waiters) {
7415 (void) pthread_cond_broadcast(&rc_pg_notify_cv);
7416 (void) pthread_cond_broadcast(&rnip->rni_cv);
7417 (void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
7420 rnip->rni_flags &= ~(RC_NOTIFY_DRAIN | RC_NOTIFY_ACTIVE);
7423 static int
7424 rc_notify_info_add_watch(rc_notify_info_t *rnip, const char **arr,
7425 const char *name)
7427 int i;
7428 int rc;
7429 char *f;
7431 rc = rc_check_type_name(REP_PROTOCOL_ENTITY_PROPERTYGRP, name);
7432 if (rc != REP_PROTOCOL_SUCCESS)
7433 return (rc);
7435 f = strdup(name);
7436 if (f == NULL)
7437 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7439 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7441 while (rnip->rni_flags & RC_NOTIFY_EMPTYING)
7442 (void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
7444 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7445 if (arr[i] == NULL)
7446 break;
7448 /*
7449 * Don't add name if it's already being tracked.
7450 */
7451 if (strcmp(arr[i], f) == 0) {
7452 free(f);
7453 goto out;
7457 if (i == RC_NOTIFY_MAX_NAMES) {
7458 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7459 free(f);
7460 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7463 arr[i] = f;
7465 out:
7466 if (!(rnip->rni_flags & RC_NOTIFY_ACTIVE))
7467 rc_notify_info_insert_locked(rnip);
7469 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7470 return (REP_PROTOCOL_SUCCESS);
7471 }
7473 int
7474 rc_notify_info_add_name(rc_notify_info_t *rnip, const char *name)
7475 {
7476 return (rc_notify_info_add_watch(rnip, rnip->rni_namelist, name));
7477 }
7479 int
7480 rc_notify_info_add_type(rc_notify_info_t *rnip, const char *type)
7481 {
7482 return (rc_notify_info_add_watch(rnip, rnip->rni_typelist, type));
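/*
 * Putting the notification pieces together (a sketch of how client.c is
 * expected to drive this interface, not a copy of it; the watched name
 * "general" and type "framework" are made up):
 *
 *	rc_notify_info_t ni;
 *	rc_node_ptr_t which;		(initialized, as client.c does)
 *	char fmri[REP_PROTOCOL_FMRI_LEN];
 *	int r;
 *
 *	rc_notify_info_init(&ni);
 *	(void) rc_notify_info_add_name(&ni, "general");
 *	(void) rc_notify_info_add_type(&ni, "framework");
 *
 *	for (;;) {
 *		r = rc_notify_info_wait(&ni, &which, fmri, sizeof (fmri));
 *		if (r != REP_PROTOCOL_SUCCESS)
 *			break;			reset/teardown returns _DONE
 *		if (fmri[0] != '\0')
 *			... a watched pg was deleted; fmri names it ...
 *		else
 *			... which now references the changed pg ...
 *	}
 *	rc_notify_info_fini(&ni);
 *
 * Deletions are reported by FMRI because the node itself is already gone;
 * changes hand back a reference to the new pg node instead.
 */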
7485 /*
7486 * Wait for and report an event of interest to rnip, a notification client
7487 */
7488 int
7489 rc_notify_info_wait(rc_notify_info_t *rnip, rc_node_ptr_t *out,
7490 char *outp, size_t sz)
7492 rc_notify_t *np;
7493 rc_notify_t *me = &rnip->rni_notify;
7494 rc_node_t *nnp;
7495 rc_notify_delete_t *ndp;
7497 int am_first_info;
7499 if (sz > 0)
7500 outp[0] = 0;
7502 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7504 while ((rnip->rni_flags & (RC_NOTIFY_ACTIVE | RC_NOTIFY_DRAIN)) ==
7505 RC_NOTIFY_ACTIVE) {
7506 /*
7507 * If I'm first on the notify list, it is my job to
7508 * clean up any notifications I pass by. I can't do that
7509 * if someone is blocking the list from removals, so I
7510 * have to wait until they have all drained.
7511 */
7512 am_first_info = (uu_list_first(rc_notify_list) == me);
7513 if (am_first_info && rc_notify_in_use) {
7514 rnip->rni_waiters++;
7515 (void) pthread_cond_wait(&rc_pg_notify_cv,
7516 &rc_pg_notify_lock);
7517 rnip->rni_waiters--;
7518 continue;
7519 }
7521 /*
7522 * Search the list for a node of interest.
7523 */
7524 np = uu_list_next(rc_notify_list, me);
7525 while (np != NULL && !rc_notify_info_interested(rnip, np)) {
7526 rc_notify_t *next = uu_list_next(rc_notify_list, np);
7528 if (am_first_info) {
7529 if (np->rcn_info) {
7530 /*
7531 * Passing another client -- stop
7532 * cleaning up notifications
7533 */
7534 am_first_info = 0;
7535 } else {
7536 rc_notify_remove_locked(np);
7539 np = next;
7540 }
7542 /*
7543 * Nothing of interest -- wait for notification
7544 */
7545 if (np == NULL) {
7546 rnip->rni_waiters++;
7547 (void) pthread_cond_wait(&rnip->rni_cv,
7548 &rc_pg_notify_lock);
7549 rnip->rni_waiters--;
7550 continue;
7551 }
7553 /*
7554 * found something to report -- move myself after the
7555 * notification and process it.
7556 */
7557 (void) uu_list_remove(rc_notify_list, me);
7558 (void) uu_list_insert_after(rc_notify_list, np, me);
7560 if ((ndp = np->rcn_delete) != NULL) {
7561 (void) strlcpy(outp, ndp->rnd_fmri, sz);
7562 if (am_first_info)
7563 rc_notify_remove_locked(np);
7564 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7565 rc_node_clear(out, 0);
7566 return (REP_PROTOCOL_SUCCESS);
7569 nnp = np->rcn_node;
7570 assert(nnp != NULL);
7572 /*
7573 * We can't bump nnp's reference count without grabbing its
7574 * lock, and rc_pg_notify_lock is a leaf lock. So we
7575 * temporarily block all removals to keep nnp from
7576 * disappearing.
7577 */
7578 rc_notify_in_use++;
7579 assert(rc_notify_in_use > 0);
7580 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7582 rc_node_assign(out, nnp);
7584 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7585 assert(rc_notify_in_use > 0);
7586 rc_notify_in_use--;
7588 if (am_first_info) {
7589 /*
7590 * While we had the lock dropped, another thread
7591 * may have also incremented rc_notify_in_use. We
7592 * need to make sure that we're back to 0 before
7593 * removing the node.
7594 */
7595 while (rc_notify_in_use) {
7596 (void) pthread_cond_wait(&rc_pg_notify_cv,
7597 &rc_pg_notify_lock);
7599 rc_notify_remove_locked(np);
7601 if (rc_notify_in_use == 0)
7602 (void) pthread_cond_broadcast(&rc_pg_notify_cv);
7603 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7605 return (REP_PROTOCOL_SUCCESS);
7606 }
7607 /*
7608 * If we're the last one out, let people know it's clear.
7609 */
7610 if (rnip->rni_waiters == 0)
7611 (void) pthread_cond_broadcast(&rnip->rni_cv);
7612 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7613 return (REP_PROTOCOL_DONE);
7616 static void
7617 rc_notify_info_reset(rc_notify_info_t *rnip)
7619 int i;
7621 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7622 if (rnip->rni_flags & RC_NOTIFY_ACTIVE)
7623 rc_notify_info_remove_locked(rnip);
7624 assert(!(rnip->rni_flags & (RC_NOTIFY_DRAIN | RC_NOTIFY_EMPTYING)));
7625 rnip->rni_flags |= RC_NOTIFY_EMPTYING;
7626 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7628 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7629 if (rnip->rni_namelist[i] != NULL) {
7630 free((void *)rnip->rni_namelist[i]);
7631 rnip->rni_namelist[i] = NULL;
7633 if (rnip->rni_typelist[i] != NULL) {
7634 free((void *)rnip->rni_typelist[i]);
7635 rnip->rni_typelist[i] = NULL;
7639 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7640 rnip->rni_flags &= ~RC_NOTIFY_EMPTYING;
7641 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7644 void
7645 rc_notify_info_fini(rc_notify_info_t *rnip)
7647 rc_notify_info_reset(rnip);
7649 uu_list_node_fini(rnip, &rnip->rni_list_node, rc_notify_info_pool);
7650 uu_list_node_fini(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
7651 rc_notify_pool);