4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
25 * Copyright (c) 2016 by Delphix. All rights reserved.
29 * rc_node.c - In-memory SCF object management
31 * This layer manages the in-memory cache (the Repository Cache) of SCF
32 * data. Read requests are usually satisfied from here, but may require
33 * load calls to the "object" layer. Modify requests always write-through
34 * to the object layer.
36 * SCF data comprises scopes, services, instances, snapshots, snaplevels,
37 * property groups, properties, and property values. All but the last are
38 * known here as "entities" and are represented by rc_node_t data
39 * structures. (Property values are kept in the rn_values member of the
40 * respective property, not as separate objects.) All entities besides
41 * the "localhost" scope have some entity as a parent, and therefore form
44 * The entity tree is rooted at rc_scope, which rc_node_init() initializes to
45 * the "localhost" scope. The tree is filled in from the database on-demand
46 * by rc_node_fill_children().
48 * rc_node_t's are also placed in the cache_hash[] hash table, for rapid
51 * Multiple threads may service client requests, so access to each
52 * rc_node_t is synchronized by its rn_lock member. Some fields are
53 * protected by bits in the rn_flags field instead, to support operations
54 * which need to drop rn_lock, for example to respect locking order. Such
55 * flags should be manipulated with the rc_node_{hold,rele}_flag()
58 * We track references to nodes to tell when they can be free()d. rn_refs
59 * should be incremented with rc_node_hold() on the creation of client
60 * references (rc_node_ptr_t's and rc_iter_t's). rn_erefs ("ephemeral
61 * references") should be incremented when a pointer is read into a local
62 * variable of a thread, with rc_node_hold_ephemeral_locked(). This
63 * hasn't been fully implemented, however, so rc_node_rele() tolerates
64 * rn_erefs being 0. Some code which predates rn_erefs counts ephemeral
65 * references in rn_refs. Other references are tracked by the
66 * rn_other_refs field and the RC_NODE_DEAD, RC_NODE_IN_PARENT,
67 * RC_NODE_OLD, and RC_NODE_ON_FORMER flags.
69 * Locking rules: To dereference an rc_node_t * (usually to lock it), you must
70 * have a hold (rc_node_hold()) on it or otherwise be sure that it hasn't been
71 * rc_node_destroy()ed (hold a lock on its parent or child, hold a flag,
72 * etc.). Once you have locked an rc_node_t you must check its rn_flags for
73 * RC_NODE_DEAD before you can use it. This is usually done with the
74 * rc_node_{wait,hold}_flag() functions (often via the rc_node_check_*()
75 * functions & RC_NODE_*() macros), which fail if the object has died.
77 * When a transactional node (property group or snapshot) is updated,
78 * a new node takes the place of the old node in the global hash and the
79 * old node is hung off of the rn_former list of the new node. At the
80 * same time, all of its children have their rn_parent_ref pointer set,
81 * and any holds they have are reflected in the old node's rn_other_refs
82 * count. This is automatically kept up to date until the final reference
83 * to the subgraph is dropped, at which point the node is unrefed and
84 * destroyed, along with all of its children.
86 * Because name service lookups may take a long time and, more importantly
87 * may trigger additional accesses to the repository, perm_granted() must be
88 * called without holding any locks.
90 * An ITER_START for a non-ENTITY_VALUE induces an rc_node_fill_children()
91 * call via rc_node_setup_iter() to populate the rn_children uu_list of the
92 * rc_node_t * in question and a call to uu_list_walk_start() on that list. For
93 * ITER_READ, rc_iter_next() uses uu_list_walk_next() to find the next
96 * An ITER_START for an ENTITY_VALUE makes sure the node has its values
97 * filled, and sets up the iterator. An ITER_READ_VALUE just copies out
98 * the proper values and updates the offset information.
100 * To allow aliases, snapshots are implemented with a level of indirection.
101 * A snapshot rc_node_t has a snapid which refers to an rc_snapshot_t in
102 * snapshot.c which contains the authoritative snaplevel information. The
103 * snapid is "assigned" by rc_attach_snapshot().
105 * We provide the client layer with rc_node_ptr_t's to reference objects.
106 * Objects referred to by them are automatically held & released by
107 * rc_node_assign() & rc_node_clear(). The RC_NODE_PTR_*() macros are used at
108 * client.c entry points to read the pointers. They fetch the pointer to the
109 * object, return (from the function) if it is dead, and lock, hold, or hold
110 * a flag of the object.
114 * Permission checking is authorization-based: some operations may only
115 * proceed if the user has been assigned at least one of a set of
116 * authorization strings. The set of enabling authorizations depends on the
117 * operation and the target object. The set of authorizations assigned to
118 * a user is determined by an algorithm defined in libsecdb.
120 * The fastest way to decide whether the two sets intersect is by entering the
121 * strings into a hash table and detecting collisions, which takes linear time
122 * in the total size of the sets. Except for the authorization patterns which
123 * may be assigned to users, which without advanced pattern-matching
124 * algorithms will take O(n) in the number of enabling authorizations, per
127 * We can achieve some practical speed-ups by noting that if we enter all of
128 * the authorizations from one of the sets into the hash table we can merely
129 * check the elements of the second set for existence without adding them.
130 * This reduces memory requirements and hash table clutter. The enabling set
131 * is well suited for this because it is internal to configd (for now, at
132 * least). Combine this with short-circuiting and we can even minimize the
133 * number of queries to the security databases (user_attr & prof_attr).
135 * To force this usage onto clients we provide functions for adding
136 * authorizations to the enabling set of a permission context structure
137  * (perm_add_*()) and one to decide whether the user associated with the
138 * current door call client possesses any of them (perm_granted()).
140 * At some point, a generic version of this should move to libsecdb.
142 * While entering the enabling strings into the hash table, we keep track
143 * of which is the most specific for use in generating auditing events.
144 * See the "Collecting the Authorization String" section of the "SMF Audit
145 * Events" block comment below.
149 * Composition is the combination of sets of properties. The sets are ordered
150 * and properties in higher sets obscure properties of the same name in lower
151 * sets. Here we present a composed view of an instance's properties as the
152 * union of its properties and its service's properties. Similarly the
153 * properties of snaplevels are combined to form a composed view of the
154 * properties of a snapshot (which should match the composed view of the
155 * properties of the instance when the snapshot was taken).
157 * In terms of the client interface, the client may request that a property
158 * group iterator for an instance or snapshot be composed. Property groups
159 * traversed by such an iterator may not have the target entity as a parent.
160 * Similarly, the properties traversed by a property iterator for those
161 * property groups may not have the property groups iterated as parents.
163 * Implementation requires that iterators for instances and snapshots be
164 * composition-savvy, and that we have a "composed property group" entity
165 * which represents the composition of a number of property groups. Iteration
166 * over "composed property groups" yields properties which may have different
167 * parents, but for all other operations a composed property group behaves
168 * like the top-most property group it represents.
170 * The implementation is based on the rn_cchain[] array of rc_node_t pointers
171 * in rc_node_t. For instances, the pointers point to the instance and its
172 * parent service. For snapshots they point to the child snaplevels, and for
173 * composed property groups they point to property groups. A composed
174 * iterator carries an index into rn_cchain[]. Thus most of the magic ends up
175  * in the rc_iter_*() code.
181 * To maintain security, SMF generates audit events whenever
182 * privileged operations are attempted. See the System Administration
183 * Guide:Security Services answerbook for a discussion of the Solaris
186 * The SMF audit event codes are defined in adt_event.h by symbols
187 * starting with ADT_smf_ and are described in audit_event.txt. The
188 * audit record structures are defined in the SMF section of adt.xml.
189 * adt.xml is used to automatically generate adt_event.h which
190 * contains the definitions that we code to in this file. For the
191 * most part the audit events map closely to actions that you would
192 * perform with svcadm or svccfg, but there are some special cases
193 * which we'll discuss later.
195 * The software associated with SMF audit events falls into three
197 * - collecting information to be written to the audit
199 * - using the adt_* functions in
200 * usr/src/lib/libbsm/common/adt.c to generate the audit
202 * - handling special cases
204 * Collecting Information:
205 * ----------------------
207 * Most all of the audit events require the FMRI of the affected
208 * object and the authorization string that was used. The one
209 * exception is ADT_smf_annotation which we'll talk about later.
211 * Collecting the FMRI:
213 * The rc_node structure has a member called rn_fmri which points to
214 * its FMRI. This is initialized by a call to rc_node_build_fmri()
215 * when the node's parent is established. The reason for doing it
216 * at this time is that a node's FMRI is basically the concatenation
217 * of the parent's FMRI and the node's name with the appropriate
218 * decoration. rc_node_build_fmri() does this concatenation and
219 * decorating. It is called from rc_node_link_child() and
220 * rc_node_relink_child() where a node is linked to its parent.
222 * rc_node_get_fmri_or_fragment() is called to retrieve a node's FMRI
223 * when it is needed. It returns rn_fmri if it is set. If the node
224 * is at the top level, however, rn_fmri won't be set because it was
225 * never linked to a parent. In this case,
226 * rc_node_get_fmri_or_fragment() constructs an FMRI fragment based on
227 * its node type and its name, rn_name.
229 * Collecting the Authorization String:
231 * Naturally, the authorization string is captured during the
232 * authorization checking process. Acceptable authorization strings
233 * are added to a permcheck_t hash table as noted in the section on
234 * permission checking above. Once all entries have been added to the
235 * hash table, perm_granted() is called. If the client is authorized,
236 * perm_granted() returns with pc_auth_string of the permcheck_t
237 * structure pointing to the authorization string.
239 * This works fine if the client is authorized, but what happens if
240 * the client is not authorized? We need to report the required
241 * authorization string. This is the authorization that would have
242 * been used if permission had been granted. perm_granted() will
243 * find no match, so it needs to decide which string in the hash
244 * table to use as the required authorization string. It needs to do
245 * this, because configd is still going to generate an event. A
246 * design decision was made to use the most specific authorization
247 * in the hash table. The pc_auth_type enum designates the
248 * specificity of an authorization string. For example, an
249 * authorization string that is declared in an instance PG is more
250 * specific than one that is declared in a service PG.
252 * The pc_add() function keeps track of the most specific
253 * authorization in the hash table. It does this using the
254 * pc_specific and pc_specific_type members of the permcheck
255 * structure. pc_add() updates these members whenever a more
256 * specific authorization string is added to the hash table. Thus, if
257 * an authorization match is not found, perm_granted() will return
258 * with pc_auth_string in the permcheck_t pointing to the string that
259 * is referenced by pc_specific.
261 * Generating the Audit Events:
262 * ===========================
264 * As the functions in this file process requests for clients of
265 * configd, they gather the information that is required for an audit
266 * event. Eventually, the request processing gets to the point where
267 * the authorization is rejected or to the point where the requested
268 * action was attempted. At these two points smf_audit_event() is
271 * smf_audit_event() takes 4 parameters:
272 * - the event ID which is one of the ADT_smf_* symbols from
274 * - status to pass to adt_put_event()
275 * - return value to pass to adt_put_event()
276 * - the event data (see audit_event_data structure)
278 * All interactions with the auditing software require an audit
279 * session. We use one audit session per configd client. We keep
280 * track of the audit session in the repcache_client structure.
281 * smf_audit_event() calls get_audit_session() to get the session
284 * smf_audit_event() then calls adt_alloc_event() to allocate an
285 * adt_event_data union which is defined in adt_event.h, copies the
286 * data into the appropriate members of the union and calls
287 * adt_put_event() to generate the event.
292 * There are three major types of special cases:
294 * - gathering event information for each action in a
296 * - Higher level events represented by special property
297 * group/property name combinations. Many of these are
299 * - ADT_smf_annotation event
301 * Processing Transaction Actions:
302 * ------------------------------
304 * A transaction can contain multiple actions to modify, create or
305 * delete one or more properties. We need to capture information so
306 * that we can generate an event for each property action. The
307  * transaction information is stored in a tx_commit_data_t, and
308 * object.c provides accessor functions to retrieve data from this
309 * structure. rc_tx_commit() obtains a tx_commit_data_t by calling
310 * tx_commit_data_new() and passes this to object_tx_commit() to
311 * commit the transaction. Then we call generate_property_events() to
312 * generate an audit event for each property action.
314 * Special Properties:
317 * There are combinations of property group/property name that are special.
318 * They are special because they have specific meaning to startd. startd
319 * interprets them in a service-independent fashion.
320 * restarter_actions/refresh and general/enabled are two examples of these.
321 * A special event is generated for these properties in addition to the
322 * regular property event described in the previous section. The special
323 * properties are declared as an array of audit_special_prop_item
324 * structures at special_props_list in rc_node.c.
326 * In the previous section, we mentioned the
327 * generate_property_event() function that generates an event for
328 * every property action. Before generating the event,
329 * generate_property_event() calls special_property_event().
330 * special_property_event() checks to see if the action involves a
331 * special property. If it does, it generates a special audit
334 * ADT_smf_annotation event:
335 * ------------------------
337 * This is a special event unlike any other. It allows the svccfg
338 * program to store an annotation in the event log before a series
339 * of transactions is processed. It is used with the import and
340 * apply svccfg commands. svccfg uses the rep_protocol_annotation
341 * message to pass the operation (import or apply) and the file name
342 * to configd. The set_annotation() function in client.c stores
343 * these away in the a repcache_client structure. The address of
344 * this structure is saved in the thread_info structure.
346 * Before it generates any events, smf_audit_event() calls
347 * smf_annotation_event(). smf_annotation_event() calls
348 * client_annotation_needed() which is defined in client.c. If an
349 * annotation is needed client_annotation_needed() returns the
350 * operation and filename strings that were saved from the
351 * rep_protocol_annotation message. smf_annotation_event() then
352 * generates the ADT_smf_annotation event.
357 #include <bsm/adt_event.h>
359 #include <libuutil.h>
361 #include <libscf_priv.h>
367 #include <sys/types.h>
374 #define AUTH_PREFIX "solaris.smf."
375 #define AUTH_MANAGE AUTH_PREFIX "manage"
376 #define AUTH_MODIFY AUTH_PREFIX "modify"
377 #define AUTH_MODIFY_PREFIX AUTH_MODIFY "."
378 #define AUTH_PG_ACTIONS SCF_PG_RESTARTER_ACTIONS
379 #define AUTH_PG_ACTIONS_TYPE SCF_PG_RESTARTER_ACTIONS_TYPE
380 #define AUTH_PG_GENERAL SCF_PG_GENERAL
381 #define AUTH_PG_GENERAL_TYPE SCF_PG_GENERAL_TYPE
382 #define AUTH_PG_GENERAL_OVR SCF_PG_GENERAL_OVR
383 #define AUTH_PG_GENERAL_OVR_TYPE SCF_PG_GENERAL_OVR_TYPE
384 #define AUTH_PROP_ACTION "action_authorization"
385 #define AUTH_PROP_ENABLED "enabled"
386 #define AUTH_PROP_MODIFY "modify_authorization"
387 #define AUTH_PROP_VALUE "value_authorization"
388 #define AUTH_PROP_READ "read_authorization"
390 #define MAX_VALID_CHILDREN 3
392 typedef struct rc_type_info
{
393 uint32_t rt_type
; /* matches array index */
395 uint32_t rt_name_flags
;
396 uint32_t rt_valid_children
[MAX_VALID_CHILDREN
];
399 #define RT_NO_NAME -1U
401 static rc_type_info_t rc_types
[] = {
402 {REP_PROTOCOL_ENTITY_NONE
, 0, RT_NO_NAME
},
403 {REP_PROTOCOL_ENTITY_SCOPE
, 0, 0,
404 {REP_PROTOCOL_ENTITY_SERVICE
, REP_PROTOCOL_ENTITY_SCOPE
}},
405 {REP_PROTOCOL_ENTITY_SERVICE
, 0, UU_NAME_DOMAIN
| UU_NAME_PATH
,
406 {REP_PROTOCOL_ENTITY_INSTANCE
, REP_PROTOCOL_ENTITY_PROPERTYGRP
}},
407 {REP_PROTOCOL_ENTITY_INSTANCE
, 1, UU_NAME_DOMAIN
,
408 {REP_PROTOCOL_ENTITY_SNAPSHOT
, REP_PROTOCOL_ENTITY_PROPERTYGRP
}},
409 {REP_PROTOCOL_ENTITY_SNAPSHOT
, 2, UU_NAME_DOMAIN
,
410 {REP_PROTOCOL_ENTITY_SNAPLEVEL
, REP_PROTOCOL_ENTITY_PROPERTYGRP
}},
411 {REP_PROTOCOL_ENTITY_SNAPLEVEL
, 4, RT_NO_NAME
,
412 {REP_PROTOCOL_ENTITY_PROPERTYGRP
}},
413 {REP_PROTOCOL_ENTITY_PROPERTYGRP
, 5, UU_NAME_DOMAIN
,
414 {REP_PROTOCOL_ENTITY_PROPERTY
}},
415 {REP_PROTOCOL_ENTITY_CPROPERTYGRP
, 0, UU_NAME_DOMAIN
,
416 {REP_PROTOCOL_ENTITY_PROPERTY
}},
417 {REP_PROTOCOL_ENTITY_PROPERTY
, 7, UU_NAME_DOMAIN
},
420 #define NUM_TYPES ((sizeof (rc_types) / sizeof (*rc_types)))
422 /* Element of a permcheck_t hash table. */
424 struct pc_elt
*pce_next
;
429 * If an authorization fails, we must decide which of the elements in the
430 * permcheck hash table to use in the audit event. That is to say of all
431 * the strings in the hash table, we must choose one and use it in the audit
432 * event. It is desirable to use the most specific string in the audit
435 * The pc_auth_type specifies the types (sources) of authorization
436 * strings. The enum is ordered in increasing specificity.
438 typedef enum pc_auth_type
{
439 PC_AUTH_NONE
= 0, /* no auth string available. */
440 PC_AUTH_SMF
, /* strings coded into SMF. */
441 PC_AUTH_SVC
, /* strings specified in PG of a service. */
442 PC_AUTH_INST
/* strings specified in PG of an instance. */
446 * The following enum is used to represent the results of the checks to see
447 * if the client has the appropriate permissions to perform an action.
449 typedef enum perm_status
{
450 PERM_DENIED
= 0, /* Permission denied. */
451 PERM_GRANTED
, /* Client has authorizations. */
452 PERM_GONE
, /* Door client went away. */
453 PERM_FAIL
/* Generic failure. e.g. resources */
456 /* An authorization set hash table. */
458 struct pc_elt
**pc_buckets
;
459 uint_t pc_bnum
; /* number of buckets */
460 uint_t pc_enum
; /* number of elements */
461 struct pc_elt
*pc_specific
; /* most specific element */
462 pc_auth_type_t pc_specific_type
; /* type of pc_specific */
463 char *pc_auth_string
; /* authorization string */
464 /* for audit events */
468 * Structure for holding audit event data. Not all events use all members
471 typedef struct audit_event_data
{
472 char *ed_auth
; /* authorization string. */
473 char *ed_fmri
; /* affected FMRI. */
474 char *ed_snapname
; /* name of snapshot. */
475 char *ed_old_fmri
; /* old fmri in attach case. */
476 char *ed_old_name
; /* old snapshot in attach case. */
477 char *ed_type
; /* prop. group or prop. type. */
478 char *ed_prop_value
; /* property value. */
479 } audit_event_data_t
;
482 * Pointer to function to do special processing to get audit event ID.
483 * Audit event IDs are defined in /usr/include/bsm/adt_event.h. Function
484 * returns 0 if ID successfully retrieved. Otherwise it returns -1.
486 typedef int (*spc_getid_fn_t
)(tx_commit_data_t
*, size_t, const char *,
488 static int general_enable_id(tx_commit_data_t
*, size_t, const char *,
491 static uu_list_pool_t
*rc_children_pool
;
492 static uu_list_pool_t
*rc_pg_notify_pool
;
493 static uu_list_pool_t
*rc_notify_pool
;
494 static uu_list_pool_t
*rc_notify_info_pool
;
496 static rc_node_t
*rc_scope
;
498 static pthread_mutex_t rc_pg_notify_lock
= PTHREAD_MUTEX_INITIALIZER
;
499 static pthread_cond_t rc_pg_notify_cv
= PTHREAD_COND_INITIALIZER
;
500 static uint_t rc_notify_in_use
; /* blocks removals */
503 * Some combinations of property group/property name require a special
504 * audit event to be generated when there is a change.
505 * audit_special_prop_item_t is used to specify these special cases. The
506 * special_props_list array defines a list of these special properties.
508 typedef struct audit_special_prop_item
{
509 const char *api_pg_name
; /* property group name. */
510 const char *api_prop_name
; /* property name. */
511 au_event_t api_event_id
; /* event id or 0. */
512 spc_getid_fn_t api_event_func
; /* function to get event id. */
513 } audit_special_prop_item_t
;
516 * Native builds are done using the build machine's standard include
517 * files. These files may not yet have the definitions for the ADT_smf_*
518 * symbols. Thus, we do not compile this table when doing native builds.
522 * The following special_props_list array specifies property group/property
523 * name combinations that have specific meaning to startd. A special event
524 * is generated for these combinations in addition to the regular property
527 * At run time this array gets sorted. See the call to qsort(3C) in
528 * rc_node_init(). The array is sorted, so that bsearch(3C) can be used
531 static audit_special_prop_item_t special_props_list
[] = {
532 {SCF_PG_RESTARTER_ACTIONS
, SCF_PROPERTY_DEGRADED
, ADT_smf_degrade
,
534 {SCF_PG_RESTARTER_ACTIONS
, SCF_PROPERTY_DEGRADE_IMMEDIATE
,
535 ADT_smf_immediate_degrade
, NULL
},
536 {SCF_PG_RESTARTER_ACTIONS
, SCF_PROPERTY_MAINT_OFF
, ADT_smf_clear
, NULL
},
537 {SCF_PG_RESTARTER_ACTIONS
, SCF_PROPERTY_MAINT_ON
,
538 ADT_smf_maintenance
, NULL
},
539 {SCF_PG_RESTARTER_ACTIONS
, SCF_PROPERTY_MAINT_ON_IMMEDIATE
,
540 ADT_smf_immediate_maintenance
, NULL
},
541 {SCF_PG_RESTARTER_ACTIONS
, SCF_PROPERTY_MAINT_ON_IMMTEMP
,
542 ADT_smf_immtmp_maintenance
, NULL
},
543 {SCF_PG_RESTARTER_ACTIONS
, SCF_PROPERTY_MAINT_ON_TEMPORARY
,
544 ADT_smf_tmp_maintenance
, NULL
},
545 {SCF_PG_RESTARTER_ACTIONS
, SCF_PROPERTY_REFRESH
, ADT_smf_refresh
, NULL
},
546 {SCF_PG_RESTARTER_ACTIONS
, SCF_PROPERTY_RESTART
, ADT_smf_restart
, NULL
},
547 {SCF_PG_RESTARTER_ACTIONS
, SCF_PROPERTY_RESTORE
, ADT_smf_clear
, NULL
},
548 {SCF_PG_OPTIONS
, SCF_PROPERTY_MILESTONE
, ADT_smf_milestone
, NULL
},
549 {SCF_PG_OPTIONS_OVR
, SCF_PROPERTY_MILESTONE
, ADT_smf_milestone
, NULL
},
550 {SCF_PG_GENERAL
, SCF_PROPERTY_ENABLED
, 0, general_enable_id
},
551 {SCF_PG_GENERAL_OVR
, SCF_PROPERTY_ENABLED
, 0, general_enable_id
}
553 #define SPECIAL_PROP_COUNT (sizeof (special_props_list) /\
554 sizeof (audit_special_prop_item_t))
555 #endif /* NATIVE_BUILD */
558 * We support an arbitrary number of clients interested in events for certain
559 * types of changes. Each client is represented by an rc_notify_info_t, and
560 * all clients are chained onto the rc_notify_info_list.
562 * The rc_notify_list is the global notification list. Each entry is of
563 * type rc_notify_t, which is embedded in one of three other structures:
565 * rc_node_t property group update notification
566 * rc_notify_delete_t object deletion notification
567 * rc_notify_info_t notification clients
569 * Which type of object is determined by which pointer in the rc_notify_t is
572 * New notifications and clients are added to the end of the list.
573 * Notifications no-one is interested in are never added to the list.
575 * Clients use their position in the list to track which notifications they
576 * have not yet reported. As they process notifications, they move forward
577 * in the list past them. There is always a client at the beginning of the
578 * list -- as it moves past notifications, it removes them from the list and
581 * The rc_pg_notify_lock protects all notification state. The rc_pg_notify_cv
582 * is used for global signalling, and each client has a cv which it waits for
583 * events of interest on.
585 * rc_notify_in_use is used to protect rc_notify_list from deletions when
586 * the rc_pg_notify_lock is dropped. Specifically, rc_notify_info_wait()
587 * must drop the lock to call rc_node_assign(), and then it reacquires the
588 * lock. Deletions from rc_notify_list during this period are not
589 * allowed. Insertions do not matter, because they are always done at the
592 static uu_list_t
*rc_notify_info_list
;
593 static uu_list_t
*rc_notify_list
;
595 #define HASH_SIZE 512
596 #define HASH_MASK (HASH_SIZE - 1)
598 #pragma align 64(cache_hash)
599 static cache_bucket_t cache_hash
[HASH_SIZE
];
601 #define CACHE_BUCKET(h) (&cache_hash[(h) & HASH_MASK])
604 static void rc_node_no_client_refs(rc_node_t
*np
);
608 rc_node_hash(rc_node_lookup_t
*lp
)
610 uint32_t type
= lp
->rl_type
;
611 uint32_t backend
= lp
->rl_backend
;
612 uint32_t mainid
= lp
->rl_main_id
;
613 uint32_t *ids
= lp
->rl_ids
;
615 rc_type_info_t
*tp
= &rc_types
[type
];
620 assert(backend
== BACKEND_TYPE_NORMAL
||
621 backend
== BACKEND_TYPE_NONPERSIST
);
623 assert(type
> 0 && type
< NUM_TYPES
);
624 num_ids
= tp
->rt_num_ids
;
626 left
= MAX_IDS
- num_ids
;
627 assert(num_ids
<= MAX_IDS
);
629 hash
= type
* 7 + mainid
* 5 + backend
;
631 while (num_ids
-- > 0)
632 hash
= hash
* 11 + *ids
++ * 7;
635 * the rest should be zeroed
644 rc_node_match(rc_node_t
*np
, rc_node_lookup_t
*l
)
646 rc_node_lookup_t
*r
= &np
->rn_id
;
651 if (r
->rl_main_id
!= l
->rl_main_id
)
655 if (type
!= l
->rl_type
)
658 assert(type
> 0 && type
< NUM_TYPES
);
660 tp
= &rc_types
[r
->rl_type
];
661 num_ids
= tp
->rt_num_ids
;
663 assert(num_ids
<= MAX_IDS
);
664 while (num_ids
-- > 0)
665 if (r
->rl_ids
[num_ids
] != l
->rl_ids
[num_ids
])
672 * Register an ephemeral reference to np. This should be done while both
673 * the persistent reference from which the np pointer was read is locked
674 * and np itself is locked. This guarantees that another thread which
675 * thinks it has the last reference will yield without destroying the
679 rc_node_hold_ephemeral_locked(rc_node_t
*np
)
681 assert(MUTEX_HELD(&np
->rn_lock
));
687 * the "other" references on a node are maintained in an atomically
688 * updated refcount, rn_other_refs. This can be bumped from arbitrary
689 * context, and tracks references to a possibly out-of-date node's children.
691 * To prevent the node from disappearing between the final drop of
692 * rn_other_refs and the unref handling, rn_other_refs_held is bumped on
693 * 0->1 transitions and decremented (with the node lock held) on 1->0
697 rc_node_hold_other(rc_node_t
*np
)
699 if (atomic_add_32_nv(&np
->rn_other_refs
, 1) == 1) {
700 atomic_add_32(&np
->rn_other_refs_held
, 1);
701 assert(np
->rn_other_refs_held
> 0);
703 assert(np
->rn_other_refs
> 0);
707 * No node locks may be held
710 rc_node_rele_other(rc_node_t
*np
)
712 assert(np
->rn_other_refs
> 0);
713 if (atomic_add_32_nv(&np
->rn_other_refs
, -1) == 0) {
714 (void) pthread_mutex_lock(&np
->rn_lock
);
715 assert(np
->rn_other_refs_held
> 0);
716 if (atomic_add_32_nv(&np
->rn_other_refs_held
, -1) == 0 &&
717 np
->rn_refs
== 0 && (np
->rn_flags
& RC_NODE_OLD
)) {
719 * This was the last client reference. Destroy
720 * any other references and free() the node.
722 rc_node_no_client_refs(np
);
724 (void) pthread_mutex_unlock(&np
->rn_lock
);
730 rc_node_hold_locked(rc_node_t
*np
)
732 assert(MUTEX_HELD(&np
->rn_lock
));
734 if (np
->rn_refs
== 0 && (np
->rn_flags
& RC_NODE_PARENT_REF
))
735 rc_node_hold_other(np
->rn_parent_ref
);
737 assert(np
->rn_refs
> 0);
741 rc_node_hold(rc_node_t
*np
)
743 (void) pthread_mutex_lock(&np
->rn_lock
);
744 rc_node_hold_locked(np
);
745 (void) pthread_mutex_unlock(&np
->rn_lock
);
749 rc_node_rele_locked(rc_node_t
*np
)
752 rc_node_t
*par_ref
= NULL
;
754 assert(MUTEX_HELD(&np
->rn_lock
));
755 assert(np
->rn_refs
> 0);
757 if (--np
->rn_refs
== 0) {
758 if (np
->rn_flags
& RC_NODE_PARENT_REF
)
759 par_ref
= np
->rn_parent_ref
;
762 * Composed property groups are only as good as their
765 if (np
->rn_id
.rl_type
== REP_PROTOCOL_ENTITY_CPROPERTYGRP
)
766 np
->rn_flags
|= RC_NODE_DEAD
;
768 if ((np
->rn_flags
& (RC_NODE_DEAD
|RC_NODE_OLD
)) &&
769 np
->rn_other_refs
== 0 && np
->rn_other_refs_held
== 0)
775 * This was the last client reference. Destroy any other
776 * references and free() the node.
778 rc_node_no_client_refs(np
);
781 * rn_erefs can be 0 if we acquired the reference in
782 * a path which hasn't been updated to increment rn_erefs.
783 * When all paths which end here are updated, we should
784 * assert rn_erefs > 0 and always decrement it.
786 if (np
->rn_erefs
> 0)
788 (void) pthread_mutex_unlock(&np
->rn_lock
);
792 rc_node_rele_other(par_ref
);
796 rc_node_rele(rc_node_t
*np
)
798 (void) pthread_mutex_lock(&np
->rn_lock
);
799 rc_node_rele_locked(np
);
802 static cache_bucket_t
*
803 cache_hold(uint32_t h
)
805 cache_bucket_t
*bp
= CACHE_BUCKET(h
);
806 (void) pthread_mutex_lock(&bp
->cb_lock
);
811 cache_release(cache_bucket_t
*bp
)
813 (void) pthread_mutex_unlock(&bp
->cb_lock
);
817 cache_lookup_unlocked(cache_bucket_t
*bp
, rc_node_lookup_t
*lp
)
819 uint32_t h
= rc_node_hash(lp
);
822 assert(MUTEX_HELD(&bp
->cb_lock
));
823 assert(bp
== CACHE_BUCKET(h
));
825 for (np
= bp
->cb_head
; np
!= NULL
; np
= np
->rn_hash_next
) {
826 if (np
->rn_hash
== h
&& rc_node_match(np
, lp
)) {
836 cache_lookup(rc_node_lookup_t
*lp
)
842 h
= rc_node_hash(lp
);
845 np
= cache_lookup_unlocked(bp
, lp
);
853 cache_insert_unlocked(cache_bucket_t
*bp
, rc_node_t
*np
)
855 assert(MUTEX_HELD(&bp
->cb_lock
));
856 assert(np
->rn_hash
== rc_node_hash(&np
->rn_id
));
857 assert(bp
== CACHE_BUCKET(np
->rn_hash
));
859 assert(np
->rn_hash_next
== NULL
);
861 np
->rn_hash_next
= bp
->cb_head
;
866 cache_remove_unlocked(cache_bucket_t
*bp
, rc_node_t
*np
)
870 assert(MUTEX_HELD(&bp
->cb_lock
));
871 assert(np
->rn_hash
== rc_node_hash(&np
->rn_id
));
872 assert(bp
== CACHE_BUCKET(np
->rn_hash
));
874 for (npp
= &bp
->cb_head
; *npp
!= NULL
; npp
= &(*npp
)->rn_hash_next
)
879 *npp
= np
->rn_hash_next
;
880 np
->rn_hash_next
= NULL
;
884 * verify that the 'parent' type can have a child typed 'child'
886 * _INVALID_TYPE - argument is invalid
887 * _TYPE_MISMATCH - parent type cannot have children of type child
890 rc_check_parent_child(uint32_t parent
, uint32_t child
)
895 if (parent
== 0 || parent
>= NUM_TYPES
||
896 child
== 0 || child
>= NUM_TYPES
)
897 return (REP_PROTOCOL_FAIL_INVALID_TYPE
); /* invalid types */
899 for (idx
= 0; idx
< MAX_VALID_CHILDREN
; idx
++) {
900 type
= rc_types
[parent
].rt_valid_children
[idx
];
902 return (REP_PROTOCOL_SUCCESS
);
905 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH
);
910 * _INVALID_TYPE - type is invalid
911 * _BAD_REQUEST - name is an invalid name for a node of type type
914 rc_check_type_name(uint32_t type
, const char *name
)
916 if (type
== 0 || type
>= NUM_TYPES
)
917 return (REP_PROTOCOL_FAIL_INVALID_TYPE
); /* invalid types */
919 if (uu_check_name(name
, rc_types
[type
].rt_name_flags
) == -1)
920 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
922 return (REP_PROTOCOL_SUCCESS
);
926 rc_check_pgtype_name(const char *name
)
928 if (uu_check_name(name
, UU_NAME_DOMAIN
) == -1)
929 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
931 return (REP_PROTOCOL_SUCCESS
);
935 * rc_node_free_fmri should be called whenever a node loses its parent.
936 * The reason is that the node's fmri string is built up by concatenating
937 * its name to the parent's fmri. Thus, when the node no longer has a
938 * parent, its fmri is no longer valid.
941 rc_node_free_fmri(rc_node_t
*np
)
943 if (np
->rn_fmri
!= NULL
) {
944 free((void *)np
->rn_fmri
);
950 * Concatenate the appropriate separator and the FMRI element to the base
951 * FMRI string at fmri.
954 * _TRUNCATED Not enough room in buffer at fmri.
957 rc_concat_fmri_element(
958 char *fmri
, /* base fmri */
959 size_t bufsize
, /* size of buf at fmri */
960 size_t *sz_out
, /* receives result size. */
961 const char *element
, /* element name to concat */
962 rep_protocol_entity_t type
) /* type of element */
965 const char *name
= element
;
967 const char *separator
;
970 *sz_out
= strlen(fmri
);
975 case REP_PROTOCOL_ENTITY_SCOPE
:
976 if (strcmp(element
, SCF_FMRI_LOCAL_SCOPE
) == 0) {
978 * No need to display scope information if we are
979 * in the local scope.
981 separator
= SCF_FMRI_SVC_PREFIX
;
985 * Need to display scope information, because it is
986 * not the local scope.
988 separator
= SCF_FMRI_SVC_PREFIX SCF_FMRI_SCOPE_PREFIX
;
991 case REP_PROTOCOL_ENTITY_SERVICE
:
992 separator
= SCF_FMRI_SERVICE_PREFIX
;
994 case REP_PROTOCOL_ENTITY_INSTANCE
:
995 separator
= SCF_FMRI_INSTANCE_PREFIX
;
997 case REP_PROTOCOL_ENTITY_PROPERTYGRP
:
998 case REP_PROTOCOL_ENTITY_CPROPERTYGRP
:
999 separator
= SCF_FMRI_PROPERTYGRP_PREFIX
;
1001 case REP_PROTOCOL_ENTITY_PROPERTY
:
1002 separator
= SCF_FMRI_PROPERTY_PREFIX
;
1004 case REP_PROTOCOL_ENTITY_VALUE
:
1006 * A value does not have a separate FMRI from its property,
1007 * so there is nothing to concat.
1009 return (REP_PROTOCOL_SUCCESS
);
1010 case REP_PROTOCOL_ENTITY_SNAPSHOT
:
1011 case REP_PROTOCOL_ENTITY_SNAPLEVEL
:
1012 /* Snapshots do not have FMRIs, so there is nothing to do. */
1013 return (REP_PROTOCOL_SUCCESS
);
1015 (void) fprintf(stderr
, "%s:%d: Unknown protocol type %d.\n",
1016 __FILE__
, __LINE__
, type
);
1017 abort(); /* Missing a case in switch if we get here. */
1020 /* Concatenate separator and element to the fmri buffer. */
1022 actual
= strlcat(fmri
, separator
, bufsize
);
1024 if (actual
< bufsize
) {
1025 actual
= strlcat(fmri
, name
, bufsize
);
1027 actual
+= strlen(name
);
1030 if (actual
< bufsize
) {
1031 rc
= REP_PROTOCOL_SUCCESS
;
1033 rc
= REP_PROTOCOL_FAIL_TRUNCATED
;
1040 * Get the FMRI for the node at np. The fmri will be placed in buf. On
1041 * success sz_out will be set to the size of the fmri in buf. If
1042 * REP_PROTOCOL_FAIL_TRUNCATED is returned, sz_out will be set to the size
1043 * of the buffer that would be required to avoid truncation.
1046 * _TRUNCATED not enough room in buf for the FMRI.
1049 rc_node_get_fmri_or_fragment(rc_node_t
*np
, char *buf
, size_t bufsize
,
1052 size_t fmri_len
= 0;
1059 if (np
->rn_fmri
== NULL
) {
1061 * A NULL rn_fmri implies that this is a top level scope.
1062 * Child nodes will always have an rn_fmri established
1063 * because both rc_node_link_child() and
1064 * rc_node_relink_child() call rc_node_build_fmri(). In
1065 * this case, we'll just return our name preceded by the
1066 * appropriate FMRI decorations.
1068 assert(np
->rn_parent
== NULL
);
1069 r
= rc_concat_fmri_element(buf
, bufsize
, &fmri_len
, np
->rn_name
,
1071 if (r
!= REP_PROTOCOL_SUCCESS
)
1074 /* We have an fmri, so return it. */
1075 fmri_len
= strlcpy(buf
, np
->rn_fmri
, bufsize
);
1080 if (fmri_len
>= bufsize
)
1081 return (REP_PROTOCOL_FAIL_TRUNCATED
);
1083 return (REP_PROTOCOL_SUCCESS
);
1087 * Build an FMRI string for this node and save it in rn_fmri.
1089 * The basic strategy here is to get the fmri of our parent and then
1090 * concatenate the appropriate separator followed by our name. If our name
1091 * is null, the resulting fmri will just be a copy of the parent fmri.
1092 * rc_node_build_fmri() should be called with the RC_NODE_USING_PARENT flag
1093 * set. Also the rn_lock for this node should be held.
1096 * _NO_RESOURCES Could not allocate memory.
1099 rc_node_build_fmri(rc_node_t
*np
)
1102 char fmri
[REP_PROTOCOL_FMRI_LEN
];
1104 size_t sz
= REP_PROTOCOL_FMRI_LEN
;
1106 assert(MUTEX_HELD(&np
->rn_lock
));
1107 assert(np
->rn_flags
& RC_NODE_USING_PARENT
);
1109 rc_node_free_fmri(np
);
1111 rc
= rc_node_get_fmri_or_fragment(np
->rn_parent
, fmri
, sz
, &actual
);
1112 assert(rc
== REP_PROTOCOL_SUCCESS
);
1114 if (np
->rn_name
!= NULL
) {
1115 rc
= rc_concat_fmri_element(fmri
, sz
, &actual
, np
->rn_name
,
1117 assert(rc
== REP_PROTOCOL_SUCCESS
);
1118 np
->rn_fmri
= strdup(fmri
);
1120 np
->rn_fmri
= strdup(fmri
);
1122 if (np
->rn_fmri
== NULL
) {
1123 rc
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
1125 rc
= REP_PROTOCOL_SUCCESS
;
1132 * Get the FMRI of the node at np placing the result in fmri. Then
1133 * concatenate the additional element to fmri. The type variable indicates
1134 * the type of element, so that the appropriate separator can be
1135 * generated. size is the number of bytes in the buffer at fmri, and
1136 * sz_out receives the size of the generated string. If the result is
1137 * truncated, sz_out will receive the size of the buffer that would be
1138 * required to avoid truncation.
1141 * _TRUNCATED Not enough room in buffer at fmri.
1144 rc_get_fmri_and_concat(rc_node_t
*np
, char *fmri
, size_t size
, size_t *sz_out
,
1145 const char *element
, rep_protocol_entity_t type
)
1149 if ((rc
= rc_node_get_fmri_or_fragment(np
, fmri
, size
, sz_out
)) !=
1150 REP_PROTOCOL_SUCCESS
) {
1153 if ((rc
= rc_concat_fmri_element(fmri
, size
, sz_out
, element
, type
)) !=
1154 REP_PROTOCOL_SUCCESS
) {
1158 return (REP_PROTOCOL_SUCCESS
);
1162 rc_notify_info_interested(rc_notify_info_t
*rnip
, rc_notify_t
*np
)
1164 rc_node_t
*nnp
= np
->rcn_node
;
1167 assert(MUTEX_HELD(&rc_pg_notify_lock
));
1169 if (np
->rcn_delete
!= NULL
) {
1170 assert(np
->rcn_info
== NULL
&& np
->rcn_node
== NULL
);
1171 return (1); /* everyone likes deletes */
1173 if (np
->rcn_node
== NULL
) {
1174 assert(np
->rcn_info
!= NULL
|| np
->rcn_delete
!= NULL
);
1177 assert(np
->rcn_info
== NULL
);
1179 for (i
= 0; i
< RC_NOTIFY_MAX_NAMES
; i
++) {
1180 if (rnip
->rni_namelist
[i
] != NULL
) {
1181 if (strcmp(nnp
->rn_name
, rnip
->rni_namelist
[i
]) == 0)
1184 if (rnip
->rni_typelist
[i
] != NULL
) {
1185 if (strcmp(nnp
->rn_type
, rnip
->rni_typelist
[i
]) == 0)
1193 rc_notify_insert_node(rc_node_t
*nnp
)
1195 rc_notify_t
*np
= &nnp
->rn_notify
;
1196 rc_notify_info_t
*nip
;
1199 assert(np
->rcn_info
== NULL
);
1201 if (nnp
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_PROPERTYGRP
)
1204 (void) pthread_mutex_lock(&rc_pg_notify_lock
);
1206 for (nip
= uu_list_first(rc_notify_info_list
); nip
!= NULL
;
1207 nip
= uu_list_next(rc_notify_info_list
, nip
)) {
1208 if (rc_notify_info_interested(nip
, np
)) {
1209 (void) pthread_cond_broadcast(&nip
->rni_cv
);
1214 (void) uu_list_insert_before(rc_notify_list
, NULL
, np
);
1216 np
->rcn_node
= NULL
;
1218 (void) pthread_mutex_unlock(&rc_pg_notify_lock
);
1222 rc_notify_deletion(rc_notify_delete_t
*ndp
, const char *service
,
1223 const char *instance
, const char *pg
)
1225 rc_notify_info_t
*nip
;
1227 uu_list_node_init(&ndp
->rnd_notify
, &ndp
->rnd_notify
.rcn_list_node
,
1229 ndp
->rnd_notify
.rcn_delete
= ndp
;
1231 (void) snprintf(ndp
->rnd_fmri
, sizeof (ndp
->rnd_fmri
),
1232 "svc:/%s%s%s%s%s", service
,
1233 (instance
!= NULL
)? ":" : "", (instance
!= NULL
)? instance
: "",
1234 (pg
!= NULL
)? "/:properties/" : "", (pg
!= NULL
)? pg
: "");
1237 * add to notification list, notify watchers
1239 (void) pthread_mutex_lock(&rc_pg_notify_lock
);
1240 for (nip
= uu_list_first(rc_notify_info_list
); nip
!= NULL
;
1241 nip
= uu_list_next(rc_notify_info_list
, nip
))
1242 (void) pthread_cond_broadcast(&nip
->rni_cv
);
1243 (void) uu_list_insert_before(rc_notify_list
, NULL
, ndp
);
1244 (void) pthread_mutex_unlock(&rc_pg_notify_lock
);
1248 rc_notify_remove_node(rc_node_t
*nnp
)
1250 rc_notify_t
*np
= &nnp
->rn_notify
;
1252 assert(np
->rcn_info
== NULL
);
1253 assert(!MUTEX_HELD(&nnp
->rn_lock
));
1255 (void) pthread_mutex_lock(&rc_pg_notify_lock
);
1256 while (np
->rcn_node
!= NULL
) {
1257 if (rc_notify_in_use
) {
1258 (void) pthread_cond_wait(&rc_pg_notify_cv
,
1259 &rc_pg_notify_lock
);
1262 (void) uu_list_remove(rc_notify_list
, np
);
1263 np
->rcn_node
= NULL
;
1266 (void) pthread_mutex_unlock(&rc_pg_notify_lock
);
1270 rc_notify_remove_locked(rc_notify_t
*np
)
1272 assert(MUTEX_HELD(&rc_pg_notify_lock
));
1273 assert(rc_notify_in_use
== 0);
1275 (void) uu_list_remove(rc_notify_list
, np
);
1277 np
->rcn_node
= NULL
;
1278 } else if (np
->rcn_delete
) {
1279 uu_free(np
->rcn_delete
);
1281 assert(0); /* CAN'T HAPPEN */
1286 * Permission checking functions. See comment atop this file.
1288 #ifndef NATIVE_BUILD
1289 static permcheck_t
*
1294 p
= uu_zalloc(sizeof (*p
));
1297 p
->pc_bnum
= 8; /* Normal case will only have 2 elts. */
1298 p
->pc_buckets
= uu_zalloc(sizeof (*p
->pc_buckets
) * p
->pc_bnum
);
1299 if (p
->pc_buckets
== NULL
) {
1309 pc_free(permcheck_t
*pcp
)
1312 struct pc_elt
*ep
, *next
;
1314 for (i
= 0; i
< pcp
->pc_bnum
; ++i
) {
1315 for (ep
= pcp
->pc_buckets
[i
]; ep
!= NULL
; ep
= next
) {
1316 next
= ep
->pce_next
;
1321 free(pcp
->pc_buckets
);
1326 pc_hash(const char *auth
)
1332 * Generic hash function from uts/common/os/modhash.c.
1334 for (p
= auth
; *p
!= '\0'; ++p
) {
1336 g
= (h
& 0xf0000000);
1346 static perm_status_t
1347 pc_exists(permcheck_t
*pcp
, const char *auth
)
1353 for (ep
= pcp
->pc_buckets
[h
& (pcp
->pc_bnum
- 1)];
1355 ep
= ep
->pce_next
) {
1356 if (strcmp(auth
, ep
->pce_auth
) == 0) {
1357 pcp
->pc_auth_string
= ep
->pce_auth
;
1358 return (PERM_GRANTED
);
1362 return (PERM_DENIED
);
1365 static perm_status_t
1366 pc_match(permcheck_t
*pcp
, const char *pattern
)
1371 for (i
= 0; i
< pcp
->pc_bnum
; ++i
) {
1372 for (ep
= pcp
->pc_buckets
[i
]; ep
!= NULL
; ep
= ep
->pce_next
) {
1373 if (_auth_match(pattern
, ep
->pce_auth
)) {
1374 pcp
->pc_auth_string
= ep
->pce_auth
;
1375 return (PERM_GRANTED
);
1380 return (PERM_DENIED
);
1384 pc_grow(permcheck_t
*pcp
)
1386 uint_t new_bnum
, i
, j
;
1387 struct pc_elt
**new_buckets
;
1388 struct pc_elt
*ep
, *next
;
1390 new_bnum
= pcp
->pc_bnum
* 2;
1391 if (new_bnum
< pcp
->pc_bnum
)
1392 /* Homey don't play that. */
1395 new_buckets
= uu_zalloc(sizeof (*new_buckets
) * new_bnum
);
1396 if (new_buckets
== NULL
)
1399 for (i
= 0; i
< pcp
->pc_bnum
; ++i
) {
1400 for (ep
= pcp
->pc_buckets
[i
]; ep
!= NULL
; ep
= next
) {
1401 next
= ep
->pce_next
;
1402 j
= pc_hash(ep
->pce_auth
) & (new_bnum
- 1);
1403 ep
->pce_next
= new_buckets
[j
];
1404 new_buckets
[j
] = ep
;
1408 uu_free(pcp
->pc_buckets
);
1409 pcp
->pc_buckets
= new_buckets
;
1410 pcp
->pc_bnum
= new_bnum
;
1416 pc_add(permcheck_t
*pcp
, const char *auth
, pc_auth_type_t auth_type
)
1421 ep
= uu_zalloc(offsetof(struct pc_elt
, pce_auth
) + strlen(auth
) + 1);
1425 /* Grow if pc_enum / pc_bnum > 3/4. */
1426 if (pcp
->pc_enum
* 4 > 3 * pcp
->pc_bnum
)
1427 /* Failure is not a stopper; we'll try again next time. */
1428 (void) pc_grow(pcp
);
1430 (void) strcpy(ep
->pce_auth
, auth
);
1432 i
= pc_hash(auth
) & (pcp
->pc_bnum
- 1);
1433 ep
->pce_next
= pcp
->pc_buckets
[i
];
1434 pcp
->pc_buckets
[i
] = ep
;
1436 if (auth_type
> pcp
->pc_specific_type
) {
1437 pcp
->pc_specific_type
= auth_type
;
1438 pcp
->pc_specific
= ep
;
1447 * For the type of a property group, return the authorization which may be
1448 * used to modify it.
1451 perm_auth_for_pgtype(const char *pgtype
)
1453 if (strcmp(pgtype
, SCF_GROUP_METHOD
) == 0)
1454 return (AUTH_MODIFY_PREFIX
"method");
1455 else if (strcmp(pgtype
, SCF_GROUP_DEPENDENCY
) == 0)
1456 return (AUTH_MODIFY_PREFIX
"dependency");
1457 else if (strcmp(pgtype
, SCF_GROUP_APPLICATION
) == 0)
1458 return (AUTH_MODIFY_PREFIX
"application");
1459 else if (strcmp(pgtype
, SCF_GROUP_FRAMEWORK
) == 0)
1460 return (AUTH_MODIFY_PREFIX
"framework");
1467 * _NO_RESOURCES - out of memory
1470 perm_add_enabling_type(permcheck_t
*pcp
, const char *auth
,
1471 pc_auth_type_t auth_type
)
1473 return (pc_add(pcp
, auth
, auth_type
) == 0 ? REP_PROTOCOL_SUCCESS
:
1474 REP_PROTOCOL_FAIL_NO_RESOURCES
);
1479 * _NO_RESOURCES - out of memory
1482 perm_add_enabling(permcheck_t
*pcp
, const char *auth
)
1484 return (perm_add_enabling_type(pcp
, auth
, PC_AUTH_SMF
));
1487 /* Note that perm_add_enabling_values() is defined below. */
1490 * perm_granted() returns PERM_GRANTED if the current door caller has one of
1491 * the enabling authorizations in pcp, PERM_DENIED if it doesn't, PERM_GONE if
1492 * the door client went away and PERM_FAIL if an error (usually lack of
1493 * memory) occurs. auth_cb() checks each and every authorizations as
1494 * enumerated by _enum_auths. When we find a result other than PERM_DENIED,
1495 * we short-cut the enumeration and return non-zero.
1499 auth_cb(const char *auth
, void *ctxt
, void *vres
)
1501 permcheck_t
*pcp
= ctxt
;
1504 if (strchr(auth
, KV_WILDCHAR
) == NULL
)
1505 *pret
= pc_exists(pcp
, auth
);
1507 *pret
= pc_match(pcp
, auth
);
1509 if (*pret
!= PERM_DENIED
)
1512 * If we failed, choose the most specific auth string for use in
1515 assert(pcp
->pc_specific
!= NULL
);
1516 pcp
->pc_auth_string
= pcp
->pc_specific
->pce_auth
;
1518 return (0); /* Tells that we need to continue */
1521 static perm_status_t
1522 perm_granted(permcheck_t
*pcp
)
1526 perm_status_t ret
= PERM_DENIED
;
1529 char pwbuf
[1024]; /* XXX should be NSS_BUFLEN_PASSWD */
1532 if ((uc
= get_ucred()) == NULL
) {
1533 if (errno
== EINVAL
) {
1535 * Client is no longer waiting for our response (e.g.,
1536 * it received a signal & resumed with EINTR).
1537 * Punting with door_return() would be nice but we
1538 * need to release all of the locks & references we
1539 * hold. And we must report failure to the client
1540 * layer to keep it from ignoring retries as
1541 * already-done (idempotency & all that). None of the
1542 * error codes fit very well, so we might as well
1543 * force the return of _PERMISSION_DENIED since we
1544 * couldn't determine the user.
1552 uid
= ucred_geteuid(uc
);
1553 assert(uid
!= (uid_t
)-1);
1555 if (getpwuid_r(uid
, &pw
, pwbuf
, sizeof (pwbuf
)) == NULL
) {
1560 * Enumerate all the auths defined for the user and return the
1563 if (_enum_auths(pw
.pw_name
, auth_cb
, pcp
, &ret
) < 0)
1570 map_granted_status(perm_status_t status
, permcheck_t
*pcp
,
1578 *match_auth
= strdup(pcp
->pc_auth_string
);
1579 if (*match_auth
== NULL
)
1580 rc
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
1582 rc
= REP_PROTOCOL_FAIL_PERMISSION_DENIED
;
1585 *match_auth
= strdup(pcp
->pc_auth_string
);
1586 if (*match_auth
== NULL
)
1587 rc
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
1589 rc
= REP_PROTOCOL_SUCCESS
;
1592 rc
= REP_PROTOCOL_FAIL_PERMISSION_DENIED
;
1595 rc
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
1600 #endif /* NATIVE_BUILD */
1603 * flags in RC_NODE_WAITING_FLAGS are broadcast when unset, and are used to
1604 * serialize certain actions, and to wait for certain operations to complete
1606 * The waiting flags are:
1607 * RC_NODE_CHILDREN_CHANGING
1608 * The child list is being built or changed (due to creation
1609 * or deletion). All iterators pause.
1611 * RC_NODE_USING_PARENT
1612 * Someone is actively using the parent pointer, so we can't
1613 * be removed from the parent list.
1615 * RC_NODE_CREATING_CHILD
1616 * A child is being created -- locks out other creations, to
1617 * prevent insert-insert races.
1620 * This object is running a transaction.
1623 * This node might be dying. Always set as a set, using
1624 * RC_NODE_DYING_FLAGS (which is everything but
1625 * RC_NODE_USING_PARENT)
1628 rc_node_hold_flag(rc_node_t
*np
, uint32_t flag
)
1630 assert(MUTEX_HELD(&np
->rn_lock
));
1631 assert((flag
& ~RC_NODE_WAITING_FLAGS
) == 0);
1633 while (!(np
->rn_flags
& RC_NODE_DEAD
) && (np
->rn_flags
& flag
)) {
1634 (void) pthread_cond_wait(&np
->rn_cv
, &np
->rn_lock
);
1636 if (np
->rn_flags
& RC_NODE_DEAD
)
1639 np
->rn_flags
|= flag
;
1644 rc_node_rele_flag(rc_node_t
*np
, uint32_t flag
)
1646 assert((flag
& ~RC_NODE_WAITING_FLAGS
) == 0);
1647 assert(MUTEX_HELD(&np
->rn_lock
));
1648 assert((np
->rn_flags
& flag
) == flag
);
1649 np
->rn_flags
&= ~flag
;
1650 (void) pthread_cond_broadcast(&np
->rn_cv
);
1654 * wait until a particular flag has cleared. Fails if the object dies.
1657 rc_node_wait_flag(rc_node_t
*np
, uint32_t flag
)
1659 assert(MUTEX_HELD(&np
->rn_lock
));
1660 while (!(np
->rn_flags
& RC_NODE_DEAD
) && (np
->rn_flags
& flag
))
1661 (void) pthread_cond_wait(&np
->rn_cv
, &np
->rn_lock
);
1663 return (!(np
->rn_flags
& RC_NODE_DEAD
));
1667 * On entry, np's lock must be held, and this thread must be holding
1668 * RC_NODE_USING_PARENT. On return, both of them are released.
1670 * If the return value is NULL, np either does not have a parent, or
1671 * the parent has been marked DEAD.
1673 * If the return value is non-NULL, it is the parent of np, and both
1674 * its lock and the requested flags are held.
1677 rc_node_hold_parent_flag(rc_node_t
*np
, uint32_t flag
)
1681 assert(MUTEX_HELD(&np
->rn_lock
));
1682 assert(np
->rn_flags
& RC_NODE_USING_PARENT
);
1684 if ((pp
= np
->rn_parent
) == NULL
) {
1685 rc_node_rele_flag(np
, RC_NODE_USING_PARENT
);
1686 (void) pthread_mutex_unlock(&np
->rn_lock
);
1689 (void) pthread_mutex_unlock(&np
->rn_lock
);
1691 (void) pthread_mutex_lock(&pp
->rn_lock
);
1692 (void) pthread_mutex_lock(&np
->rn_lock
);
1693 rc_node_rele_flag(np
, RC_NODE_USING_PARENT
);
1694 (void) pthread_mutex_unlock(&np
->rn_lock
);
1696 if (!rc_node_hold_flag(pp
, flag
)) {
1697 (void) pthread_mutex_unlock(&pp
->rn_lock
);
1706 rc_node_t
*np
= uu_zalloc(sizeof (*np
));
1711 (void) pthread_mutex_init(&np
->rn_lock
, NULL
);
1712 (void) pthread_cond_init(&np
->rn_cv
, NULL
);
1714 np
->rn_children
= uu_list_create(rc_children_pool
, np
, 0);
1715 np
->rn_pg_notify_list
= uu_list_create(rc_pg_notify_pool
, np
, 0);
1717 uu_list_node_init(np
, &np
->rn_sibling_node
, rc_children_pool
);
1719 uu_list_node_init(&np
->rn_notify
, &np
->rn_notify
.rcn_list_node
,
1726 rc_node_destroy(rc_node_t
*np
)
1730 if (np
->rn_flags
& RC_NODE_UNREFED
)
1731 return; /* being handled elsewhere */
1733 assert(np
->rn_refs
== 0 && np
->rn_other_refs
== 0);
1734 assert(np
->rn_former
== NULL
);
1736 if (np
->rn_id
.rl_type
== REP_PROTOCOL_ENTITY_CPROPERTYGRP
) {
1737 /* Release the holds from rc_iter_next(). */
1738 for (i
= 0; i
< COMPOSITION_DEPTH
; ++i
) {
1739 /* rn_cchain[i] may be NULL for empty snapshots. */
1740 if (np
->rn_cchain
[i
] != NULL
)
1741 rc_node_rele(np
->rn_cchain
[i
]);
1745 if (np
->rn_name
!= NULL
)
1746 free((void *)np
->rn_name
);
1748 if (np
->rn_type
!= NULL
)
1749 free((void *)np
->rn_type
);
1751 if (np
->rn_values
!= NULL
)
1752 object_free_values(np
->rn_values
, np
->rn_valtype
,
1753 np
->rn_values_count
, np
->rn_values_size
);
1754 np
->rn_values
= NULL
;
1755 rc_node_free_fmri(np
);
1757 if (np
->rn_snaplevel
!= NULL
)
1758 rc_snaplevel_rele(np
->rn_snaplevel
);
1759 np
->rn_snaplevel
= NULL
;
1761 uu_list_node_fini(np
, &np
->rn_sibling_node
, rc_children_pool
);
1763 uu_list_node_fini(&np
->rn_notify
, &np
->rn_notify
.rcn_list_node
,
1766 assert(uu_list_first(np
->rn_children
) == NULL
);
1767 uu_list_destroy(np
->rn_children
);
1768 uu_list_destroy(np
->rn_pg_notify_list
);
1770 (void) pthread_mutex_destroy(&np
->rn_lock
);
1771 (void) pthread_cond_destroy(&np
->rn_cv
);
1777 * Link in a child node.
1779 * Because of the lock ordering, cp has to already be in the hash table with
1780 * its lock dropped before we get it. To prevent anyone from noticing that
1781 * it is parentless, the creation code sets the RC_NODE_USING_PARENT. Once
1782 * we've linked it in, we release the flag.
1785 rc_node_link_child(rc_node_t
*np
, rc_node_t
*cp
)
1787 assert(!MUTEX_HELD(&np
->rn_lock
));
1788 assert(!MUTEX_HELD(&cp
->rn_lock
));
1790 (void) pthread_mutex_lock(&np
->rn_lock
);
1791 (void) pthread_mutex_lock(&cp
->rn_lock
);
1792 assert(!(cp
->rn_flags
& RC_NODE_IN_PARENT
) &&
1793 (cp
->rn_flags
& RC_NODE_USING_PARENT
));
1795 assert(rc_check_parent_child(np
->rn_id
.rl_type
, cp
->rn_id
.rl_type
) ==
1796 REP_PROTOCOL_SUCCESS
);
1799 cp
->rn_flags
|= RC_NODE_IN_PARENT
;
1800 (void) uu_list_insert_before(np
->rn_children
, NULL
, cp
);
1801 (void) rc_node_build_fmri(cp
);
1803 (void) pthread_mutex_unlock(&np
->rn_lock
);
1805 rc_node_rele_flag(cp
, RC_NODE_USING_PARENT
);
1806 (void) pthread_mutex_unlock(&cp
->rn_lock
);
1810 * Sets the rn_parent_ref field of all the children of np to pp -- always
1811 * initially invoked as rc_node_setup_parent_ref(np, np), we then recurse.
1813 * This is used when we mark a node RC_NODE_OLD, so that when the object and
1814 * its children are no longer referenced, they will all be deleted as a unit.
1817 rc_node_setup_parent_ref(rc_node_t
*np
, rc_node_t
*pp
)
1821 assert(MUTEX_HELD(&np
->rn_lock
));
1823 for (cp
= uu_list_first(np
->rn_children
); cp
!= NULL
;
1824 cp
= uu_list_next(np
->rn_children
, cp
)) {
1825 (void) pthread_mutex_lock(&cp
->rn_lock
);
1826 if (cp
->rn_flags
& RC_NODE_PARENT_REF
) {
1827 assert(cp
->rn_parent_ref
== pp
);
1829 assert(cp
->rn_parent_ref
== NULL
);
1831 cp
->rn_flags
|= RC_NODE_PARENT_REF
;
1832 cp
->rn_parent_ref
= pp
;
1833 if (cp
->rn_refs
!= 0)
1834 rc_node_hold_other(pp
);
1836 rc_node_setup_parent_ref(cp
, pp
); /* recurse */
1837 (void) pthread_mutex_unlock(&cp
->rn_lock
);
1842 * Atomically replace 'np' with 'newp', with a parent of 'pp'.
1845 * *no* node locks may be held.
1846 * pp must be held with RC_NODE_CHILDREN_CHANGING
1847 * newp and np must be held with RC_NODE_IN_TX
1848 * np must be marked RC_NODE_IN_PARENT, newp must not be
1849 * np must be marked RC_NODE_OLD
1852 * pp's RC_NODE_CHILDREN_CHANGING is dropped
1853 * newp and np's RC_NODE_IN_TX is dropped
1854 * newp->rn_former = np;
1855 * newp is RC_NODE_IN_PARENT, np is not.
1856 * interested notify subscribers have been notified of newp's new status.
1859 rc_node_relink_child(rc_node_t
*pp
, rc_node_t
*np
, rc_node_t
*newp
)
1863 * First, swap np and nnp in the cache. newp's RC_NODE_IN_TX flag
1864 * keeps rc_node_update() from seeing it until we are done.
1866 bp
= cache_hold(newp
->rn_hash
);
1867 cache_remove_unlocked(bp
, np
);
1868 cache_insert_unlocked(bp
, newp
);
1872 * replace np with newp in pp's list, and attach it to newp's rn_former
1875 (void) pthread_mutex_lock(&pp
->rn_lock
);
1876 assert(pp
->rn_flags
& RC_NODE_CHILDREN_CHANGING
);
1878 (void) pthread_mutex_lock(&newp
->rn_lock
);
1879 assert(!(newp
->rn_flags
& RC_NODE_IN_PARENT
));
1880 assert(newp
->rn_flags
& RC_NODE_IN_TX
);
1882 (void) pthread_mutex_lock(&np
->rn_lock
);
1883 assert(np
->rn_flags
& RC_NODE_IN_PARENT
);
1884 assert(np
->rn_flags
& RC_NODE_OLD
);
1885 assert(np
->rn_flags
& RC_NODE_IN_TX
);
1887 newp
->rn_parent
= pp
;
1888 newp
->rn_flags
|= RC_NODE_IN_PARENT
;
1891 * Note that we carefully add newp before removing np -- this
1892 * keeps iterators on the list from missing us.
1894 (void) uu_list_insert_after(pp
->rn_children
, np
, newp
);
1895 (void) rc_node_build_fmri(newp
);
1896 (void) uu_list_remove(pp
->rn_children
, np
);
1901 newp
->rn_former
= np
;
1902 np
->rn_parent
= NULL
;
1903 np
->rn_flags
&= ~RC_NODE_IN_PARENT
;
1904 np
->rn_flags
|= RC_NODE_ON_FORMER
;
1906 rc_notify_insert_node(newp
);
1908 rc_node_rele_flag(pp
, RC_NODE_CHILDREN_CHANGING
);
1909 (void) pthread_mutex_unlock(&pp
->rn_lock
);
1910 rc_node_rele_flag(newp
, RC_NODE_USING_PARENT
| RC_NODE_IN_TX
);
1911 (void) pthread_mutex_unlock(&newp
->rn_lock
);
1912 rc_node_setup_parent_ref(np
, np
);
1913 rc_node_rele_flag(np
, RC_NODE_IN_TX
);
1914 (void) pthread_mutex_unlock(&np
->rn_lock
);
1918 * makes sure a node with lookup 'nip', name 'name', and parent 'pp' exists.
1919 * 'cp' is used (and returned) if the node does not yet exist. If it does
1920 * exist, 'cp' is freed, and the existent node is returned instead.
1923 rc_node_setup(rc_node_t
*cp
, rc_node_lookup_t
*nip
, const char *name
,
1928 uint32_t h
= rc_node_hash(nip
);
1930 assert(cp
->rn_refs
== 0);
1933 if ((np
= cache_lookup_unlocked(bp
, nip
)) != NULL
) {
1937 * make sure it matches our expectations
1939 (void) pthread_mutex_lock(&np
->rn_lock
);
1940 if (rc_node_hold_flag(np
, RC_NODE_USING_PARENT
)) {
1941 assert(np
->rn_parent
== pp
);
1942 assert(memcmp(&np
->rn_id
, nip
, sizeof (*nip
)) == 0);
1943 assert(strcmp(np
->rn_name
, name
) == 0);
1944 assert(np
->rn_type
== NULL
);
1945 assert(np
->rn_flags
& RC_NODE_IN_PARENT
);
1946 rc_node_rele_flag(np
, RC_NODE_USING_PARENT
);
1948 (void) pthread_mutex_unlock(&np
->rn_lock
);
1950 rc_node_destroy(cp
);
1955 * No one is there -- setup & install the new node.
1961 np
->rn_name
= strdup(name
);
1963 np
->rn_flags
|= RC_NODE_USING_PARENT
;
1965 if (np
->rn_id
.rl_type
== REP_PROTOCOL_ENTITY_INSTANCE
) {
1966 #if COMPOSITION_DEPTH == 2
1967 np
->rn_cchain
[0] = np
;
1968 np
->rn_cchain
[1] = pp
;
1970 #error This code must be updated.
1974 cache_insert_unlocked(bp
, np
);
1975 cache_release(bp
); /* we are now visible */
1977 rc_node_link_child(pp
, np
);
1983 * makes sure a snapshot with lookup 'nip', name 'name', and parent 'pp' exists.
1984 * 'cp' is used (and returned) if the node does not yet exist. If it does
1985 * exist, 'cp' is freed, and the existent node is returned instead.
1988 rc_node_setup_snapshot(rc_node_t
*cp
, rc_node_lookup_t
*nip
, const char *name
,
1989 uint32_t snap_id
, rc_node_t
*pp
)
1993 uint32_t h
= rc_node_hash(nip
);
1995 assert(cp
->rn_refs
== 0);
1998 if ((np
= cache_lookup_unlocked(bp
, nip
)) != NULL
) {
2002 * make sure it matches our expectations
2004 (void) pthread_mutex_lock(&np
->rn_lock
);
2005 if (rc_node_hold_flag(np
, RC_NODE_USING_PARENT
)) {
2006 assert(np
->rn_parent
== pp
);
2007 assert(memcmp(&np
->rn_id
, nip
, sizeof (*nip
)) == 0);
2008 assert(strcmp(np
->rn_name
, name
) == 0);
2009 assert(np
->rn_type
== NULL
);
2010 assert(np
->rn_flags
& RC_NODE_IN_PARENT
);
2011 rc_node_rele_flag(np
, RC_NODE_USING_PARENT
);
2013 (void) pthread_mutex_unlock(&np
->rn_lock
);
2015 rc_node_destroy(cp
);
2020 * No one is there -- create a new node.
2026 np
->rn_name
= strdup(name
);
2027 np
->rn_snapshot_id
= snap_id
;
2029 np
->rn_flags
|= RC_NODE_USING_PARENT
;
2031 cache_insert_unlocked(bp
, np
);
2032 cache_release(bp
); /* we are now visible */
2034 rc_node_link_child(pp
, np
);
2040 * makes sure a snaplevel with lookup 'nip' and parent 'pp' exists. 'cp' is
2041 * used (and returned) if the node does not yet exist. If it does exist, 'cp'
2042 * is freed, and the existent node is returned instead.
2045 rc_node_setup_snaplevel(rc_node_t
*cp
, rc_node_lookup_t
*nip
,
2046 rc_snaplevel_t
*lvl
, rc_node_t
*pp
)
2050 uint32_t h
= rc_node_hash(nip
);
2052 assert(cp
->rn_refs
== 0);
2055 if ((np
= cache_lookup_unlocked(bp
, nip
)) != NULL
) {
2059 * make sure it matches our expectations
2061 (void) pthread_mutex_lock(&np
->rn_lock
);
2062 if (rc_node_hold_flag(np
, RC_NODE_USING_PARENT
)) {
2063 assert(np
->rn_parent
== pp
);
2064 assert(memcmp(&np
->rn_id
, nip
, sizeof (*nip
)) == 0);
2065 assert(np
->rn_name
== NULL
);
2066 assert(np
->rn_type
== NULL
);
2067 assert(np
->rn_flags
& RC_NODE_IN_PARENT
);
2068 rc_node_rele_flag(np
, RC_NODE_USING_PARENT
);
2070 (void) pthread_mutex_unlock(&np
->rn_lock
);
2072 rc_node_destroy(cp
);
2077 * No one is there -- create a new node.
2080 rc_node_hold(np
); /* released in snapshot_fill_children() */
2084 rc_snaplevel_hold(lvl
);
2085 np
->rn_snaplevel
= lvl
;
2087 np
->rn_flags
|= RC_NODE_USING_PARENT
;
2089 cache_insert_unlocked(bp
, np
);
2090 cache_release(bp
); /* we are now visible */
2092 /* Add this snaplevel to the snapshot's composition chain. */
2093 assert(pp
->rn_cchain
[lvl
->rsl_level_num
- 1] == NULL
);
2094 pp
->rn_cchain
[lvl
->rsl_level_num
- 1] = np
;
2096 rc_node_link_child(pp
, np
);
2102 * Returns NULL if strdup() fails.
2105 rc_node_setup_pg(rc_node_t
*cp
, rc_node_lookup_t
*nip
, const char *name
,
2106 const char *type
, uint32_t flags
, uint32_t gen_id
, rc_node_t
*pp
)
2111 uint32_t h
= rc_node_hash(nip
);
2113 if ((np
= cache_lookup_unlocked(bp
, nip
)) != NULL
) {
2117 * make sure it matches our expectations (don't check
2118 * the generation number or parent, since someone could
2119 * have gotten a transaction through while we weren't
2122 (void) pthread_mutex_lock(&np
->rn_lock
);
2123 if (rc_node_hold_flag(np
, RC_NODE_USING_PARENT
)) {
2124 assert(memcmp(&np
->rn_id
, nip
, sizeof (*nip
)) == 0);
2125 assert(strcmp(np
->rn_name
, name
) == 0);
2126 assert(strcmp(np
->rn_type
, type
) == 0);
2127 assert(np
->rn_pgflags
== flags
);
2128 assert(np
->rn_flags
& RC_NODE_IN_PARENT
);
2129 rc_node_rele_flag(np
, RC_NODE_USING_PARENT
);
2131 (void) pthread_mutex_unlock(&np
->rn_lock
);
2133 rc_node_destroy(cp
);
2138 rc_node_hold(np
); /* released in fill_pg_callback() */
2141 np
->rn_name
= strdup(name
);
2142 if (np
->rn_name
== NULL
) {
2146 np
->rn_type
= strdup(type
);
2147 if (np
->rn_type
== NULL
) {
2148 free((void *)np
->rn_name
);
2152 np
->rn_pgflags
= flags
;
2153 np
->rn_gen_id
= gen_id
;
2155 np
->rn_flags
|= RC_NODE_USING_PARENT
;
2157 cache_insert_unlocked(bp
, np
);
2158 cache_release(bp
); /* we are now visible */
2160 rc_node_link_child(pp
, np
);
2165 #if COMPOSITION_DEPTH == 2
2167 * Initialize a "composed property group" which represents the composition of
2168 * property groups pg1 & pg2. It is ephemeral: once created & returned for an
2169 * ITER_READ request, keeping it out of cache_hash and any child lists
2170 * prevents it from being looked up. Operations besides iteration are passed
2173 * pg1 & pg2 should be held before entering this function. They will be
2174 * released in rc_node_destroy().
2177 rc_node_setup_cpg(rc_node_t
*cpg
, rc_node_t
*pg1
, rc_node_t
*pg2
)
2179 if (strcmp(pg1
->rn_type
, pg2
->rn_type
) != 0)
2180 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH
);
2182 cpg
->rn_id
.rl_type
= REP_PROTOCOL_ENTITY_CPROPERTYGRP
;
2183 cpg
->rn_name
= strdup(pg1
->rn_name
);
2184 if (cpg
->rn_name
== NULL
)
2185 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
2187 cpg
->rn_cchain
[0] = pg1
;
2188 cpg
->rn_cchain
[1] = pg2
;
2190 return (REP_PROTOCOL_SUCCESS
);
2193 #error This code must be updated.
2197 * Fails with _NO_RESOURCES.
2200 rc_node_create_property(rc_node_t
*pp
, rc_node_lookup_t
*nip
,
2201 const char *name
, rep_protocol_value_type_t type
,
2202 const char *vals
, size_t count
, size_t size
)
2207 uint32_t h
= rc_node_hash(nip
);
2209 if ((np
= cache_lookup_unlocked(bp
, nip
)) != NULL
) {
2212 * make sure it matches our expectations
2214 (void) pthread_mutex_lock(&np
->rn_lock
);
2215 if (rc_node_hold_flag(np
, RC_NODE_USING_PARENT
)) {
2216 assert(np
->rn_parent
== pp
);
2217 assert(memcmp(&np
->rn_id
, nip
, sizeof (*nip
)) == 0);
2218 assert(strcmp(np
->rn_name
, name
) == 0);
2219 assert(np
->rn_valtype
== type
);
2220 assert(np
->rn_values_count
== count
);
2221 assert(np
->rn_values_size
== size
);
2222 assert(vals
== NULL
||
2223 memcmp(np
->rn_values
, vals
, size
) == 0);
2224 assert(np
->rn_flags
& RC_NODE_IN_PARENT
);
2225 rc_node_rele_flag(np
, RC_NODE_USING_PARENT
);
2227 rc_node_rele_locked(np
);
2228 object_free_values(vals
, type
, count
, size
);
2229 return (REP_PROTOCOL_SUCCESS
);
2233 * No one is there -- create a new node.
2235 np
= rc_node_alloc();
2238 object_free_values(vals
, type
, count
, size
);
2239 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
2243 np
->rn_name
= strdup(name
);
2244 if (np
->rn_name
== NULL
) {
2246 object_free_values(vals
, type
, count
, size
);
2247 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
2250 np
->rn_valtype
= type
;
2251 np
->rn_values
= vals
;
2252 np
->rn_values_count
= count
;
2253 np
->rn_values_size
= size
;
2255 np
->rn_flags
|= RC_NODE_USING_PARENT
;
2257 cache_insert_unlocked(bp
, np
);
2258 cache_release(bp
); /* we are now visible */
2260 rc_node_link_child(pp
, np
);
2262 return (REP_PROTOCOL_SUCCESS
);
2266 * This function implements a decision table to determine the event ID for
2267 * changes to the enabled (SCF_PROPERTY_ENABLED) property. The event ID is
2268 * determined by the value of the first property in the command specified
2269 * by cmd_no and the name of the property group. Here is the decision
2272 * Property Group Name
2273 * Property ------------------------------------------
2274 * Value SCF_PG_GENERAL SCF_PG_GENERAL_OVR
2275 * -------- -------------- ------------------
2276 * "0" ADT_smf_disable ADT_smf_tmp_disable
2277 * "1" ADT_smf_enable ADT_smf_tmp_enable
2279 * This function is called by special_property_event through a function
2280 * pointer in the special_props_list array.
2282 * Since the ADT_smf_* symbols may not be defined in the build machine's
2283 * include files, this function is not compiled when doing native builds.
2285 #ifndef NATIVE_BUILD
2287 general_enable_id(tx_commit_data_t
*tx_data
, size_t cmd_no
, const char *pg
,
2288 au_event_t
*event_id
)
2295 * First, check property value.
2297 if (tx_cmd_nvalues(tx_data
, cmd_no
, &nvalues
) != REP_PROTOCOL_SUCCESS
)
2301 if (tx_cmd_value(tx_data
, cmd_no
, 0, &value
) != REP_PROTOCOL_SUCCESS
)
2303 if (strcmp(value
, "0") == 0) {
2305 } else if (strcmp(value
, "1") == 0) {
2312 * Now check property group name.
2314 if (strcmp(pg
, SCF_PG_GENERAL
) == 0) {
2315 *event_id
= enable
? ADT_smf_enable
: ADT_smf_disable
;
2317 } else if (strcmp(pg
, SCF_PG_GENERAL_OVR
) == 0) {
2318 *event_id
= enable
? ADT_smf_tmp_enable
: ADT_smf_tmp_disable
;
2323 #endif /* NATIVE_BUILD */
2326 * This function compares two audit_special_prop_item_t structures
2327 * represented by item1 and item2. It returns an integer greater than 0 if
2328 * item1 is greater than item2. It returns 0 if they are equal and an
2329 * integer less than 0 if item1 is less than item2. api_prop_name and
2330 * api_pg_name are the key fields for sorting.
2332 * This function is suitable for calls to bsearch(3C) and qsort(3C).
2335 special_prop_compare(const void *item1
, const void *item2
)
2337 const audit_special_prop_item_t
*a
= (audit_special_prop_item_t
*)item1
;
2338 const audit_special_prop_item_t
*b
= (audit_special_prop_item_t
*)item2
;
2341 r
= strcmp(a
->api_prop_name
, b
->api_prop_name
);
2344 * Primary keys are the same, so check the secondary key.
2346 r
= strcmp(a
->api_pg_name
, b
->api_pg_name
);
2357 rc_children_pool
= uu_list_pool_create("rc_children_pool",
2358 sizeof (rc_node_t
), offsetof(rc_node_t
, rn_sibling_node
),
2359 NULL
, UU_LIST_POOL_DEBUG
);
2361 rc_pg_notify_pool
= uu_list_pool_create("rc_pg_notify_pool",
2362 sizeof (rc_node_pg_notify_t
),
2363 offsetof(rc_node_pg_notify_t
, rnpn_node
),
2364 NULL
, UU_LIST_POOL_DEBUG
);
2366 rc_notify_pool
= uu_list_pool_create("rc_notify_pool",
2367 sizeof (rc_notify_t
), offsetof(rc_notify_t
, rcn_list_node
),
2368 NULL
, UU_LIST_POOL_DEBUG
);
2370 rc_notify_info_pool
= uu_list_pool_create("rc_notify_info_pool",
2371 sizeof (rc_notify_info_t
),
2372 offsetof(rc_notify_info_t
, rni_list_node
),
2373 NULL
, UU_LIST_POOL_DEBUG
);
2375 if (rc_children_pool
== NULL
|| rc_pg_notify_pool
== NULL
||
2376 rc_notify_pool
== NULL
|| rc_notify_info_pool
== NULL
)
2377 uu_die("out of memory");
2379 rc_notify_list
= uu_list_create(rc_notify_pool
,
2380 &rc_notify_list
, 0);
2382 rc_notify_info_list
= uu_list_create(rc_notify_info_pool
,
2383 &rc_notify_info_list
, 0);
2385 if (rc_notify_list
== NULL
|| rc_notify_info_list
== NULL
)
2386 uu_die("out of memory");
2389 * Sort the special_props_list array so that it can be searched
2392 * The special_props_list array is not compiled into the native
2393 * build code, so there is no need to call qsort if NATIVE_BUILD is
2396 #ifndef NATIVE_BUILD
2397 qsort(special_props_list
, SPECIAL_PROP_COUNT
,
2398 sizeof (special_props_list
[0]), special_prop_compare
);
2399 #endif /* NATIVE_BUILD */
2401 if ((np
= rc_node_alloc()) == NULL
)
2402 uu_die("out of memory");
2405 np
->rn_id
.rl_type
= REP_PROTOCOL_ENTITY_SCOPE
;
2406 np
->rn_id
.rl_backend
= BACKEND_TYPE_NORMAL
;
2407 np
->rn_hash
= rc_node_hash(&np
->rn_id
);
2408 np
->rn_name
= "localhost";
2410 bp
= cache_hold(np
->rn_hash
);
2411 cache_insert_unlocked(bp
, np
);
/*
 * Load np's children of entity type 'type' from the object layer, if they
 * have not been loaded already.  Fails with
 *	_INVALID_TYPE - type is invalid
 *	_TYPE_MISMATCH - np doesn't carry children of type type
 *	_DELETED - np has been deleted
 *
 * np->rn_lock must be held on entry and is held again on return, but is
 * dropped around the call into the object layer.
 */
static int
rc_node_fill_children(rc_node_t *np, uint32_t type)
{
	int rc;

	assert(MUTEX_HELD(&np->rn_lock));

	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
	    REP_PROTOCOL_SUCCESS)
		return (rc);

	/* the flag doubles as a "fill in progress" latch */
	if (!rc_node_hold_flag(np, RC_NODE_CHILDREN_CHANGING))
		return (REP_PROTOCOL_FAIL_DELETED);

	/* already filled -- nothing to do */
	if (np->rn_flags & RC_NODE_HAS_CHILDREN) {
		rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
		return (REP_PROTOCOL_SUCCESS);
	}

	/*
	 * Drop the node lock while calling down to the object layer;
	 * RC_NODE_CHILDREN_CHANGING keeps other fillers out.
	 */
	(void) pthread_mutex_unlock(&np->rn_lock);
	rc = object_fill_children(np);
	(void) pthread_mutex_lock(&np->rn_lock);

	if (rc == REP_PROTOCOL_SUCCESS) {
		np->rn_flags |= RC_NODE_HAS_CHILDREN;
	}
	rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);

	return (rc);
}
2458 * _INVALID_TYPE - type is invalid
2459 * _TYPE_MISMATCH - np doesn't carry children of type type
2460 * _DELETED - np has been deleted
2462 * _SUCCESS - if *cpp is not NULL, it is held
2465 rc_node_find_named_child(rc_node_t
*np
, const char *name
, uint32_t type
,
2471 assert(MUTEX_HELD(&np
->rn_lock
));
2472 assert(np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_CPROPERTYGRP
);
2474 ret
= rc_node_fill_children(np
, type
);
2475 if (ret
!= REP_PROTOCOL_SUCCESS
)
2478 for (cp
= uu_list_first(np
->rn_children
);
2480 cp
= uu_list_next(np
->rn_children
, cp
)) {
2481 if (cp
->rn_id
.rl_type
== type
&& strcmp(cp
->rn_name
, name
) == 0)
2489 return (REP_PROTOCOL_SUCCESS
);
2492 static int rc_node_parent(rc_node_t
*, rc_node_t
**);
2496 * _INVALID_TYPE - type is invalid
2497 * _DELETED - np or an ancestor has been deleted
2498 * _NOT_FOUND - no ancestor of specified type exists
2499 * _SUCCESS - *app is held
2502 rc_node_find_ancestor(rc_node_t
*np
, uint32_t type
, rc_node_t
**app
)
2505 rc_node_t
*parent
, *np_orig
;
2507 if (type
>= REP_PROTOCOL_ENTITY_MAX
)
2508 return (REP_PROTOCOL_FAIL_INVALID_TYPE
);
2512 while (np
->rn_id
.rl_type
> type
) {
2513 ret
= rc_node_parent(np
, &parent
);
2516 if (ret
!= REP_PROTOCOL_SUCCESS
)
2521 if (np
->rn_id
.rl_type
== type
) {
2523 return (REP_PROTOCOL_SUCCESS
);
2526 return (REP_PROTOCOL_FAIL_NOT_FOUND
);
2529 #ifndef NATIVE_BUILD
2531 * If the propname property exists in pg, and it is of type string, add its
2532 * values as authorizations to pcp. pg must not be locked on entry, and it is
2533 * returned unlocked. Returns
2534 * _DELETED - pg was deleted
2536 * _NOT_FOUND - pg has no property named propname
2540 perm_add_pg_prop_values(permcheck_t
*pcp
, rc_node_t
*pg
, const char *propname
)
2548 assert(!MUTEX_HELD(&pg
->rn_lock
));
2549 assert(pg
->rn_id
.rl_type
== REP_PROTOCOL_ENTITY_PROPERTYGRP
);
2551 (void) pthread_mutex_lock(&pg
->rn_lock
);
2552 result
= rc_node_find_named_child(pg
, propname
,
2553 REP_PROTOCOL_ENTITY_PROPERTY
, &prop
);
2554 (void) pthread_mutex_unlock(&pg
->rn_lock
);
2555 if (result
!= REP_PROTOCOL_SUCCESS
) {
2557 case REP_PROTOCOL_FAIL_DELETED
:
2558 case REP_PROTOCOL_FAIL_NO_RESOURCES
:
2561 case REP_PROTOCOL_FAIL_INVALID_TYPE
:
2562 case REP_PROTOCOL_FAIL_TYPE_MISMATCH
:
2564 bad_error("rc_node_find_named_child", result
);
2569 return (REP_PROTOCOL_FAIL_NOT_FOUND
);
2571 /* rn_valtype is immutable, so no locking. */
2572 if (prop
->rn_valtype
!= REP_PROTOCOL_TYPE_STRING
) {
2574 return (REP_PROTOCOL_SUCCESS
);
2577 (void) pthread_mutex_lock(&prop
->rn_lock
);
2578 for (count
= prop
->rn_values_count
, cp
= prop
->rn_values
;
2581 result
= perm_add_enabling_type(pcp
, cp
,
2582 (pg
->rn_id
.rl_ids
[ID_INSTANCE
]) ? PC_AUTH_INST
:
2584 if (result
!= REP_PROTOCOL_SUCCESS
)
2587 cp
= strchr(cp
, '\0') + 1;
2590 rc_node_rele_locked(prop
);
2596 * Assuming that ent is a service or instance node, if the pgname property
2597 * group has type pgtype, and it has a propname property with string type, add
2598 * its values as authorizations to pcp. If pgtype is NULL, it is not checked.
2601 * _DELETED - ent was deleted
2602 * _NO_RESOURCES - no resources
2603 * _NOT_FOUND - ent does not have pgname pg or propname property
2606 perm_add_ent_prop_values(permcheck_t
*pcp
, rc_node_t
*ent
, const char *pgname
,
2607 const char *pgtype
, const char *propname
)
2612 assert(!MUTEX_HELD(&ent
->rn_lock
));
2614 (void) pthread_mutex_lock(&ent
->rn_lock
);
2615 r
= rc_node_find_named_child(ent
, pgname
,
2616 REP_PROTOCOL_ENTITY_PROPERTYGRP
, &pg
);
2617 (void) pthread_mutex_unlock(&ent
->rn_lock
);
2620 case REP_PROTOCOL_SUCCESS
:
2623 case REP_PROTOCOL_FAIL_DELETED
:
2624 case REP_PROTOCOL_FAIL_NO_RESOURCES
:
2628 bad_error("rc_node_find_named_child", r
);
2632 return (REP_PROTOCOL_FAIL_NOT_FOUND
);
2634 if (pgtype
== NULL
|| strcmp(pg
->rn_type
, pgtype
) == 0) {
2635 r
= perm_add_pg_prop_values(pcp
, pg
, propname
);
2637 case REP_PROTOCOL_FAIL_DELETED
:
2638 r
= REP_PROTOCOL_FAIL_NOT_FOUND
;
2641 case REP_PROTOCOL_FAIL_NO_RESOURCES
:
2642 case REP_PROTOCOL_SUCCESS
:
2643 case REP_PROTOCOL_FAIL_NOT_FOUND
:
2647 bad_error("perm_add_pg_prop_values", r
);
2657 * If pg has a property named propname, and is string typed, add its values as
2658 * authorizations to pcp. If pg has no such property, and its parent is an
2659 * instance, walk up to the service and try doing the same with the property
2660 * of the same name from the property group of the same name. Returns
2663 * _DELETED - pg (or an ancestor) was deleted
2666 perm_add_enabling_values(permcheck_t
*pcp
, rc_node_t
*pg
, const char *propname
)
2669 char pgname
[REP_PROTOCOL_NAME_LEN
+ 1];
2673 r
= perm_add_pg_prop_values(pcp
, pg
, propname
);
2675 if (r
!= REP_PROTOCOL_FAIL_NOT_FOUND
)
2678 assert(!MUTEX_HELD(&pg
->rn_lock
));
2680 if (pg
->rn_id
.rl_ids
[ID_INSTANCE
] == 0)
2681 return (REP_PROTOCOL_SUCCESS
);
2683 sz
= strlcpy(pgname
, pg
->rn_name
, sizeof (pgname
));
2684 assert(sz
< sizeof (pgname
));
2687 * If pg is a child of an instance or snapshot, we want to compose the
2688 * authorization property with the service's (if it exists). The
2689 * snapshot case applies only to read_authorization. In all other
2690 * cases, the pg's parent will be the instance.
2692 r
= rc_node_find_ancestor(pg
, REP_PROTOCOL_ENTITY_SERVICE
, &svc
);
2693 if (r
!= REP_PROTOCOL_SUCCESS
) {
2694 assert(r
== REP_PROTOCOL_FAIL_DELETED
);
2697 assert(svc
->rn_id
.rl_type
== REP_PROTOCOL_ENTITY_SERVICE
);
2699 r
= perm_add_ent_prop_values(pcp
, svc
, pgname
, NULL
, propname
);
2703 if (r
== REP_PROTOCOL_FAIL_NOT_FOUND
)
2704 r
= REP_PROTOCOL_SUCCESS
;
/*
 * Call perm_add_enabling_values() for the "action_authorization" property of
 * the "general" property group of inst.  If the instance has no such
 * property, fall back to the same property group of its parent service.
 * Returns
 *	_DELETED	- inst (or an ancestor) was deleted
 *	_SUCCESS
 */
static int
perm_add_inst_action_auth(permcheck_t *pcp, rc_node_t *inst)
{
	int r;
	rc_node_t *svc;

	assert(inst->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);

	r = perm_add_ent_prop_values(pcp, inst, AUTH_PG_GENERAL,
	    AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);

	if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
		return (r);

	/* the instance has no action auth; try its parent service */
	r = rc_node_parent(inst, &svc);
	if (r != REP_PROTOCOL_SUCCESS) {
		assert(r == REP_PROTOCOL_FAIL_DELETED);
		return (r);
	}

	r = perm_add_ent_prop_values(pcp, svc, AUTH_PG_GENERAL,
	    AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);

	/* drop the hold rc_node_parent() gave us */
	rc_node_rele(svc);

	/* "no such property" on the service is not an error */
	return (r == REP_PROTOCOL_FAIL_NOT_FOUND ? REP_PROTOCOL_SUCCESS : r);
}
2741 #endif /* NATIVE_BUILD */
/*
 * Initialize a client-layer node pointer to the empty state: no node,
 * no cached authorization string, authorization status unknown, and
 * not marked deleted.
 */
static void
rc_node_ptr_init(rc_node_ptr_t *out)
{
	out->rnp_node = NULL;
	out->rnp_auth_string = NULL;
	out->rnp_authorized = RC_AUTH_UNKNOWN;
	out->rnp_deleted = 0;
}
2753 rc_node_ptr_free_mem(rc_node_ptr_t
*npp
)
2755 if (npp
->rnp_auth_string
!= NULL
) {
2756 free((void *)npp
->rnp_auth_string
);
2757 npp
->rnp_auth_string
= NULL
;
/*
 * Point the client-layer node pointer 'out' at 'val' (which may be NULL),
 * dropping the reference it previously held and resetting its cached
 * authorization state.
 */
static void
rc_node_assign(rc_node_ptr_t *out, rc_node_t *val)
{
	rc_node_t *cur = out->rnp_node;

	/* take a hold on the new node before publishing it */
	if (val != NULL)
		rc_node_hold(val);
	out->rnp_node = val;
	if (cur != NULL) {
		(void) pthread_mutex_lock(&cur->rn_lock);

		/*
		 * Register the ephemeral reference created by reading
		 * out->rnp_node into cur.  Note that the persistent
		 * reference we're destroying is locked by the client
		 * layer.
		 */
		rc_node_hold_ephemeral_locked(cur);

		rc_node_rele_locked(cur);
	}

	out->rnp_authorized = RC_AUTH_UNKNOWN;
	rc_node_ptr_free_mem(out);
	out->rnp_deleted = 0;
}
/*
 * Reset 'out' to point at nothing, recording (in rnp_deleted) whether the
 * reset is due to the underlying node having been deleted.
 */
static void
rc_node_clear(rc_node_ptr_t *out, int deleted)
{
	rc_node_assign(out, NULL);
	out->rnp_deleted = deleted;
}
/*
 * Copy val's node reference into 'out'.  rc_node_assign() takes a new
 * hold on the node and releases out's previous reference.
 */
static void
rc_node_ptr_assign(rc_node_ptr_t *out, const rc_node_ptr_t *val)
{
	rc_node_assign(out, val->rnp_node);
}
/*
 * rc_node_check()/RC_NODE_CHECK()
 *	generic "entry" checks, run before the use of an rc_node pointer.
 *
 * Fails with
 *	_NOT_SET	- np is NULL
 *	_DELETED	- np is dying or dead
 * On _SUCCESS np->rn_lock is held on return; on failure it is not.
 */
static int
rc_node_check_and_lock(rc_node_t *np)
{
	int result = REP_PROTOCOL_SUCCESS;

	if (np == NULL)
		return (REP_PROTOCOL_FAIL_NOT_SET);

	(void) pthread_mutex_lock(&np->rn_lock);
	/* wait out any in-progress death; fail if the node is dying */
	if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
		result = REP_PROTOCOL_FAIL_DELETED;
		(void) pthread_mutex_unlock(&np->rn_lock);
	}

	return (result);
}
/*
 * Fails with
 *	_NOT_SET - ptr is reset
 *	_DELETED - node has been deleted
 *
 * On success, returns the node with its rn_lock held; on failure,
 * returns NULL and stores the failure code in *res.
 */
static rc_node_t *
rc_node_ptr_check_and_lock(rc_node_ptr_t *npp, int *res)
{
	rc_node_t *np = npp->rnp_node;

	if (np == NULL) {
		/* distinguish "never set" from "cleared by deletion" */
		if (npp->rnp_deleted)
			*res = REP_PROTOCOL_FAIL_DELETED;
		else
			*res = REP_PROTOCOL_FAIL_NOT_SET;
		return (NULL);
	}

	(void) pthread_mutex_lock(&np->rn_lock);
	if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		/* the node died underneath us: reset the client pointer */
		rc_node_clear(npp, 1);
		*res = REP_PROTOCOL_FAIL_DELETED;
		return (NULL);
	}

	return (np);
}
2850 #define RC_NODE_CHECK_AND_LOCK(n) { \
2852 if ((rc__res = rc_node_check_and_lock(n)) != REP_PROTOCOL_SUCCESS) \
2856 #define RC_NODE_CHECK(n) { \
2857 RC_NODE_CHECK_AND_LOCK(n); \
2858 (void) pthread_mutex_unlock(&(n)->rn_lock); \
2861 #define RC_NODE_CHECK_AND_HOLD(n) { \
2862 RC_NODE_CHECK_AND_LOCK(n); \
2863 rc_node_hold_locked(n); \
2864 (void) pthread_mutex_unlock(&(n)->rn_lock); \
2867 #define RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp) { \
2869 if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == NULL) \
2873 #define RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, mem) { \
2875 if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == \
2877 if ((mem) != NULL) \
2883 #define RC_NODE_PTR_GET_CHECK(np, npp) { \
2884 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); \
2885 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2888 #define RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp) { \
2889 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); \
2890 rc_node_hold_locked(np); \
2891 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2894 #define HOLD_FLAG_OR_RETURN(np, flag) { \
2895 assert(MUTEX_HELD(&(np)->rn_lock)); \
2896 assert(!((np)->rn_flags & RC_NODE_DEAD)); \
2897 if (!rc_node_hold_flag((np), flag)) { \
2898 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2899 return (REP_PROTOCOL_FAIL_DELETED); \
2903 #define HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, flag, mem) { \
2904 assert(MUTEX_HELD(&(np)->rn_lock)); \
2905 if (!rc_node_hold_flag((np), flag)) { \
2906 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2907 assert((np) == (npp)->rnp_node); \
2908 rc_node_clear(npp, 1); \
2909 if ((mem) != NULL) \
2911 return (REP_PROTOCOL_FAIL_DELETED); \
/*
 * Hand out a reference to the "localhost" scope.  Fails with
 * _TYPE_MISMATCH if the caller asked for any entity type other than a
 * scope.
 */
static int
rc_local_scope(uint32_t type, rc_node_ptr_t *out)
{
	if (type != REP_PROTOCOL_ENTITY_SCOPE) {
		rc_node_clear(out, 0);
		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
	}

	/*
	 * the main scope never gets destroyed
	 */
	rc_node_assign(out, rc_scope);

	return (REP_PROTOCOL_SUCCESS);
}
/*
 * Fails with
 *	_NOT_SET - npp is not set
 *	_DELETED - the node npp pointed at has been deleted
 *	_TYPE_MISMATCH - type is not _SCOPE
 *	_NOT_FOUND - scope has no parent
 */
static int
rc_scope_parent_scope(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
{
	rc_node_t *np;

	rc_node_clear(out, 0);

	/* may return _NOT_SET or _DELETED on our behalf */
	RC_NODE_PTR_GET_CHECK(np, npp);

	if (type != REP_PROTOCOL_ENTITY_SCOPE)
		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);

	/* the local scope is the root of the entity tree: no parent */
	return (REP_PROTOCOL_FAIL_NOT_FOUND);
}
2953 static int rc_node_pg_check_read_protect(rc_node_t
*);
2966 rc_node_name(rc_node_ptr_t
*npp
, char *buf
, size_t sz
, uint32_t answertype
,
2972 assert(sz
== *sz_out
);
2974 RC_NODE_PTR_GET_CHECK(np
, npp
);
2976 if (np
->rn_id
.rl_type
== REP_PROTOCOL_ENTITY_CPROPERTYGRP
) {
2977 np
= np
->rn_cchain
[0];
2981 switch (answertype
) {
2982 case RP_ENTITY_NAME_NAME
:
2983 if (np
->rn_name
== NULL
)
2984 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE
);
2985 actual
= strlcpy(buf
, np
->rn_name
, sz
);
2987 case RP_ENTITY_NAME_PGTYPE
:
2988 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_PROPERTYGRP
)
2989 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE
);
2990 actual
= strlcpy(buf
, np
->rn_type
, sz
);
2992 case RP_ENTITY_NAME_PGFLAGS
:
2993 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_PROPERTYGRP
)
2994 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE
);
2995 actual
= snprintf(buf
, sz
, "%d", np
->rn_pgflags
);
2997 case RP_ENTITY_NAME_SNAPLEVEL_SCOPE
:
2998 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_SNAPLEVEL
)
2999 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE
);
3000 actual
= strlcpy(buf
, np
->rn_snaplevel
->rsl_scope
, sz
);
3002 case RP_ENTITY_NAME_SNAPLEVEL_SERVICE
:
3003 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_SNAPLEVEL
)
3004 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE
);
3005 actual
= strlcpy(buf
, np
->rn_snaplevel
->rsl_service
, sz
);
3007 case RP_ENTITY_NAME_SNAPLEVEL_INSTANCE
:
3008 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_SNAPLEVEL
)
3009 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE
);
3010 if (np
->rn_snaplevel
->rsl_instance
== NULL
)
3011 return (REP_PROTOCOL_FAIL_NOT_FOUND
);
3012 actual
= strlcpy(buf
, np
->rn_snaplevel
->rsl_instance
, sz
);
3014 case RP_ENTITY_NAME_PGREADPROT
:
3018 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_PROPERTYGRP
)
3019 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE
);
3020 ret
= rc_node_pg_check_read_protect(np
);
3021 assert(ret
!= REP_PROTOCOL_FAIL_TYPE_MISMATCH
);
3023 case REP_PROTOCOL_FAIL_PERMISSION_DENIED
:
3024 actual
= snprintf(buf
, sz
, "1");
3026 case REP_PROTOCOL_SUCCESS
:
3027 actual
= snprintf(buf
, sz
, "0");
3035 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
3038 return (REP_PROTOCOL_FAIL_TRUNCATED
);
3041 return (REP_PROTOCOL_SUCCESS
);
/*
 * Report the value type of the property npp points at.  Fails with
 *	_NOT_SET	- npp is not set
 *	_DELETED	- the node has been deleted
 *	_TYPE_MISMATCH	- npp does not point at a property
 */
static int
rc_node_get_property_type(rc_node_ptr_t *npp, rep_protocol_value_type_t *out)
{
	rc_node_t *np;

	RC_NODE_PTR_GET_CHECK(np, npp);

	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);

	/* rn_valtype is immutable, so no locking is needed to read it */
	*out = np->rn_valtype;

	return (REP_PROTOCOL_SUCCESS);
}
3060 * Get np's parent. If np is deleted, returns _DELETED. Otherwise puts a hold
3061 * on the parent, returns a pointer to it in *out, and returns _SUCCESS.
3064 rc_node_parent(rc_node_t
*np
, rc_node_t
**out
)
3069 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_CPROPERTYGRP
) {
3070 RC_NODE_CHECK_AND_LOCK(np
);
3072 np
= np
->rn_cchain
[0];
3073 RC_NODE_CHECK_AND_LOCK(np
);
3077 rc_node_hold_locked(np
); /* simplifies the remainder */
3080 if (!rc_node_wait_flag(np
,
3081 RC_NODE_IN_TX
| RC_NODE_USING_PARENT
)) {
3082 rc_node_rele_locked(np
);
3083 return (REP_PROTOCOL_FAIL_DELETED
);
3086 if (!(np
->rn_flags
& RC_NODE_OLD
))
3089 rc_node_rele_locked(np
);
3090 np
= cache_lookup(&np_orig
->rn_id
);
3091 assert(np
!= np_orig
);
3095 (void) pthread_mutex_lock(&np
->rn_lock
);
3098 /* guaranteed to succeed without dropping the lock */
3099 if (!rc_node_hold_flag(np
, RC_NODE_USING_PARENT
)) {
3100 (void) pthread_mutex_unlock(&np
->rn_lock
);
3103 return (REP_PROTOCOL_FAIL_DELETED
);
3106 assert(np
->rn_parent
!= NULL
);
3107 pnp
= np
->rn_parent
;
3108 (void) pthread_mutex_unlock(&np
->rn_lock
);
3110 (void) pthread_mutex_lock(&pnp
->rn_lock
);
3111 (void) pthread_mutex_lock(&np
->rn_lock
);
3112 rc_node_rele_flag(np
, RC_NODE_USING_PARENT
);
3113 (void) pthread_mutex_unlock(&np
->rn_lock
);
3115 rc_node_hold_locked(pnp
);
3117 (void) pthread_mutex_unlock(&pnp
->rn_lock
);
3121 return (REP_PROTOCOL_SUCCESS
);
3125 return (REP_PROTOCOL_FAIL_DELETED
);
/*
 * Get the parent of the node npp points at; a thin wrapper that first
 * validates npp.  On _SUCCESS, *out is held (see rc_node_parent()).
 */
static int
rc_node_ptr_parent(rc_node_ptr_t *npp, rc_node_t **out)
{
	rc_node_t *np;

	RC_NODE_PTR_GET_CHECK(np, npp);

	return (rc_node_parent(np, out));
}
/*
 * Fails with
 *	_NOT_SET - npp is not set
 *	_DELETED - the node npp pointed at has been deleted
 *	_TYPE_MISMATCH - npp's node's parent is not of type type
 *
 * If npp points to a scope, can also fail with
 *	_NOT_FOUND - scope has no parent
 */
static int
rc_node_get_parent(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
{
	rc_node_t *pnp;
	int rc;

	/* scopes are special-cased: they have no parent node */
	if (npp->rnp_node != NULL &&
	    npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE)
		return (rc_scope_parent_scope(npp, type, out));

	if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS) {
		rc_node_clear(out, 0);
		return (rc);
	}

	if (type != pnp->rn_id.rl_type) {
		/* drop the hold rc_node_ptr_parent() gave us */
		rc_node_rele(pnp);
		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
	}

	rc_node_assign(out, pnp);
	/* out now has its own hold; release ours */
	rc_node_rele(pnp);

	return (REP_PROTOCOL_SUCCESS);
}
/*
 * Report the entity type of the parent of the node npp points at.
 * Scopes report _SCOPE, since they have no parent node.  Fails with
 * whatever rc_node_ptr_parent() fails with (_NOT_SET, _DELETED).
 */
static int
rc_node_parent_type(rc_node_ptr_t *npp, uint32_t *type_out)
{
	rc_node_t *pnp;
	int rc;

	if (npp->rnp_node != NULL &&
	    npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE) {
		*type_out = REP_PROTOCOL_ENTITY_SCOPE;
		return (REP_PROTOCOL_SUCCESS);
	}

	if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS)
		return (rc);

	*type_out = pnp->rn_id.rl_type;

	/* drop the hold rc_node_ptr_parent() gave us */
	rc_node_rele(pnp);

	return (REP_PROTOCOL_SUCCESS);
}
3202 * _INVALID_TYPE - type is invalid
3203 * _TYPE_MISMATCH - np doesn't carry children of type type
3204 * _DELETED - np has been deleted
3205 * _NOT_FOUND - no child with that name/type combo found
3210 rc_node_get_child(rc_node_ptr_t
*npp
, const char *name
, uint32_t type
,
3211 rc_node_ptr_t
*outp
)
3214 rc_node_t
*child
= NULL
;
3217 RC_NODE_PTR_GET_CHECK_AND_LOCK(np
, npp
);
3218 if ((ret
= rc_check_type_name(type
, name
)) == REP_PROTOCOL_SUCCESS
) {
3219 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_CPROPERTYGRP
) {
3220 ret
= rc_node_find_named_child(np
, name
, type
, &child
);
3222 (void) pthread_mutex_unlock(&np
->rn_lock
);
3223 ret
= REP_PROTOCOL_SUCCESS
;
3224 for (idx
= 0; idx
< COMPOSITION_DEPTH
; idx
++) {
3225 cp
= np
->rn_cchain
[idx
];
3228 RC_NODE_CHECK_AND_LOCK(cp
);
3229 ret
= rc_node_find_named_child(cp
, name
, type
,
3231 (void) pthread_mutex_unlock(&cp
->rn_lock
);
3233 * loop only if we succeeded, but no child of
3234 * the correct name was found.
3236 if (ret
!= REP_PROTOCOL_SUCCESS
||
3240 (void) pthread_mutex_lock(&np
->rn_lock
);
3243 (void) pthread_mutex_unlock(&np
->rn_lock
);
3245 if (ret
== REP_PROTOCOL_SUCCESS
) {
3246 rc_node_assign(outp
, child
);
3248 rc_node_rele(child
);
3250 ret
= REP_PROTOCOL_FAIL_NOT_FOUND
;
3252 rc_node_assign(outp
, NULL
);
3258 rc_node_update(rc_node_ptr_t
*npp
)
3261 rc_node_t
*np
= npp
->rnp_node
;
3263 rc_node_t
*cpg
= NULL
;
3266 np
->rn_id
.rl_type
== REP_PROTOCOL_ENTITY_CPROPERTYGRP
) {
3268 * If we're updating a composed property group, actually
3269 * update the top-level property group & return the
3270 * appropriate value. But leave *nnp pointing at us.
3273 np
= np
->rn_cchain
[0];
3278 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_PROPERTYGRP
&&
3279 np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_SNAPSHOT
)
3280 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
3283 bp
= cache_hold(np
->rn_hash
);
3284 nnp
= cache_lookup_unlocked(bp
, &np
->rn_id
);
3287 rc_node_clear(npp
, 1);
3288 return (REP_PROTOCOL_FAIL_DELETED
);
3291 * grab the lock before dropping the cache bucket, so
3292 * that no one else can sneak in
3294 (void) pthread_mutex_lock(&nnp
->rn_lock
);
3297 if (!(nnp
->rn_flags
& RC_NODE_IN_TX
) ||
3298 !rc_node_wait_flag(nnp
, RC_NODE_IN_TX
))
3301 rc_node_rele_locked(nnp
);
3305 * If it is dead, we want to update it so that it will continue to
3306 * report being dead.
3308 if (nnp
->rn_flags
& RC_NODE_DEAD
) {
3309 (void) pthread_mutex_unlock(&nnp
->rn_lock
);
3310 if (nnp
!= np
&& cpg
== NULL
)
3311 rc_node_assign(npp
, nnp
); /* updated */
3313 return (REP_PROTOCOL_FAIL_DELETED
);
3316 assert(!(nnp
->rn_flags
& RC_NODE_OLD
));
3317 (void) pthread_mutex_unlock(&nnp
->rn_lock
);
3319 if (nnp
!= np
&& cpg
== NULL
)
3320 rc_node_assign(npp
, nnp
); /* updated */
3324 return ((nnp
== np
)? REP_PROTOCOL_SUCCESS
: REP_PROTOCOL_DONE
);
3328 * does a generic modification check, for creation, deletion, and snapshot
3329 * management only. Property group transactions have different checks.
3331 * The string returned to *match_auth must be freed.
3333 static perm_status_t
3334 rc_node_modify_permission_check(char **match_auth
)
3337 perm_status_t granted
= PERM_GRANTED
;
3342 if (!client_is_privileged()) {
3343 granted
= PERM_DENIED
;
3347 if (is_main_repository
== 0)
3348 return (PERM_GRANTED
);
3351 rc
= perm_add_enabling(pcp
, AUTH_MODIFY
);
3353 if (rc
== REP_PROTOCOL_SUCCESS
) {
3354 granted
= perm_granted(pcp
);
3356 if ((granted
== PERM_GRANTED
) ||
3357 (granted
== PERM_DENIED
)) {
3359 * Copy off the authorization
3360 * string before freeing pcp.
3363 strdup(pcp
->pc_auth_string
);
3364 if (*match_auth
== NULL
)
3365 granted
= PERM_FAIL
;
3368 granted
= PERM_FAIL
;
3373 granted
= PERM_FAIL
;
3377 #endif /* NATIVE_BUILD */
3381 * Native builds are done to create svc.configd-native. This program runs
3382 * only on the Solaris build machines to create the seed repository, and it
3383 * is compiled against the build machine's header files. The ADT_smf_*
3384 * symbols may not be defined in these header files. For this reason
3385 * smf_annotation_event(), smf_audit_event() and special_property_event()
3386 * are not compiled for native builds.
3388 #ifndef NATIVE_BUILD
3391 * This function generates an annotation audit event if one has been setup.
3392 * Annotation events should only be generated immediately before the audit
3393 * record from the first attempt to modify the repository from a client
3394 * which has requested an annotation.
3397 smf_annotation_event(int status
, int return_val
)
3399 adt_session_data_t
*session
;
3400 adt_event_data_t
*event
= NULL
;
3401 char file
[MAXPATHLEN
];
3402 char operation
[REP_PROTOCOL_NAME_LEN
];
3404 /* Don't audit if we're using an alternate repository. */
3405 if (is_main_repository
== 0)
3408 if (client_annotation_needed(operation
, sizeof (operation
), file
,
3409 sizeof (file
)) == 0) {
3413 (void) strlcpy(file
, "NO FILE", sizeof (file
));
3415 if (operation
[0] == 0) {
3416 (void) strlcpy(operation
, "NO OPERATION",
3417 sizeof (operation
));
3419 if ((session
= get_audit_session()) == NULL
)
3421 if ((event
= adt_alloc_event(session
, ADT_smf_annotation
)) == NULL
) {
3422 uu_warn("smf_annotation_event cannot allocate event "
3423 "data. %s\n", strerror(errno
));
3426 event
->adt_smf_annotation
.operation
= operation
;
3427 event
->adt_smf_annotation
.file
= file
;
3428 if (adt_put_event(event
, status
, return_val
) == 0) {
3429 client_annotation_finished();
3431 uu_warn("smf_annotation_event failed to put event. "
3432 "%s\n", strerror(errno
));
3434 adt_free_event(event
);
3439 * smf_audit_event interacts with the security auditing system to generate
3440 * an audit event structure. It establishes an audit session and allocates
3441 * an audit event. The event is filled in from the audit data, and
3442 * adt_put_event is called to generate the event.
3445 smf_audit_event(au_event_t event_id
, int status
, int return_val
,
3446 audit_event_data_t
*data
)
3448 #ifndef NATIVE_BUILD
3452 adt_session_data_t
*session
;
3453 adt_event_data_t
*event
= NULL
;
3455 /* Don't audit if we're using an alternate repository */
3456 if (is_main_repository
== 0)
3459 smf_annotation_event(status
, return_val
);
3460 if ((session
= get_audit_session()) == NULL
)
3462 if ((event
= adt_alloc_event(session
, event_id
)) == NULL
) {
3463 uu_warn("smf_audit_event cannot allocate event "
3464 "data. %s\n", strerror(errno
));
3469 * Handle possibility of NULL authorization strings, FMRIs and
3472 if (data
->ed_auth
== NULL
) {
3473 auth_used
= "PRIVILEGED";
3475 auth_used
= data
->ed_auth
;
3477 if (data
->ed_fmri
== NULL
) {
3478 syslog(LOG_WARNING
, "smf_audit_event called with "
3479 "empty FMRI string");
3480 fmri
= "UNKNOWN FMRI";
3482 fmri
= data
->ed_fmri
;
3484 if (data
->ed_prop_value
== NULL
) {
3487 prop_value
= data
->ed_prop_value
;
3490 /* Fill in the event data. */
3492 case ADT_smf_attach_snap
:
3493 event
->adt_smf_attach_snap
.auth_used
= auth_used
;
3494 event
->adt_smf_attach_snap
.old_fmri
= data
->ed_old_fmri
;
3495 event
->adt_smf_attach_snap
.old_name
= data
->ed_old_name
;
3496 event
->adt_smf_attach_snap
.new_fmri
= fmri
;
3497 event
->adt_smf_attach_snap
.new_name
= data
->ed_snapname
;
3499 case ADT_smf_change_prop
:
3500 event
->adt_smf_change_prop
.auth_used
= auth_used
;
3501 event
->adt_smf_change_prop
.fmri
= fmri
;
3502 event
->adt_smf_change_prop
.type
= data
->ed_type
;
3503 event
->adt_smf_change_prop
.value
= prop_value
;
3506 event
->adt_smf_clear
.auth_used
= auth_used
;
3507 event
->adt_smf_clear
.fmri
= fmri
;
3509 case ADT_smf_create
:
3510 event
->adt_smf_create
.fmri
= fmri
;
3511 event
->adt_smf_create
.auth_used
= auth_used
;
3513 case ADT_smf_create_npg
:
3514 event
->adt_smf_create_npg
.auth_used
= auth_used
;
3515 event
->adt_smf_create_npg
.fmri
= fmri
;
3516 event
->adt_smf_create_npg
.type
= data
->ed_type
;
3518 case ADT_smf_create_pg
:
3519 event
->adt_smf_create_pg
.auth_used
= auth_used
;
3520 event
->adt_smf_create_pg
.fmri
= fmri
;
3521 event
->adt_smf_create_pg
.type
= data
->ed_type
;
3523 case ADT_smf_create_prop
:
3524 event
->adt_smf_create_prop
.auth_used
= auth_used
;
3525 event
->adt_smf_create_prop
.fmri
= fmri
;
3526 event
->adt_smf_create_prop
.type
= data
->ed_type
;
3527 event
->adt_smf_create_prop
.value
= prop_value
;
3529 case ADT_smf_create_snap
:
3530 event
->adt_smf_create_snap
.auth_used
= auth_used
;
3531 event
->adt_smf_create_snap
.fmri
= fmri
;
3532 event
->adt_smf_create_snap
.name
= data
->ed_snapname
;
3534 case ADT_smf_degrade
:
3535 event
->adt_smf_degrade
.auth_used
= auth_used
;
3536 event
->adt_smf_degrade
.fmri
= fmri
;
3538 case ADT_smf_delete
:
3539 event
->adt_smf_delete
.fmri
= fmri
;
3540 event
->adt_smf_delete
.auth_used
= auth_used
;
3542 case ADT_smf_delete_npg
:
3543 event
->adt_smf_delete_npg
.auth_used
= auth_used
;
3544 event
->adt_smf_delete_npg
.fmri
= fmri
;
3545 event
->adt_smf_delete_npg
.type
= data
->ed_type
;
3547 case ADT_smf_delete_pg
:
3548 event
->adt_smf_delete_pg
.auth_used
= auth_used
;
3549 event
->adt_smf_delete_pg
.fmri
= fmri
;
3550 event
->adt_smf_delete_pg
.type
= data
->ed_type
;
3552 case ADT_smf_delete_prop
:
3553 event
->adt_smf_delete_prop
.auth_used
= auth_used
;
3554 event
->adt_smf_delete_prop
.fmri
= fmri
;
3556 case ADT_smf_delete_snap
:
3557 event
->adt_smf_delete_snap
.auth_used
= auth_used
;
3558 event
->adt_smf_delete_snap
.fmri
= fmri
;
3559 event
->adt_smf_delete_snap
.name
= data
->ed_snapname
;
3561 case ADT_smf_disable
:
3562 event
->adt_smf_disable
.auth_used
= auth_used
;
3563 event
->adt_smf_disable
.fmri
= fmri
;
3565 case ADT_smf_enable
:
3566 event
->adt_smf_enable
.auth_used
= auth_used
;
3567 event
->adt_smf_enable
.fmri
= fmri
;
3569 case ADT_smf_immediate_degrade
:
3570 event
->adt_smf_immediate_degrade
.auth_used
= auth_used
;
3571 event
->adt_smf_immediate_degrade
.fmri
= fmri
;
3573 case ADT_smf_immediate_maintenance
:
3574 event
->adt_smf_immediate_maintenance
.auth_used
= auth_used
;
3575 event
->adt_smf_immediate_maintenance
.fmri
= fmri
;
3577 case ADT_smf_immtmp_maintenance
:
3578 event
->adt_smf_immtmp_maintenance
.auth_used
= auth_used
;
3579 event
->adt_smf_immtmp_maintenance
.fmri
= fmri
;
3581 case ADT_smf_maintenance
:
3582 event
->adt_smf_maintenance
.auth_used
= auth_used
;
3583 event
->adt_smf_maintenance
.fmri
= fmri
;
3585 case ADT_smf_milestone
:
3586 event
->adt_smf_milestone
.auth_used
= auth_used
;
3587 event
->adt_smf_milestone
.fmri
= fmri
;
3589 case ADT_smf_read_prop
:
3590 event
->adt_smf_read_prop
.auth_used
= auth_used
;
3591 event
->adt_smf_read_prop
.fmri
= fmri
;
3593 case ADT_smf_refresh
:
3594 event
->adt_smf_refresh
.auth_used
= auth_used
;
3595 event
->adt_smf_refresh
.fmri
= fmri
;
3597 case ADT_smf_restart
:
3598 event
->adt_smf_restart
.auth_used
= auth_used
;
3599 event
->adt_smf_restart
.fmri
= fmri
;
3601 case ADT_smf_tmp_disable
:
3602 event
->adt_smf_tmp_disable
.auth_used
= auth_used
;
3603 event
->adt_smf_tmp_disable
.fmri
= fmri
;
3605 case ADT_smf_tmp_enable
:
3606 event
->adt_smf_tmp_enable
.auth_used
= auth_used
;
3607 event
->adt_smf_tmp_enable
.fmri
= fmri
;
3609 case ADT_smf_tmp_maintenance
:
3610 event
->adt_smf_tmp_maintenance
.auth_used
= auth_used
;
3611 event
->adt_smf_tmp_maintenance
.fmri
= fmri
;
3614 abort(); /* Need to cover all SMF event IDs */
3617 if (adt_put_event(event
, status
, return_val
) != 0) {
3618 uu_warn("smf_audit_event failed to put event. %s\n",
3621 adt_free_event(event
);
3625 #ifndef NATIVE_BUILD
3627 * Determine if the combination of the property group at pg_name and the
3628 * property at prop_name are in the set of special startd properties. If
3629 * they are, a special audit event will be generated.
3632 special_property_event(audit_event_data_t
*evdp
, const char *prop_name
,
3633 char *pg_name
, int status
, int return_val
, tx_commit_data_t
*tx_data
,
3636 au_event_t event_id
;
3637 audit_special_prop_item_t search_key
;
3638 audit_special_prop_item_t
*found
;
3640 /* Use bsearch to find the special property information. */
3641 search_key
.api_prop_name
= prop_name
;
3642 search_key
.api_pg_name
= pg_name
;
3643 found
= (audit_special_prop_item_t
*)bsearch(&search_key
,
3644 special_props_list
, SPECIAL_PROP_COUNT
,
3645 sizeof (special_props_list
[0]), special_prop_compare
);
3646 if (found
== NULL
) {
3647 /* Not a special property. */
3651 /* Get the event id */
3652 if (found
->api_event_func
== NULL
) {
3653 event_id
= found
->api_event_id
;
3655 if ((*found
->api_event_func
)(tx_data
, cmd_no
,
3656 found
->api_pg_name
, &event_id
) < 0)
3660 /* Generate the event. */
3661 smf_audit_event(event_id
, status
, return_val
, evdp
);
3663 #endif /* NATIVE_BUILD */
3666 * Return a pointer to a string containing all the values of the command
3667 * specified by cmd_no with each value enclosed in quotes. It is up to the
3668 * caller to free the memory at the returned pointer.
3671 generate_value_list(tx_commit_data_t
*tx_data
, size_t cmd_no
)
3674 const char *cur_value
;
3675 size_t byte_count
= 0;
3678 size_t str_size
= 0;
3679 char *values
= NULL
;
3682 if (tx_cmd_nvalues(tx_data
, cmd_no
, &nvalues
) != REP_PROTOCOL_SUCCESS
)
3685 * First determine the size of the buffer that we will need. We
3686 * will represent each property value surrounded by quotes with a
3687 * space separating the values. Thus, we need to find the total
3688 * size of all the value strings and add 3 for each value.
3690 * There is one catch, though. We need to escape any internal
3691 * quote marks in the values. So for each quote in the value we
3692 * need to add another byte to the buffer size.
3694 for (i
= 0; i
< nvalues
; i
++) {
3695 if (tx_cmd_value(tx_data
, cmd_no
, i
, &cur_value
) !=
3696 REP_PROTOCOL_SUCCESS
)
3698 for (cp
= cur_value
; *cp
!= 0; cp
++) {
3699 byte_count
+= (*cp
== '"') ? 2 : 1;
3701 byte_count
+= 3; /* surrounding quotes & space */
3703 byte_count
++; /* nul terminator */
3704 values
= malloc(byte_count
);
3709 /* Now build up the string of values. */
3710 for (i
= 0; i
< nvalues
; i
++) {
3711 if (tx_cmd_value(tx_data
, cmd_no
, i
, &cur_value
) !=
3712 REP_PROTOCOL_SUCCESS
) {
3716 (void) strlcat(values
, "\"", byte_count
);
3717 for (cp
= cur_value
, vp
= values
+ strlen(values
);
3727 str_size
= strlcat(values
, "\" ", byte_count
);
3728 assert(str_size
< byte_count
);
3731 values
[str_size
- 1] = 0; /* get rid of trailing space */
3736 * generate_property_events takes the transaction commit data at tx_data
3737 * and generates an audit event for each command.
3739 * Native builds are done to create svc.configd-native. This program runs
3740 * only on the Solaris build machines to create the seed repository. Thus,
3741 * no audit events should be generated when running svc.configd-native.
3744 generate_property_events(
3745 tx_commit_data_t
*tx_data
,
3746 char *pg_fmri
, /* FMRI of property group */
3751 #ifndef NATIVE_BUILD
3752 enum rep_protocol_transaction_action action
;
3753 audit_event_data_t audit_data
;
3757 au_event_t event_id
;
3758 char fmri
[REP_PROTOCOL_FMRI_LEN
];
3759 char pg_name
[REP_PROTOCOL_NAME_LEN
];
3760 char *pg_end
; /* End of prop. group fmri */
3761 const char *prop_name
;
3764 enum rep_protocol_responseid rc
;
3767 /* Make sure we have something to do. */
3768 if (tx_data
== NULL
)
3770 if ((count
= tx_cmd_count(tx_data
)) == 0)
3773 /* Copy the property group fmri */
3775 pg_end
+= strlcpy(fmri
, pg_fmri
, sizeof (fmri
));
3778 * Get the property group name. It is the first component after
3779 * the last occurance of SCF_FMRI_PROPERTYGRP_PREFIX in the fmri.
3781 cp
= strstr(pg_fmri
, SCF_FMRI_PROPERTYGRP_PREFIX
);
3785 cp
+= strlen(SCF_FMRI_PROPERTYGRP_PREFIX
);
3786 (void) strlcpy(pg_name
, cp
, sizeof (pg_name
));
3789 audit_data
.ed_auth
= auth_string
;
3790 audit_data
.ed_fmri
= fmri
;
3791 audit_data
.ed_type
= prop_type
;
3794 * Property type is two characters (see
3795 * rep_protocol_value_type_t), so terminate the string.
3799 for (cmd_no
= 0; cmd_no
< count
; cmd_no
++) {
3800 /* Construct FMRI of the property */
3802 if (tx_cmd_prop(tx_data
, cmd_no
, &prop_name
) !=
3803 REP_PROTOCOL_SUCCESS
) {
3806 rc
= rc_concat_fmri_element(fmri
, sizeof (fmri
), &sz_out
,
3807 prop_name
, REP_PROTOCOL_ENTITY_PROPERTY
);
3808 if (rc
!= REP_PROTOCOL_SUCCESS
) {
3810 * If we can't get the FMRI, we'll abandon this
3816 /* Generate special property event if necessary. */
3817 special_property_event(&audit_data
, prop_name
, pg_name
,
3818 auth_status
, auth_ret_value
, tx_data
, cmd_no
);
3820 /* Capture rest of audit data. */
3821 if (tx_cmd_prop_type(tx_data
, cmd_no
, &ptype
) !=
3822 REP_PROTOCOL_SUCCESS
) {
3825 prop_type
[0] = REP_PROTOCOL_BASE_TYPE(ptype
);
3826 prop_type
[1] = REP_PROTOCOL_SUBTYPE(ptype
);
3827 audit_data
.ed_prop_value
= generate_value_list(tx_data
, cmd_no
);
3829 /* Determine the event type. */
3830 if (tx_cmd_action(tx_data
, cmd_no
, &action
) !=
3831 REP_PROTOCOL_SUCCESS
) {
3832 free(audit_data
.ed_prop_value
);
3836 case REP_PROTOCOL_TX_ENTRY_NEW
:
3837 event_id
= ADT_smf_create_prop
;
3839 case REP_PROTOCOL_TX_ENTRY_CLEAR
:
3840 event_id
= ADT_smf_change_prop
;
3842 case REP_PROTOCOL_TX_ENTRY_REPLACE
:
3843 event_id
= ADT_smf_change_prop
;
3845 case REP_PROTOCOL_TX_ENTRY_DELETE
:
3846 event_id
= ADT_smf_delete_prop
;
3849 assert(0); /* Missing a case */
3850 free(audit_data
.ed_prop_value
);
3854 /* Generate the event. */
3855 smf_audit_event(event_id
, auth_status
, auth_ret_value
,
3857 free(audit_data
.ed_prop_value
);
3859 #endif /* NATIVE_BUILD */
3864 * _DELETED - node has been deleted
3865 * _NOT_SET - npp is reset
3866 * _NOT_APPLICABLE - type is _PROPERTYGRP
3867 * _INVALID_TYPE - node is corrupt or type is invalid
3868 * _TYPE_MISMATCH - node cannot have children of type type
3869 * _BAD_REQUEST - name is invalid
3870 * cannot create children for this type of node
3871 * _NO_RESOURCES - out of memory, or could not allocate new id
3872 * _PERMISSION_DENIED
3875 * _EXISTS - child already exists
3876 * _TRUNCATED - truncated FMRI for the audit record
3879 rc_node_create_child(rc_node_ptr_t
*npp
, uint32_t type
, const char *name
,
3883 rc_node_t
*cp
= NULL
;
3885 perm_status_t perm_rc
;
3887 char fmri
[REP_PROTOCOL_FMRI_LEN
];
3888 audit_event_data_t audit_data
;
3890 rc_node_clear(cpp
, 0);
3893 * rc_node_modify_permission_check() must be called before the node
3894 * is locked. This is because the library functions that check
3895 * authorizations can trigger calls back into configd.
3897 perm_rc
= rc_node_modify_permission_check(&audit_data
.ed_auth
);
3901 * We continue in this case, so that an audit event can be
3902 * generated later in the function.
3908 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED
);
3910 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
3912 bad_error(rc_node_modify_permission_check
, perm_rc
);
3915 RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np
, npp
, audit_data
.ed_auth
);
3917 audit_data
.ed_fmri
= fmri
;
3920 * there is a separate interface for creating property groups
3922 if (type
== REP_PROTOCOL_ENTITY_PROPERTYGRP
) {
3923 (void) pthread_mutex_unlock(&np
->rn_lock
);
3924 free(audit_data
.ed_auth
);
3925 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE
);
3928 if (np
->rn_id
.rl_type
== REP_PROTOCOL_ENTITY_CPROPERTYGRP
) {
3929 (void) pthread_mutex_unlock(&np
->rn_lock
);
3930 np
= np
->rn_cchain
[0];
3931 if ((rc
= rc_node_check_and_lock(np
)) != REP_PROTOCOL_SUCCESS
) {
3932 free(audit_data
.ed_auth
);
3937 if ((rc
= rc_check_parent_child(np
->rn_id
.rl_type
, type
)) !=
3938 REP_PROTOCOL_SUCCESS
) {
3939 (void) pthread_mutex_unlock(&np
->rn_lock
);
3940 free(audit_data
.ed_auth
);
3943 if ((rc
= rc_check_type_name(type
, name
)) != REP_PROTOCOL_SUCCESS
) {
3944 (void) pthread_mutex_unlock(&np
->rn_lock
);
3945 free(audit_data
.ed_auth
);
3949 if ((rc
= rc_get_fmri_and_concat(np
, fmri
, sizeof (fmri
), &sz_out
,
3950 name
, type
)) != REP_PROTOCOL_SUCCESS
) {
3951 (void) pthread_mutex_unlock(&np
->rn_lock
);
3952 free(audit_data
.ed_auth
);
3955 if (perm_rc
== PERM_DENIED
) {
3956 (void) pthread_mutex_unlock(&np
->rn_lock
);
3957 smf_audit_event(ADT_smf_create
, ADT_FAILURE
,
3958 ADT_FAIL_VALUE_AUTH
, &audit_data
);
3959 free(audit_data
.ed_auth
);
3960 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED
);
3963 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np
, npp
, RC_NODE_CREATING_CHILD
,
3964 audit_data
.ed_auth
);
3965 (void) pthread_mutex_unlock(&np
->rn_lock
);
3967 rc
= object_create(np
, type
, name
, &cp
);
3968 assert(rc
!= REP_PROTOCOL_FAIL_NOT_APPLICABLE
);
3970 if (rc
== REP_PROTOCOL_SUCCESS
) {
3971 rc_node_assign(cpp
, cp
);
3975 (void) pthread_mutex_lock(&np
->rn_lock
);
3976 rc_node_rele_flag(np
, RC_NODE_CREATING_CHILD
);
3977 (void) pthread_mutex_unlock(&np
->rn_lock
);
3979 if (rc
== REP_PROTOCOL_SUCCESS
) {
3980 smf_audit_event(ADT_smf_create
, ADT_SUCCESS
, ADT_SUCCESS
,
3984 free(audit_data
.ed_auth
);
3990 rc_node_create_child_pg(rc_node_ptr_t
*npp
, uint32_t type
, const char *name
,
3991 const char *pgtype
, uint32_t flags
, rc_node_ptr_t
*cpp
)
3997 perm_status_t granted
;
3998 char fmri
[REP_PROTOCOL_FMRI_LEN
];
3999 audit_event_data_t audit_data
;
4000 au_event_t event_id
;
4003 audit_data
.ed_auth
= NULL
;
4004 audit_data
.ed_fmri
= fmri
;
4005 audit_data
.ed_type
= (char *)pgtype
;
4007 rc_node_clear(cpp
, 0);
4009 /* verify flags is valid */
4010 if (flags
& ~SCF_PG_FLAG_NONPERSISTENT
)
4011 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
4013 RC_NODE_PTR_GET_CHECK_AND_HOLD(np
, npp
);
4015 if (type
!= REP_PROTOCOL_ENTITY_PROPERTYGRP
) {
4017 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE
);
4020 if ((rc
= rc_check_parent_child(np
->rn_id
.rl_type
, type
)) !=
4021 REP_PROTOCOL_SUCCESS
) {
4025 if ((rc
= rc_check_type_name(type
, name
)) != REP_PROTOCOL_SUCCESS
||
4026 (rc
= rc_check_pgtype_name(pgtype
)) != REP_PROTOCOL_SUCCESS
) {
4032 if (!client_is_privileged()) {
4033 rc
= REP_PROTOCOL_FAIL_PERMISSION_DENIED
;
4036 if (flags
& SCF_PG_FLAG_NONPERSISTENT
) {
4037 event_id
= ADT_smf_create_npg
;
4039 event_id
= ADT_smf_create_pg
;
4041 if ((rc
= rc_get_fmri_and_concat(np
, fmri
, sizeof (fmri
), &sz_out
,
4042 name
, REP_PROTOCOL_ENTITY_PROPERTYGRP
)) != REP_PROTOCOL_SUCCESS
) {
4047 if (is_main_repository
) {
4048 /* Must have .smf.modify or smf.modify.<type> authorization */
4051 rc
= perm_add_enabling(pcp
, AUTH_MODIFY
);
4053 if (rc
== REP_PROTOCOL_SUCCESS
) {
4054 const char * const auth
=
4055 perm_auth_for_pgtype(pgtype
);
4058 rc
= perm_add_enabling(pcp
, auth
);
4062 * .manage or $action_authorization can be used to
4063 * create the actions pg and the general_ovr pg.
4065 if (rc
== REP_PROTOCOL_SUCCESS
&&
4066 (flags
& SCF_PG_FLAG_NONPERSISTENT
) != 0 &&
4067 np
->rn_id
.rl_type
== REP_PROTOCOL_ENTITY_INSTANCE
&&
4068 ((strcmp(name
, AUTH_PG_ACTIONS
) == 0 &&
4069 strcmp(pgtype
, AUTH_PG_ACTIONS_TYPE
) == 0) ||
4070 (strcmp(name
, AUTH_PG_GENERAL_OVR
) == 0 &&
4071 strcmp(pgtype
, AUTH_PG_GENERAL_OVR_TYPE
) == 0))) {
4072 rc
= perm_add_enabling(pcp
, AUTH_MANAGE
);
4074 if (rc
== REP_PROTOCOL_SUCCESS
)
4075 rc
= perm_add_inst_action_auth(pcp
, np
);
4078 if (rc
== REP_PROTOCOL_SUCCESS
) {
4079 granted
= perm_granted(pcp
);
4081 rc
= map_granted_status(granted
, pcp
,
4082 &audit_data
.ed_auth
);
4083 if (granted
== PERM_GONE
) {
4084 /* No auditing if client gone. */
4093 rc
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
4097 rc
= REP_PROTOCOL_SUCCESS
;
4099 #endif /* NATIVE_BUILD */
4102 if (rc
!= REP_PROTOCOL_SUCCESS
) {
4104 if (rc
!= REP_PROTOCOL_FAIL_NO_RESOURCES
) {
4105 smf_audit_event(event_id
, ADT_FAILURE
,
4106 ADT_FAIL_VALUE_AUTH
, &audit_data
);
4108 if (audit_data
.ed_auth
!= NULL
)
4109 free(audit_data
.ed_auth
);
4113 (void) pthread_mutex_lock(&np
->rn_lock
);
4114 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np
, npp
, RC_NODE_CREATING_CHILD
,
4115 audit_data
.ed_auth
);
4116 (void) pthread_mutex_unlock(&np
->rn_lock
);
4118 rc
= object_create_pg(np
, type
, name
, pgtype
, flags
, &cp
);
4120 if (rc
== REP_PROTOCOL_SUCCESS
) {
4121 rc_node_assign(cpp
, cp
);
4125 (void) pthread_mutex_lock(&np
->rn_lock
);
4126 rc_node_rele_flag(np
, RC_NODE_CREATING_CHILD
);
4127 (void) pthread_mutex_unlock(&np
->rn_lock
);
4129 if (rc
== REP_PROTOCOL_SUCCESS
) {
4130 smf_audit_event(event_id
, ADT_SUCCESS
, ADT_SUCCESS
,
4133 if (audit_data
.ed_auth
!= NULL
)
4134 free(audit_data
.ed_auth
);
4140 rc_pg_notify_fire(rc_node_pg_notify_t
*pnp
)
4142 assert(MUTEX_HELD(&rc_pg_notify_lock
));
4144 if (pnp
->rnpn_pg
!= NULL
) {
4145 uu_list_remove(pnp
->rnpn_pg
->rn_pg_notify_list
, pnp
);
4146 (void) close(pnp
->rnpn_fd
);
4148 pnp
->rnpn_pg
= NULL
;
4151 assert(pnp
->rnpn_fd
== -1);
4156 rc_notify_node_delete(rc_notify_delete_t
*ndp
, rc_node_t
*np_arg
)
4158 rc_node_t
*svc
= NULL
;
4159 rc_node_t
*inst
= NULL
;
4160 rc_node_t
*pg
= NULL
;
4161 rc_node_t
*np
= np_arg
;
4164 while (svc
== NULL
) {
4165 (void) pthread_mutex_lock(&np
->rn_lock
);
4166 if (!rc_node_hold_flag(np
, RC_NODE_USING_PARENT
)) {
4167 (void) pthread_mutex_unlock(&np
->rn_lock
);
4170 nnp
= np
->rn_parent
;
4171 rc_node_hold_locked(np
); /* hold it in place */
4173 switch (np
->rn_id
.rl_type
) {
4174 case REP_PROTOCOL_ENTITY_PROPERTYGRP
:
4178 case REP_PROTOCOL_ENTITY_INSTANCE
:
4179 assert(inst
== NULL
);
4182 case REP_PROTOCOL_ENTITY_SERVICE
:
4183 assert(svc
== NULL
);
4187 rc_node_rele_flag(np
, RC_NODE_USING_PARENT
);
4188 rc_node_rele_locked(np
);
4192 (void) pthread_mutex_unlock(&np
->rn_lock
);
4199 rc_notify_deletion(ndp
,
4201 inst
!= NULL
? inst
->rn_name
: NULL
,
4202 pg
!= NULL
? pg
->rn_name
: NULL
);
4214 } else if (inst
!= NULL
) {
4217 } else if (pg
!= NULL
) {
4223 (void) pthread_mutex_lock(&np
->rn_lock
);
4224 rc_node_rele_flag(np
, RC_NODE_USING_PARENT
);
4225 rc_node_rele_locked(np
);
4230 * Hold RC_NODE_DYING_FLAGS on np's descendents. If andformer is true, do
4231 * the same down the rn_former chain.
4234 rc_node_delete_hold(rc_node_t
*np
, int andformer
)
4239 assert(MUTEX_HELD(&np
->rn_lock
));
4240 assert((np
->rn_flags
& RC_NODE_DYING_FLAGS
) == RC_NODE_DYING_FLAGS
);
4242 for (cp
= uu_list_first(np
->rn_children
); cp
!= NULL
;
4243 cp
= uu_list_next(np
->rn_children
, cp
)) {
4244 (void) pthread_mutex_lock(&cp
->rn_lock
);
4245 (void) pthread_mutex_unlock(&np
->rn_lock
);
4246 if (!rc_node_hold_flag(cp
, RC_NODE_DYING_FLAGS
)) {
4248 * already marked as dead -- can't happen, since that
4249 * would require setting RC_NODE_CHILDREN_CHANGING
4250 * in np, and we're holding that...
4254 rc_node_delete_hold(cp
, andformer
); /* recurse, drop lock */
4256 (void) pthread_mutex_lock(&np
->rn_lock
);
4258 if (andformer
&& (cp
= np
->rn_former
) != NULL
) {
4259 (void) pthread_mutex_lock(&cp
->rn_lock
);
4260 (void) pthread_mutex_unlock(&np
->rn_lock
);
4261 if (!rc_node_hold_flag(cp
, RC_NODE_DYING_FLAGS
))
4262 abort(); /* can't happen, see above */
4264 goto again
; /* tail-recurse down rn_former */
4266 (void) pthread_mutex_unlock(&np
->rn_lock
);
4270 * N.B.: this function drops np->rn_lock on the way out.
4273 rc_node_delete_rele(rc_node_t
*np
, int andformer
)
4278 assert(MUTEX_HELD(&np
->rn_lock
));
4279 assert((np
->rn_flags
& RC_NODE_DYING_FLAGS
) == RC_NODE_DYING_FLAGS
);
4281 for (cp
= uu_list_first(np
->rn_children
); cp
!= NULL
;
4282 cp
= uu_list_next(np
->rn_children
, cp
)) {
4283 (void) pthread_mutex_lock(&cp
->rn_lock
);
4284 (void) pthread_mutex_unlock(&np
->rn_lock
);
4285 rc_node_delete_rele(cp
, andformer
); /* recurse, drop lock */
4286 (void) pthread_mutex_lock(&np
->rn_lock
);
4288 if (andformer
&& (cp
= np
->rn_former
) != NULL
) {
4289 (void) pthread_mutex_lock(&cp
->rn_lock
);
4290 rc_node_rele_flag(np
, RC_NODE_DYING_FLAGS
);
4291 (void) pthread_mutex_unlock(&np
->rn_lock
);
4294 goto again
; /* tail-recurse down rn_former */
4296 rc_node_rele_flag(np
, RC_NODE_DYING_FLAGS
);
4297 (void) pthread_mutex_unlock(&np
->rn_lock
);
4301 rc_node_finish_delete(rc_node_t
*cp
)
4304 rc_node_pg_notify_t
*pnp
;
4306 assert(MUTEX_HELD(&cp
->rn_lock
));
4308 if (!(cp
->rn_flags
& RC_NODE_OLD
)) {
4309 assert(cp
->rn_flags
& RC_NODE_IN_PARENT
);
4310 if (!rc_node_wait_flag(cp
, RC_NODE_USING_PARENT
)) {
4311 abort(); /* can't happen, see above */
4313 cp
->rn_flags
&= ~RC_NODE_IN_PARENT
;
4314 cp
->rn_parent
= NULL
;
4315 rc_node_free_fmri(cp
);
4318 cp
->rn_flags
|= RC_NODE_DEAD
;
4321 * If this node is not out-dated, we need to remove it from
4322 * the notify list and cache hash table.
4324 if (!(cp
->rn_flags
& RC_NODE_OLD
)) {
4325 assert(cp
->rn_refs
> 0); /* can't go away yet */
4326 (void) pthread_mutex_unlock(&cp
->rn_lock
);
4328 (void) pthread_mutex_lock(&rc_pg_notify_lock
);
4329 while ((pnp
= uu_list_first(cp
->rn_pg_notify_list
)) != NULL
)
4330 rc_pg_notify_fire(pnp
);
4331 (void) pthread_mutex_unlock(&rc_pg_notify_lock
);
4332 rc_notify_remove_node(cp
);
4334 bp
= cache_hold(cp
->rn_hash
);
4335 (void) pthread_mutex_lock(&cp
->rn_lock
);
4336 cache_remove_unlocked(bp
, cp
);
4342 * For each child, call rc_node_finish_delete() and recurse. If andformer
4343 * is set, also recurse down rn_former. Finally release np, which might
4347 rc_node_delete_children(rc_node_t
*np
, int andformer
)
4352 assert(np
->rn_refs
> 0);
4353 assert(MUTEX_HELD(&np
->rn_lock
));
4354 assert(np
->rn_flags
& RC_NODE_DEAD
);
4356 while ((cp
= uu_list_first(np
->rn_children
)) != NULL
) {
4357 uu_list_remove(np
->rn_children
, cp
);
4358 (void) pthread_mutex_lock(&cp
->rn_lock
);
4359 (void) pthread_mutex_unlock(&np
->rn_lock
);
4360 rc_node_hold_locked(cp
); /* hold while we recurse */
4361 rc_node_finish_delete(cp
);
4362 rc_node_delete_children(cp
, andformer
); /* drops lock + ref */
4363 (void) pthread_mutex_lock(&np
->rn_lock
);
4367 * When we drop cp's lock, all the children will be gone, so we
4368 * can release DYING_FLAGS.
4370 rc_node_rele_flag(np
, RC_NODE_DYING_FLAGS
);
4371 if (andformer
&& (cp
= np
->rn_former
) != NULL
) {
4372 np
->rn_former
= NULL
; /* unlink */
4373 (void) pthread_mutex_lock(&cp
->rn_lock
);
4376 * Register the ephemeral reference created by reading
4377 * np->rn_former into cp. Note that the persistent
4378 * reference (np->rn_former) is locked because we haven't
4379 * dropped np's lock since we dropped its RC_NODE_IN_TX
4380 * (via RC_NODE_DYING_FLAGS).
4382 rc_node_hold_ephemeral_locked(cp
);
4384 (void) pthread_mutex_unlock(&np
->rn_lock
);
4385 cp
->rn_flags
&= ~RC_NODE_ON_FORMER
;
4387 rc_node_hold_locked(cp
); /* hold while we loop */
4389 rc_node_finish_delete(cp
);
4391 rc_node_rele(np
); /* drop the old reference */
4394 goto again
; /* tail-recurse down rn_former */
4396 rc_node_rele_locked(np
);
4400 * The last client or child reference to np, which must be either
4401 * RC_NODE_OLD or RC_NODE_DEAD, has been destroyed. We'll destroy any
4402 * remaining references (e.g., rn_former) and call rc_node_destroy() to
4406 rc_node_no_client_refs(rc_node_t
*np
)
4409 rc_node_t
*current
, *cur
;
4411 assert(MUTEX_HELD(&np
->rn_lock
));
4412 assert(np
->rn_refs
== 0);
4413 assert(np
->rn_other_refs
== 0);
4414 assert(np
->rn_other_refs_held
== 0);
4416 if (np
->rn_flags
& RC_NODE_DEAD
) {
4418 * The node is DEAD, so the deletion code should have
4419 * destroyed all rn_children or rn_former references.
4420 * Since the last client or child reference has been
4421 * destroyed, we're free to destroy np. Unless another
4422 * thread has an ephemeral reference, in which case we'll
4425 if (np
->rn_erefs
> 1) {
4431 (void) pthread_mutex_unlock(&np
->rn_lock
);
4432 rc_node_destroy(np
);
4436 /* We only collect DEAD and OLD nodes, thank you. */
4437 assert(np
->rn_flags
& RC_NODE_OLD
);
4440 * RC_NODE_UNREFED keeps multiple threads from processing OLD
4441 * nodes. But it's vulnerable to unfriendly scheduling, so full
4442 * use of rn_erefs should supersede it someday.
4444 if (np
->rn_flags
& RC_NODE_UNREFED
) {
4445 (void) pthread_mutex_unlock(&np
->rn_lock
);
4448 np
->rn_flags
|= RC_NODE_UNREFED
;
4451 * Now we'll remove the node from the rn_former chain and take its
4456 * Since this node is OLD, it should be on an rn_former chain. To
4457 * remove it, we must find the current in-hash object and grab its
4458 * RC_NODE_IN_TX flag to protect the entire rn_former chain.
4461 (void) pthread_mutex_unlock(&np
->rn_lock
);
4464 current
= cache_lookup(&np
->rn_id
);
4466 if (current
== NULL
) {
4467 (void) pthread_mutex_lock(&np
->rn_lock
);
4469 if (np
->rn_flags
& RC_NODE_DEAD
)
4473 * We are trying to unreference this node, but the
4474 * owner of the former list does not exist. It must
4475 * be the case that another thread is deleting this
4476 * entire sub-branch, but has not yet reached us.
4477 * We will in short order be deleted.
4479 np
->rn_flags
&= ~RC_NODE_UNREFED
;
4480 (void) pthread_mutex_unlock(&np
->rn_lock
);
4484 if (current
== np
) {
4486 * no longer unreferenced
4488 (void) pthread_mutex_lock(&np
->rn_lock
);
4489 np
->rn_flags
&= ~RC_NODE_UNREFED
;
4490 /* held in cache_lookup() */
4491 rc_node_rele_locked(np
);
4495 (void) pthread_mutex_lock(¤t
->rn_lock
);
4496 if (current
->rn_flags
& RC_NODE_OLD
) {
4498 * current has been replaced since we looked it
4501 /* held in cache_lookup() */
4502 rc_node_rele_locked(current
);
4506 if (!rc_node_hold_flag(current
, RC_NODE_IN_TX
)) {
4508 * current has been deleted since we looked it up. Try
4511 /* held in cache_lookup() */
4512 rc_node_rele_locked(current
);
4517 * rc_node_hold_flag() might have dropped current's lock, so
4520 if (!(current
->rn_flags
& RC_NODE_OLD
)) {
4521 /* Not old. Stop looping. */
4522 (void) pthread_mutex_unlock(¤t
->rn_lock
);
4526 rc_node_rele_flag(current
, RC_NODE_IN_TX
);
4527 rc_node_rele_locked(current
);
4530 /* To take np's RC_NODE_DYING_FLAGS, we need its lock. */
4531 (void) pthread_mutex_lock(&np
->rn_lock
);
4534 * While we didn't have the lock, a thread may have added
4535 * a reference or changed the flags.
4537 if (!(np
->rn_flags
& (RC_NODE_OLD
| RC_NODE_DEAD
)) ||
4538 np
->rn_refs
!= 0 || np
->rn_other_refs
!= 0 ||
4539 np
->rn_other_refs_held
!= 0) {
4540 np
->rn_flags
&= ~RC_NODE_UNREFED
;
4542 (void) pthread_mutex_lock(¤t
->rn_lock
);
4543 rc_node_rele_flag(current
, RC_NODE_IN_TX
);
4544 /* held by cache_lookup() */
4545 rc_node_rele_locked(current
);
4549 if (!rc_node_hold_flag(np
, RC_NODE_DYING_FLAGS
)) {
4551 * Someone deleted the node while we were waiting for
4552 * DYING_FLAGS. Undo the modifications to current.
4554 (void) pthread_mutex_unlock(&np
->rn_lock
);
4556 rc_node_rele_flag(current
, RC_NODE_IN_TX
);
4557 /* held by cache_lookup() */
4558 rc_node_rele_locked(current
);
4560 (void) pthread_mutex_lock(&np
->rn_lock
);
4564 /* Take RC_NODE_DYING_FLAGS on np's descendents. */
4565 rc_node_delete_hold(np
, 0); /* drops np->rn_lock */
4567 /* Mark np DEAD. This requires the lock. */
4568 (void) pthread_mutex_lock(&np
->rn_lock
);
4570 /* Recheck for new references. */
4571 if (!(np
->rn_flags
& RC_NODE_OLD
) ||
4572 np
->rn_refs
!= 0 || np
->rn_other_refs
!= 0 ||
4573 np
->rn_other_refs_held
!= 0) {
4574 np
->rn_flags
&= ~RC_NODE_UNREFED
;
4575 rc_node_delete_rele(np
, 0); /* drops np's lock */
4577 (void) pthread_mutex_lock(¤t
->rn_lock
);
4578 rc_node_rele_flag(current
, RC_NODE_IN_TX
);
4579 /* held by cache_lookup() */
4580 rc_node_rele_locked(current
);
4584 np
->rn_flags
|= RC_NODE_DEAD
;
4587 * Delete the children. This calls rc_node_rele_locked() on np at
4588 * the end, so add a reference to keep the count from going
4589 * negative. It will recurse with RC_NODE_DEAD set, so we'll call
4590 * rc_node_destroy() above, but RC_NODE_UNREFED is also set, so it
4591 * shouldn't actually free() np.
4593 rc_node_hold_locked(np
);
4594 rc_node_delete_children(np
, 0); /* unlocks np */
4596 /* Remove np from current's rn_former chain. */
4597 (void) pthread_mutex_lock(¤t
->rn_lock
);
4598 for (cur
= current
; cur
!= NULL
&& cur
->rn_former
!= np
;
4599 cur
= cur
->rn_former
)
4601 assert(cur
!= NULL
&& cur
!= np
);
4603 cur
->rn_former
= np
->rn_former
;
4604 np
->rn_former
= NULL
;
4606 rc_node_rele_flag(current
, RC_NODE_IN_TX
);
4607 /* held by cache_lookup() */
4608 rc_node_rele_locked(current
);
4610 /* Clear ON_FORMER and UNREFED, and destroy. */
4611 (void) pthread_mutex_lock(&np
->rn_lock
);
4612 assert(np
->rn_flags
& RC_NODE_ON_FORMER
);
4613 np
->rn_flags
&= ~(RC_NODE_UNREFED
| RC_NODE_ON_FORMER
);
4615 if (np
->rn_erefs
> 1) {
4616 /* Still referenced. Stay execution. */
4622 (void) pthread_mutex_unlock(&np
->rn_lock
);
4623 rc_node_destroy(np
);
4628 * Another thread marked np DEAD. If there still aren't any
4629 * persistent references, destroy the node.
4631 np
->rn_flags
&= ~RC_NODE_UNREFED
;
4633 unrefed
= (np
->rn_refs
== 0 && np
->rn_other_refs
== 0 &&
4634 np
->rn_other_refs_held
== 0);
4636 if (np
->rn_erefs
> 0)
4639 if (unrefed
&& np
->rn_erefs
> 0) {
4644 (void) pthread_mutex_unlock(&np
->rn_lock
);
4647 rc_node_destroy(np
);
4651 get_delete_event_id(rep_protocol_entity_t entity
, uint32_t pgflags
)
4655 #ifndef NATIVE_BUILD
4657 case REP_PROTOCOL_ENTITY_SERVICE
:
4658 case REP_PROTOCOL_ENTITY_INSTANCE
:
4659 id
= ADT_smf_delete
;
4661 case REP_PROTOCOL_ENTITY_SNAPSHOT
:
4662 id
= ADT_smf_delete_snap
;
4664 case REP_PROTOCOL_ENTITY_PROPERTYGRP
:
4665 case REP_PROTOCOL_ENTITY_CPROPERTYGRP
:
4666 if (pgflags
& SCF_PG_FLAG_NONPERSISTENT
) {
4667 id
= ADT_smf_delete_npg
;
4669 id
= ADT_smf_delete_pg
;
4675 #endif /* NATIVE_BUILD */
4684 * _PERMISSION_DENIED
4687 * and whatever object_delete() fails with.
4690 rc_node_delete(rc_node_ptr_t
*npp
)
4692 rc_node_t
*np
, *np_orig
;
4693 rc_node_t
*pp
= NULL
;
4695 rc_node_pg_notify_t
*pnp
;
4697 rc_notify_delete_t
*ndp
;
4700 au_event_t event_id
= 0;
4702 audit_event_data_t audit_data
;
4703 int audit_failure
= 0;
4705 RC_NODE_PTR_GET_CHECK_AND_LOCK(np
, npp
);
4707 audit_data
.ed_fmri
= NULL
;
4708 audit_data
.ed_auth
= NULL
;
4709 audit_data
.ed_snapname
= NULL
;
4710 audit_data
.ed_type
= NULL
;
4712 switch (np
->rn_id
.rl_type
) {
4713 case REP_PROTOCOL_ENTITY_SERVICE
:
4714 event_id
= get_delete_event_id(REP_PROTOCOL_ENTITY_SERVICE
,
4717 case REP_PROTOCOL_ENTITY_INSTANCE
:
4718 event_id
= get_delete_event_id(REP_PROTOCOL_ENTITY_INSTANCE
,
4721 case REP_PROTOCOL_ENTITY_SNAPSHOT
:
4722 event_id
= get_delete_event_id(REP_PROTOCOL_ENTITY_SNAPSHOT
,
4724 audit_data
.ed_snapname
= strdup(np
->rn_name
);
4725 if (audit_data
.ed_snapname
== NULL
) {
4726 (void) pthread_mutex_unlock(&np
->rn_lock
);
4727 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
4729 break; /* deletable */
4731 case REP_PROTOCOL_ENTITY_SCOPE
:
4732 case REP_PROTOCOL_ENTITY_SNAPLEVEL
:
4733 /* Scopes and snaplevels are indelible. */
4734 (void) pthread_mutex_unlock(&np
->rn_lock
);
4735 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
4737 case REP_PROTOCOL_ENTITY_CPROPERTYGRP
:
4738 (void) pthread_mutex_unlock(&np
->rn_lock
);
4739 np
= np
->rn_cchain
[0];
4740 RC_NODE_CHECK_AND_LOCK(np
);
4741 event_id
= get_delete_event_id(REP_PROTOCOL_ENTITY_CPROPERTYGRP
,
4745 case REP_PROTOCOL_ENTITY_PROPERTYGRP
:
4746 if (np
->rn_id
.rl_ids
[ID_SNAPSHOT
] == 0) {
4748 get_delete_event_id(REP_PROTOCOL_ENTITY_PROPERTYGRP
,
4750 audit_data
.ed_type
= strdup(np
->rn_type
);
4751 if (audit_data
.ed_type
== NULL
) {
4752 (void) pthread_mutex_unlock(&np
->rn_lock
);
4753 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
4758 /* Snapshot property groups are indelible. */
4759 (void) pthread_mutex_unlock(&np
->rn_lock
);
4760 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED
);
4762 case REP_PROTOCOL_ENTITY_PROPERTY
:
4763 (void) pthread_mutex_unlock(&np
->rn_lock
);
4764 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
4772 audit_data
.ed_fmri
= malloc(REP_PROTOCOL_FMRI_LEN
);
4773 if (audit_data
.ed_fmri
== NULL
) {
4774 rc
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
4778 rc_node_hold_locked(np
); /* simplifies rest of the code */
4782 * The following loop is to deal with the fact that snapshots and
4783 * property groups are moving targets -- changes to them result
4784 * in a new "child" node. Since we can only delete from the top node,
4785 * we have to loop until we have a non-RC_NODE_OLD version.
4788 if (!rc_node_wait_flag(np
,
4789 RC_NODE_IN_TX
| RC_NODE_USING_PARENT
)) {
4790 rc_node_rele_locked(np
);
4791 rc
= REP_PROTOCOL_FAIL_DELETED
;
4795 if (np
->rn_flags
& RC_NODE_OLD
) {
4796 rc_node_rele_locked(np
);
4797 np
= cache_lookup(&np_orig
->rn_id
);
4798 assert(np
!= np_orig
);
4801 rc
= REP_PROTOCOL_FAIL_DELETED
;
4804 (void) pthread_mutex_lock(&np
->rn_lock
);
4808 if (!rc_node_hold_flag(np
, RC_NODE_USING_PARENT
)) {
4809 rc_node_rele_locked(np
);
4810 rc_node_clear(npp
, 1);
4811 rc
= REP_PROTOCOL_FAIL_DELETED
;
4815 * Mark our parent as children changing. this call drops our
4816 * lock and the RC_NODE_USING_PARENT flag, and returns with
4819 pp
= rc_node_hold_parent_flag(np
, RC_NODE_CHILDREN_CHANGING
);
4821 /* our parent is gone, we're going next... */
4824 rc_node_clear(npp
, 1);
4825 rc
= REP_PROTOCOL_FAIL_DELETED
;
4829 rc_node_hold_locked(pp
); /* hold for later */
4830 (void) pthread_mutex_unlock(&pp
->rn_lock
);
4832 (void) pthread_mutex_lock(&np
->rn_lock
);
4833 if (!(np
->rn_flags
& RC_NODE_OLD
))
4834 break; /* not old -- we're done */
4836 (void) pthread_mutex_unlock(&np
->rn_lock
);
4837 (void) pthread_mutex_lock(&pp
->rn_lock
);
4838 rc_node_rele_flag(pp
, RC_NODE_CHILDREN_CHANGING
);
4839 rc_node_rele_locked(pp
);
4840 (void) pthread_mutex_lock(&np
->rn_lock
);
4841 continue; /* loop around and try again */
4844 * Everyone out of the pool -- we grab everything but
4845 * RC_NODE_USING_PARENT (including RC_NODE_DYING) to keep
4846 * any changes from occurring while we are attempting to
4849 if (!rc_node_hold_flag(np
, RC_NODE_DYING_FLAGS
)) {
4850 (void) pthread_mutex_unlock(&np
->rn_lock
);
4851 rc
= REP_PROTOCOL_FAIL_DELETED
;
4855 assert(!(np
->rn_flags
& RC_NODE_OLD
));
4857 if ((rc
= rc_node_get_fmri_or_fragment(np
, audit_data
.ed_fmri
,
4858 REP_PROTOCOL_FMRI_LEN
, &sz_out
)) != REP_PROTOCOL_SUCCESS
) {
4859 rc_node_rele_flag(np
, RC_NODE_DYING_FLAGS
);
4860 (void) pthread_mutex_unlock(&np
->rn_lock
);
4865 if (!client_is_privileged()) {
4866 rc
= REP_PROTOCOL_FAIL_PERMISSION_DENIED
;
4869 if (is_main_repository
) {
4870 /* permission check */
4871 (void) pthread_mutex_unlock(&np
->rn_lock
);
4874 rc
= perm_add_enabling(pcp
, AUTH_MODIFY
);
4876 /* add .smf.modify.<type> for pgs. */
4877 if (rc
== REP_PROTOCOL_SUCCESS
&& np
->rn_id
.rl_type
==
4878 REP_PROTOCOL_ENTITY_PROPERTYGRP
) {
4879 const char * const auth
=
4880 perm_auth_for_pgtype(np
->rn_type
);
4883 rc
= perm_add_enabling(pcp
, auth
);
4886 if (rc
== REP_PROTOCOL_SUCCESS
) {
4887 granted
= perm_granted(pcp
);
4889 rc
= map_granted_status(granted
, pcp
,
4890 &audit_data
.ed_auth
);
4891 if (granted
== PERM_GONE
) {
4892 /* No need to audit if client gone. */
4894 rc_node_rele_flag(np
,
4895 RC_NODE_DYING_FLAGS
);
4898 if (granted
== PERM_DENIED
)
4904 rc
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
4907 (void) pthread_mutex_lock(&np
->rn_lock
);
4909 rc
= REP_PROTOCOL_SUCCESS
;
4911 #endif /* NATIVE_BUILD */
4913 if (rc
!= REP_PROTOCOL_SUCCESS
) {
4914 rc_node_rele_flag(np
, RC_NODE_DYING_FLAGS
);
4915 (void) pthread_mutex_unlock(&np
->rn_lock
);
4919 ndp
= uu_zalloc(sizeof (*ndp
));
4921 rc_node_rele_flag(np
, RC_NODE_DYING_FLAGS
);
4922 (void) pthread_mutex_unlock(&np
->rn_lock
);
4923 rc
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
4927 rc_node_delete_hold(np
, 1); /* hold entire subgraph, drop lock */
4929 rc
= object_delete(np
);
4931 if (rc
!= REP_PROTOCOL_SUCCESS
) {
4932 (void) pthread_mutex_lock(&np
->rn_lock
);
4933 rc_node_delete_rele(np
, 1); /* drops lock */
4939 * Now, delicately unlink and delete the object.
4941 * Create the delete notification, atomically remove
4942 * from the hash table and set the NODE_DEAD flag, and
4943 * remove from the parent's children list.
4945 rc_notify_node_delete(ndp
, np
); /* frees or uses ndp */
4947 bp
= cache_hold(np
->rn_hash
);
4949 (void) pthread_mutex_lock(&np
->rn_lock
);
4950 cache_remove_unlocked(bp
, np
);
4953 np
->rn_flags
|= RC_NODE_DEAD
;
4957 * Remove from pp's rn_children. This requires pp's lock,
4958 * so we must drop np's lock to respect lock order.
4960 (void) pthread_mutex_unlock(&np
->rn_lock
);
4961 (void) pthread_mutex_lock(&pp
->rn_lock
);
4962 (void) pthread_mutex_lock(&np
->rn_lock
);
4964 uu_list_remove(pp
->rn_children
, np
);
4966 rc_node_rele_flag(pp
, RC_NODE_CHILDREN_CHANGING
);
4968 (void) pthread_mutex_unlock(&pp
->rn_lock
);
4970 np
->rn_flags
&= ~RC_NODE_IN_PARENT
;
4974 * finally, propagate death to our children (including marking
4975 * them DEAD), handle notifications, and release our hold.
4977 rc_node_hold_locked(np
); /* hold for delete */
4978 rc_node_delete_children(np
, 1); /* drops DYING_FLAGS, lock, ref */
4980 rc_node_clear(npp
, 1);
4982 (void) pthread_mutex_lock(&rc_pg_notify_lock
);
4983 while ((pnp
= uu_list_first(np
->rn_pg_notify_list
)) != NULL
)
4984 rc_pg_notify_fire(pnp
);
4985 (void) pthread_mutex_unlock(&rc_pg_notify_lock
);
4986 rc_notify_remove_node(np
);
4990 smf_audit_event(event_id
, ADT_SUCCESS
, ADT_SUCCESS
,
4992 free(audit_data
.ed_auth
);
4993 free(audit_data
.ed_snapname
);
4994 free(audit_data
.ed_type
);
4995 free(audit_data
.ed_fmri
);
5000 if (rc
== REP_PROTOCOL_FAIL_DELETED
)
5001 rc_node_clear(npp
, 1);
5003 (void) pthread_mutex_lock(&pp
->rn_lock
);
5004 rc_node_rele_flag(pp
, RC_NODE_CHILDREN_CHANGING
);
5005 rc_node_rele_locked(pp
); /* drop ref and lock */
5007 if (audit_failure
) {
5008 smf_audit_event(event_id
, ADT_FAILURE
,
5009 ADT_FAIL_VALUE_AUTH
, &audit_data
);
5012 free(audit_data
.ed_auth
);
5013 free(audit_data
.ed_snapname
);
5014 free(audit_data
.ed_type
);
5015 free(audit_data
.ed_fmri
);
5020 rc_node_next_snaplevel(rc_node_ptr_t
*npp
, rc_node_ptr_t
*cpp
)
5026 rc_node_clear(cpp
, 0);
5028 RC_NODE_PTR_GET_CHECK_AND_LOCK(np
, npp
);
5030 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_SNAPSHOT
&&
5031 np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_SNAPLEVEL
) {
5032 (void) pthread_mutex_unlock(&np
->rn_lock
);
5033 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE
);
5036 if (np
->rn_id
.rl_type
== REP_PROTOCOL_ENTITY_SNAPSHOT
) {
5037 if ((res
= rc_node_fill_children(np
,
5038 REP_PROTOCOL_ENTITY_SNAPLEVEL
)) != REP_PROTOCOL_SUCCESS
) {
5039 (void) pthread_mutex_unlock(&np
->rn_lock
);
5043 for (cp
= uu_list_first(np
->rn_children
);
5045 cp
= uu_list_next(np
->rn_children
, cp
)) {
5046 if (cp
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_SNAPLEVEL
)
5052 (void) pthread_mutex_unlock(&np
->rn_lock
);
5054 if (!rc_node_hold_flag(np
, RC_NODE_USING_PARENT
)) {
5055 (void) pthread_mutex_unlock(&np
->rn_lock
);
5056 rc_node_clear(npp
, 1);
5057 return (REP_PROTOCOL_FAIL_DELETED
);
5061 * mark our parent as children changing. This call drops our
5062 * lock and the RC_NODE_USING_PARENT flag, and returns with
5065 pp
= rc_node_hold_parent_flag(np
, RC_NODE_CHILDREN_CHANGING
);
5067 /* our parent is gone, we're going next... */
5069 rc_node_clear(npp
, 1);
5070 return (REP_PROTOCOL_FAIL_DELETED
);
5074 * find the next snaplevel
5077 while ((cp
= uu_list_next(pp
->rn_children
, cp
)) != NULL
&&
5078 cp
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_SNAPLEVEL
)
5081 /* it must match the snaplevel list */
5082 assert((cp
== NULL
&& np
->rn_snaplevel
->rsl_next
== NULL
) ||
5083 (cp
!= NULL
&& np
->rn_snaplevel
->rsl_next
==
5089 rc_node_rele_flag(pp
, RC_NODE_CHILDREN_CHANGING
);
5091 (void) pthread_mutex_unlock(&pp
->rn_lock
);
5094 rc_node_assign(cpp
, cp
);
5098 return (REP_PROTOCOL_SUCCESS
);
5100 return (REP_PROTOCOL_FAIL_NOT_FOUND
);
5104 * This call takes a snapshot (np) and either:
5105 * an existing snapid (to be associated with np), or
5106 * a non-NULL parentp (from which a new snapshot is taken, and associated
5109 * To do the association, np is duplicated, the duplicate is made to
5110 * represent the new snapid, and np is replaced with the new rc_node_t on
5111 * np's parent's child list. np is placed on the new node's rn_former list,
5112 * and replaces np in cache_hash (so rc_node_update() will find the new one).
5114 * old_fmri and old_name point to the original snap shot's FMRI and name.
5115 * These values are used when generating audit events.
5134 rc_node_t
*nnp
, *prev
;
5138 perm_status_t granted
;
5139 au_event_t event_id
;
5140 audit_event_data_t audit_data
;
5142 if (parentp
== NULL
) {
5143 assert(old_fmri
!= NULL
);
5145 assert(snapid
== 0);
5147 assert(MUTEX_HELD(&np
->rn_lock
));
5149 /* Gather the audit data. */
5151 * ADT_smf_* symbols may not be defined in the /usr/include header
5152 * files on the build machine. Thus, the following if-else will
5153 * not be compiled when doing native builds.
5155 #ifndef NATIVE_BUILD
5156 if (parentp
== NULL
) {
5157 event_id
= ADT_smf_attach_snap
;
5159 event_id
= ADT_smf_create_snap
;
5161 #endif /* NATIVE_BUILD */
5162 audit_data
.ed_fmri
= malloc(REP_PROTOCOL_FMRI_LEN
);
5163 audit_data
.ed_snapname
= malloc(REP_PROTOCOL_NAME_LEN
);
5164 if ((audit_data
.ed_fmri
== NULL
) || (audit_data
.ed_snapname
== NULL
)) {
5165 (void) pthread_mutex_unlock(&np
->rn_lock
);
5166 free(audit_data
.ed_fmri
);
5167 free(audit_data
.ed_snapname
);
5168 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
5170 audit_data
.ed_auth
= NULL
;
5171 if (strlcpy(audit_data
.ed_snapname
, np
->rn_name
,
5172 REP_PROTOCOL_NAME_LEN
) >= REP_PROTOCOL_NAME_LEN
) {
5175 audit_data
.ed_old_fmri
= old_fmri
;
5176 audit_data
.ed_old_name
= old_name
? old_name
: "NO NAME";
5178 if (parentp
== NULL
) {
5180 * In the attach case, get the instance FMRIs of the
5183 if ((rc
= rc_node_get_fmri_or_fragment(np
, audit_data
.ed_fmri
,
5184 REP_PROTOCOL_FMRI_LEN
, &sz_out
)) != REP_PROTOCOL_SUCCESS
) {
5185 (void) pthread_mutex_unlock(&np
->rn_lock
);
5186 free(audit_data
.ed_fmri
);
5187 free(audit_data
.ed_snapname
);
5192 * Capture the FMRI of the parent if we're actually going
5193 * to take the snapshot.
5195 if ((rc
= rc_node_get_fmri_or_fragment(parentp
,
5196 audit_data
.ed_fmri
, REP_PROTOCOL_FMRI_LEN
, &sz_out
)) !=
5197 REP_PROTOCOL_SUCCESS
) {
5198 (void) pthread_mutex_unlock(&np
->rn_lock
);
5199 free(audit_data
.ed_fmri
);
5200 free(audit_data
.ed_snapname
);
5206 rc_node_hold_locked(np
); /* simplifies the remainder */
5208 (void) pthread_mutex_unlock(&np
->rn_lock
);
5209 granted
= rc_node_modify_permission_check(&audit_data
.ed_auth
);
5212 smf_audit_event(event_id
, ADT_FAILURE
, ADT_FAIL_VALUE_AUTH
,
5214 rc
= REP_PROTOCOL_FAIL_PERMISSION_DENIED
;
5220 rc
= REP_PROTOCOL_FAIL_PERMISSION_DENIED
;
5224 rc
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
5228 bad_error(rc_node_modify_permission_check
, granted
);
5230 (void) pthread_mutex_lock(&np
->rn_lock
);
5233 * get the latest node, holding RC_NODE_IN_TX to keep the rn_former
5234 * list from changing.
5237 if (!(np
->rn_flags
& RC_NODE_OLD
)) {
5238 if (!rc_node_hold_flag(np
, RC_NODE_USING_PARENT
)) {
5241 pp
= rc_node_hold_parent_flag(np
,
5242 RC_NODE_CHILDREN_CHANGING
);
5244 (void) pthread_mutex_lock(&np
->rn_lock
);
5248 if (np
->rn_flags
& RC_NODE_OLD
) {
5249 rc_node_rele_flag(pp
,
5250 RC_NODE_CHILDREN_CHANGING
);
5251 (void) pthread_mutex_unlock(&pp
->rn_lock
);
5254 (void) pthread_mutex_unlock(&pp
->rn_lock
);
5256 if (!rc_node_hold_flag(np
, RC_NODE_IN_TX
)) {
5258 * Can't happen, since we're holding our
5259 * parent's CHILDREN_CHANGING flag...
5263 break; /* everything's ready */
5266 rc_node_rele_locked(np
);
5267 np
= cache_lookup(&np_orig
->rn_id
);
5270 rc
= REP_PROTOCOL_FAIL_DELETED
;
5274 (void) pthread_mutex_lock(&np
->rn_lock
);
5277 if (parentp
!= NULL
) {
5278 if (pp
!= parentp
) {
5279 rc
= REP_PROTOCOL_FAIL_BAD_REQUEST
;
5285 * look for a former node with the snapid we need.
5287 if (np
->rn_snapshot_id
== snapid
) {
5288 rc_node_rele_flag(np
, RC_NODE_IN_TX
);
5289 rc_node_rele_locked(np
);
5291 (void) pthread_mutex_lock(&pp
->rn_lock
);
5292 rc_node_rele_flag(pp
, RC_NODE_CHILDREN_CHANGING
);
5293 (void) pthread_mutex_unlock(&pp
->rn_lock
);
5294 rc
= REP_PROTOCOL_SUCCESS
; /* nothing to do */
5299 while ((nnp
= prev
->rn_former
) != NULL
) {
5300 if (nnp
->rn_snapshot_id
== snapid
) {
5302 break; /* existing node with that id */
5310 nnp
= rc_node_alloc();
5312 rc
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
5316 nnp
->rn_id
= np
->rn_id
; /* structure assignment */
5317 nnp
->rn_hash
= np
->rn_hash
;
5318 nnp
->rn_name
= strdup(np
->rn_name
);
5319 nnp
->rn_snapshot_id
= snapid
;
5320 nnp
->rn_flags
= RC_NODE_IN_TX
| RC_NODE_USING_PARENT
;
5322 if (nnp
->rn_name
== NULL
) {
5323 rc
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
5328 (void) pthread_mutex_unlock(&np
->rn_lock
);
5330 rc
= object_snapshot_attach(&np
->rn_id
, &snapid
, (parentp
!= NULL
));
5332 if (parentp
!= NULL
)
5333 nnp
->rn_snapshot_id
= snapid
; /* fill in new snapid */
5335 assert(nnp
->rn_snapshot_id
== snapid
);
5337 (void) pthread_mutex_lock(&np
->rn_lock
);
5338 if (rc
!= REP_PROTOCOL_SUCCESS
)
5342 * fix up the former chain
5345 prev
->rn_former
= nnp
->rn_former
;
5346 (void) pthread_mutex_lock(&nnp
->rn_lock
);
5347 nnp
->rn_flags
&= ~RC_NODE_ON_FORMER
;
5348 nnp
->rn_former
= NULL
;
5349 (void) pthread_mutex_unlock(&nnp
->rn_lock
);
5351 np
->rn_flags
|= RC_NODE_OLD
;
5352 (void) pthread_mutex_unlock(&np
->rn_lock
);
5355 * replace np with nnp
5357 rc_node_relink_child(pp
, np
, nnp
);
5360 smf_audit_event(event_id
, ADT_SUCCESS
, ADT_SUCCESS
, &audit_data
);
5361 rc
= REP_PROTOCOL_SUCCESS
;
5364 free(audit_data
.ed_auth
);
5365 free(audit_data
.ed_fmri
);
5366 free(audit_data
.ed_snapname
);
5370 rc_node_rele_flag(np
, RC_NODE_IN_TX
);
5371 rc_node_rele_locked(np
);
5372 (void) pthread_mutex_lock(&pp
->rn_lock
);
5373 rc_node_rele_flag(pp
, RC_NODE_CHILDREN_CHANGING
);
5374 (void) pthread_mutex_unlock(&pp
->rn_lock
);
5378 rc_node_destroy(nnp
);
5383 free(audit_data
.ed_auth
);
5384 free(audit_data
.ed_fmri
);
5385 free(audit_data
.ed_snapname
);
5390 rc_snapshot_take_new(rc_node_ptr_t
*npp
, const char *svcname
,
5391 const char *instname
, const char *name
, rc_node_ptr_t
*outpp
)
5393 perm_status_t granted
;
5395 rc_node_t
*outp
= NULL
;
5397 char fmri
[REP_PROTOCOL_FMRI_LEN
];
5398 audit_event_data_t audit_data
;
5401 rc_node_clear(outpp
, 0);
5404 * rc_node_modify_permission_check() must be called before the node
5405 * is locked. This is because the library functions that check
5406 * authorizations can trigger calls back into configd.
5408 granted
= rc_node_modify_permission_check(&audit_data
.ed_auth
);
5412 * We continue in this case, so that we can generate an
5413 * audit event later in this function.
5415 perm_rc
= REP_PROTOCOL_FAIL_PERMISSION_DENIED
;
5418 perm_rc
= REP_PROTOCOL_SUCCESS
;
5421 /* No need to produce audit event if client is gone. */
5422 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED
);
5424 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
5426 bad_error("rc_node_modify_permission_check", granted
);
5430 RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np
, npp
, audit_data
.ed_auth
);
5431 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_INSTANCE
) {
5432 (void) pthread_mutex_unlock(&np
->rn_lock
);
5433 free(audit_data
.ed_auth
);
5434 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH
);
5437 rc
= rc_check_type_name(REP_PROTOCOL_ENTITY_SNAPSHOT
, name
);
5438 if (rc
!= REP_PROTOCOL_SUCCESS
) {
5439 (void) pthread_mutex_unlock(&np
->rn_lock
);
5440 free(audit_data
.ed_auth
);
5444 if (svcname
!= NULL
&& (rc
=
5445 rc_check_type_name(REP_PROTOCOL_ENTITY_SERVICE
, svcname
)) !=
5446 REP_PROTOCOL_SUCCESS
) {
5447 (void) pthread_mutex_unlock(&np
->rn_lock
);
5448 free(audit_data
.ed_auth
);
5452 if (instname
!= NULL
&& (rc
=
5453 rc_check_type_name(REP_PROTOCOL_ENTITY_INSTANCE
, instname
)) !=
5454 REP_PROTOCOL_SUCCESS
) {
5455 (void) pthread_mutex_unlock(&np
->rn_lock
);
5456 free(audit_data
.ed_auth
);
5460 audit_data
.ed_fmri
= fmri
;
5461 audit_data
.ed_snapname
= (char *)name
;
5463 if ((rc
= rc_node_get_fmri_or_fragment(np
, fmri
, sizeof (fmri
),
5464 &sz_out
)) != REP_PROTOCOL_SUCCESS
) {
5465 (void) pthread_mutex_unlock(&np
->rn_lock
);
5466 free(audit_data
.ed_auth
);
5469 if (perm_rc
!= REP_PROTOCOL_SUCCESS
) {
5470 (void) pthread_mutex_unlock(&np
->rn_lock
);
5471 smf_audit_event(ADT_smf_create_snap
, ADT_FAILURE
,
5472 ADT_FAIL_VALUE_AUTH
, &audit_data
);
5473 free(audit_data
.ed_auth
);
5477 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np
, npp
, RC_NODE_CREATING_CHILD
,
5478 audit_data
.ed_auth
);
5479 (void) pthread_mutex_unlock(&np
->rn_lock
);
5481 rc
= object_snapshot_take_new(np
, svcname
, instname
, name
, &outp
);
5483 if (rc
== REP_PROTOCOL_SUCCESS
) {
5484 rc_node_assign(outpp
, outp
);
5488 (void) pthread_mutex_lock(&np
->rn_lock
);
5489 rc_node_rele_flag(np
, RC_NODE_CREATING_CHILD
);
5490 (void) pthread_mutex_unlock(&np
->rn_lock
);
5492 if (rc
== REP_PROTOCOL_SUCCESS
) {
5493 smf_audit_event(ADT_smf_create_snap
, ADT_SUCCESS
, ADT_SUCCESS
,
5496 if (audit_data
.ed_auth
!= NULL
)
5497 free(audit_data
.ed_auth
);
5502 rc_snapshot_take_attach(rc_node_ptr_t
*npp
, rc_node_ptr_t
*outpp
)
5504 rc_node_t
*np
, *outp
;
5506 RC_NODE_PTR_GET_CHECK(np
, npp
);
5507 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_INSTANCE
) {
5508 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH
);
5511 RC_NODE_PTR_GET_CHECK_AND_LOCK(outp
, outpp
);
5512 if (outp
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_SNAPSHOT
) {
5513 (void) pthread_mutex_unlock(&outp
->rn_lock
);
5514 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
5517 return (rc_attach_snapshot(outp
, 0, np
, NULL
,
5518 NULL
)); /* drops outp's lock */
5522 rc_snapshot_attach(rc_node_ptr_t
*npp
, rc_node_ptr_t
*cpp
)
5527 char old_name
[REP_PROTOCOL_NAME_LEN
];
5530 char old_fmri
[REP_PROTOCOL_FMRI_LEN
];
5532 RC_NODE_PTR_GET_CHECK_AND_LOCK(np
, npp
);
5533 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_SNAPSHOT
) {
5534 (void) pthread_mutex_unlock(&np
->rn_lock
);
5535 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
5537 snapid
= np
->rn_snapshot_id
;
5538 rc
= rc_node_get_fmri_or_fragment(np
, old_fmri
, sizeof (old_fmri
),
5540 (void) pthread_mutex_unlock(&np
->rn_lock
);
5541 if (rc
!= REP_PROTOCOL_SUCCESS
)
5543 if (np
->rn_name
!= NULL
) {
5544 if (strlcpy(old_name
, np
->rn_name
, sizeof (old_name
)) >=
5545 sizeof (old_name
)) {
5546 return (REP_PROTOCOL_FAIL_TRUNCATED
);
5550 RC_NODE_PTR_GET_CHECK_AND_LOCK(cp
, cpp
);
5551 if (cp
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_SNAPSHOT
) {
5552 (void) pthread_mutex_unlock(&cp
->rn_lock
);
5553 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
5556 rc
= rc_attach_snapshot(cp
, snapid
, NULL
,
5557 old_fmri
, old_name
); /* drops cp's lock */
5562 * If the pgname property group under ent has type pgtype, and it has a
5563 * propname property with type ptype, return _SUCCESS. If pgtype is NULL,
5564 * it is not checked. If ent is not a service node, we will return _SUCCESS if
5565 * a property meeting the requirements exists in either the instance or its
5569 * _SUCCESS - see above
5570 * _DELETED - ent or one of its ancestors was deleted
5571 * _NO_RESOURCES - no resources
5572 * _NOT_FOUND - no matching property was found
5575 rc_svc_prop_exists(rc_node_t
*ent
, const char *pgname
, const char *pgtype
,
5576 const char *propname
, rep_protocol_value_type_t ptype
)
5579 rc_node_t
*pg
= NULL
, *spg
= NULL
, *svc
, *prop
;
5581 assert(!MUTEX_HELD(&ent
->rn_lock
));
5583 (void) pthread_mutex_lock(&ent
->rn_lock
);
5584 ret
= rc_node_find_named_child(ent
, pgname
,
5585 REP_PROTOCOL_ENTITY_PROPERTYGRP
, &pg
);
5586 (void) pthread_mutex_unlock(&ent
->rn_lock
);
5589 case REP_PROTOCOL_SUCCESS
:
5592 case REP_PROTOCOL_FAIL_DELETED
:
5593 case REP_PROTOCOL_FAIL_NO_RESOURCES
:
5597 bad_error("rc_node_find_named_child", ret
);
5600 if (ent
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_SERVICE
) {
5601 ret
= rc_node_find_ancestor(ent
, REP_PROTOCOL_ENTITY_SERVICE
,
5603 if (ret
!= REP_PROTOCOL_SUCCESS
) {
5604 assert(ret
== REP_PROTOCOL_FAIL_DELETED
);
5609 assert(svc
->rn_id
.rl_type
== REP_PROTOCOL_ENTITY_SERVICE
);
5611 (void) pthread_mutex_lock(&svc
->rn_lock
);
5612 ret
= rc_node_find_named_child(svc
, pgname
,
5613 REP_PROTOCOL_ENTITY_PROPERTYGRP
, &spg
);
5614 (void) pthread_mutex_unlock(&svc
->rn_lock
);
5619 case REP_PROTOCOL_SUCCESS
:
5622 case REP_PROTOCOL_FAIL_DELETED
:
5623 case REP_PROTOCOL_FAIL_NO_RESOURCES
:
5629 bad_error("rc_node_find_named_child", ret
);
5634 pgtype
!= NULL
&& strcmp(pg
->rn_type
, pgtype
) != 0) {
5640 pgtype
!= NULL
&& strcmp(spg
->rn_type
, pgtype
) != 0) {
5647 return (REP_PROTOCOL_FAIL_NOT_FOUND
);
5653 * At this point, pg is non-NULL, and is a property group node of the
5654 * correct type. spg, if non-NULL, is also a property group node of
5655 * the correct type. Check for the property in pg first, then spg
5658 (void) pthread_mutex_lock(&pg
->rn_lock
);
5659 ret
= rc_node_find_named_child(pg
, propname
,
5660 REP_PROTOCOL_ENTITY_PROPERTY
, &prop
);
5661 (void) pthread_mutex_unlock(&pg
->rn_lock
);
5664 case REP_PROTOCOL_SUCCESS
:
5666 if (prop
->rn_valtype
== ptype
) {
5670 return (REP_PROTOCOL_SUCCESS
);
5676 case REP_PROTOCOL_FAIL_NO_RESOURCES
:
5681 case REP_PROTOCOL_FAIL_DELETED
:
5685 bad_error("rc_node_find_named_child", ret
);
5689 return (REP_PROTOCOL_FAIL_NOT_FOUND
);
5693 (void) pthread_mutex_lock(&pg
->rn_lock
);
5694 ret
= rc_node_find_named_child(pg
, propname
,
5695 REP_PROTOCOL_ENTITY_PROPERTY
, &prop
);
5696 (void) pthread_mutex_unlock(&pg
->rn_lock
);
5699 case REP_PROTOCOL_SUCCESS
:
5701 if (prop
->rn_valtype
== ptype
) {
5703 return (REP_PROTOCOL_SUCCESS
);
5707 return (REP_PROTOCOL_FAIL_NOT_FOUND
);
5709 case REP_PROTOCOL_FAIL_NO_RESOURCES
:
5712 case REP_PROTOCOL_FAIL_DELETED
:
5713 return (REP_PROTOCOL_FAIL_NOT_FOUND
);
5716 bad_error("rc_node_find_named_child", ret
);
5719 return (REP_PROTOCOL_SUCCESS
);
5723 * Given a property group node, returns _SUCCESS if the property group may
5724 * be read without any special authorization.
5727 * _DELETED - np or an ancestor node was deleted
5728 * _TYPE_MISMATCH - np does not refer to a property group
5729 * _NO_RESOURCES - no resources
5730 * _PERMISSION_DENIED - authorization is required
5733 rc_node_pg_check_read_protect(rc_node_t
*np
)
5738 assert(!MUTEX_HELD(&np
->rn_lock
));
5740 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_PROPERTYGRP
)
5741 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH
);
5743 if (strcmp(np
->rn_type
, SCF_GROUP_FRAMEWORK
) == 0 ||
5744 strcmp(np
->rn_type
, SCF_GROUP_DEPENDENCY
) == 0 ||
5745 strcmp(np
->rn_type
, SCF_GROUP_METHOD
) == 0)
5746 return (REP_PROTOCOL_SUCCESS
);
5748 ret
= rc_node_parent(np
, &ent
);
5750 if (ret
!= REP_PROTOCOL_SUCCESS
)
5753 ret
= rc_svc_prop_exists(ent
, np
->rn_name
, np
->rn_type
,
5754 AUTH_PROP_READ
, REP_PROTOCOL_TYPE_STRING
);
5759 case REP_PROTOCOL_FAIL_NOT_FOUND
:
5760 return (REP_PROTOCOL_SUCCESS
);
5761 case REP_PROTOCOL_SUCCESS
:
5762 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED
);
5763 case REP_PROTOCOL_FAIL_DELETED
:
5764 case REP_PROTOCOL_FAIL_NO_RESOURCES
:
5767 bad_error("rc_svc_prop_exists", ret
);
5770 return (REP_PROTOCOL_SUCCESS
);
5775 * _DELETED - np's node or parent has been deleted
5776 * _TYPE_MISMATCH - np's node is not a property
5777 * _NO_RESOURCES - out of memory
5778 * _PERMISSION_DENIED - no authorization to read this property's value(s)
5779 * _BAD_REQUEST - np's parent is not a property group
5782 rc_node_property_may_read(rc_node_t
*np
)
5785 perm_status_t granted
= PERM_DENIED
;
5788 audit_event_data_t audit_data
;
5791 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_PROPERTY
)
5792 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH
);
5794 if (client_is_privileged())
5795 return (REP_PROTOCOL_SUCCESS
);
5798 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED
);
5800 ret
= rc_node_parent(np
, &pgp
);
5802 if (ret
!= REP_PROTOCOL_SUCCESS
)
5805 if (pgp
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_PROPERTYGRP
) {
5807 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
5810 ret
= rc_node_pg_check_read_protect(pgp
);
5812 if (ret
!= REP_PROTOCOL_FAIL_PERMISSION_DENIED
) {
5821 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
5824 ret
= perm_add_enabling(pcp
, AUTH_MODIFY
);
5826 if (ret
== REP_PROTOCOL_SUCCESS
) {
5827 const char * const auth
=
5828 perm_auth_for_pgtype(pgp
->rn_type
);
5831 ret
= perm_add_enabling(pcp
, auth
);
5835 * If you are permitted to modify the value, you may also
5836 * read it. This means that both the MODIFY and VALUE
5837 * authorizations are acceptable. We don't allow requests
5838 * for AUTH_PROP_MODIFY if all you have is $AUTH_PROP_VALUE,
5839 * however, to avoid leaking possibly valuable information
5840 * since such a user can't change the property anyway.
5842 if (ret
== REP_PROTOCOL_SUCCESS
)
5843 ret
= perm_add_enabling_values(pcp
, pgp
,
5846 if (ret
== REP_PROTOCOL_SUCCESS
&&
5847 strcmp(np
->rn_name
, AUTH_PROP_MODIFY
) != 0)
5848 ret
= perm_add_enabling_values(pcp
, pgp
,
5851 if (ret
== REP_PROTOCOL_SUCCESS
)
5852 ret
= perm_add_enabling_values(pcp
, pgp
,
5857 if (ret
== REP_PROTOCOL_SUCCESS
) {
5858 granted
= perm_granted(pcp
);
5859 if (granted
== PERM_FAIL
)
5860 ret
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
5861 if (granted
== PERM_GONE
)
5862 ret
= REP_PROTOCOL_FAIL_PERMISSION_DENIED
;
5865 if (ret
== REP_PROTOCOL_SUCCESS
) {
5866 /* Generate a read_prop audit event. */
5867 audit_data
.ed_fmri
= malloc(REP_PROTOCOL_FMRI_LEN
);
5868 if (audit_data
.ed_fmri
== NULL
)
5869 ret
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
5871 if (ret
== REP_PROTOCOL_SUCCESS
) {
5872 ret
= rc_node_get_fmri_or_fragment(np
, audit_data
.ed_fmri
,
5873 REP_PROTOCOL_FMRI_LEN
, &sz_out
);
5875 if (ret
== REP_PROTOCOL_SUCCESS
) {
5879 if (granted
== PERM_DENIED
) {
5880 status
= ADT_FAILURE
;
5881 ret_value
= ADT_FAIL_VALUE_AUTH
;
5883 status
= ADT_SUCCESS
;
5884 ret_value
= ADT_SUCCESS
;
5886 audit_data
.ed_auth
= pcp
->pc_auth_string
;
5887 smf_audit_event(ADT_smf_read_prop
,
5888 status
, ret_value
, &audit_data
);
5890 free(audit_data
.ed_fmri
);
5894 if ((ret
== REP_PROTOCOL_SUCCESS
) && (granted
== PERM_DENIED
))
5895 ret
= REP_PROTOCOL_FAIL_PERMISSION_DENIED
;
5898 #endif /* NATIVE_BUILD */
5905 rc_iter_filter_name(rc_node_t
*np
, void *s
)
5907 const char *name
= s
;
5909 return (strcmp(np
->rn_name
, name
) == 0);
5913 rc_iter_filter_type(rc_node_t
*np
, void *s
)
5915 const char *type
= s
;
5917 return (np
->rn_type
!= NULL
&& strcmp(np
->rn_type
, type
) == 0);
5922 rc_iter_null_filter(rc_node_t
*np
, void *s
)
5928 * Allocate & initialize an rc_node_iter_t structure. Essentially, ensure
5929 * np->rn_children is populated and call uu_list_walk_start(np->rn_children).
5930 * If successful, leaves a hold on np & increments np->rn_other_refs
5932 * If composed is true, then set up for iteration across the top level of np's
5933 * composition chain. If successful, leaves a hold on np and increments
5934 * rn_other_refs for the top level of np's composition chain.
5939 * _TYPE_MISMATCH - np cannot carry type children
5943 rc_iter_create(rc_node_iter_t
**resp
, rc_node_t
*np
, uint32_t type
,
5944 rc_iter_filter_func
*filter
, void *arg
, boolean_t composed
)
5946 rc_node_iter_t
*nip
;
5949 assert(*resp
== NULL
);
5951 nip
= uu_zalloc(sizeof (*nip
));
5953 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
5955 /* np is held by the client's rc_node_ptr_t */
5956 if (np
->rn_id
.rl_type
== REP_PROTOCOL_ENTITY_CPROPERTYGRP
)
5960 (void) pthread_mutex_lock(&np
->rn_lock
);
5962 if ((res
= rc_node_fill_children(np
, type
)) !=
5963 REP_PROTOCOL_SUCCESS
) {
5964 (void) pthread_mutex_unlock(&np
->rn_lock
);
5969 nip
->rni_clevel
= -1;
5971 nip
->rni_iter
= uu_list_walk_start(np
->rn_children
,
5973 if (nip
->rni_iter
!= NULL
) {
5974 nip
->rni_iter_node
= np
;
5975 rc_node_hold_other(np
);
5977 (void) pthread_mutex_unlock(&np
->rn_lock
);
5979 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
5981 (void) pthread_mutex_unlock(&np
->rn_lock
);
5985 if (np
->rn_id
.rl_type
== REP_PROTOCOL_ENTITY_SNAPSHOT
) {
5986 /* rn_cchain isn't valid until children are loaded. */
5987 (void) pthread_mutex_lock(&np
->rn_lock
);
5988 res
= rc_node_fill_children(np
,
5989 REP_PROTOCOL_ENTITY_SNAPLEVEL
);
5990 (void) pthread_mutex_unlock(&np
->rn_lock
);
5991 if (res
!= REP_PROTOCOL_SUCCESS
) {
5996 /* Check for an empty snapshot. */
5997 if (np
->rn_cchain
[0] == NULL
)
6001 /* Start at the top of the composition chain. */
6002 for (nip
->rni_clevel
= 0; ; ++nip
->rni_clevel
) {
6003 if (nip
->rni_clevel
>= COMPOSITION_DEPTH
) {
6004 /* Empty composition chain. */
6006 nip
->rni_clevel
= -1;
6007 nip
->rni_iter
= NULL
;
6008 /* It's ok, iter_next() will return _DONE. */
6012 ent
= np
->rn_cchain
[nip
->rni_clevel
];
6013 assert(ent
!= NULL
);
6015 if (rc_node_check_and_lock(ent
) == REP_PROTOCOL_SUCCESS
)
6018 /* Someone deleted it, so try the next one. */
6021 res
= rc_node_fill_children(ent
, type
);
6023 if (res
== REP_PROTOCOL_SUCCESS
) {
6024 nip
->rni_iter
= uu_list_walk_start(ent
->rn_children
,
6027 if (nip
->rni_iter
== NULL
)
6028 res
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
6030 nip
->rni_iter_node
= ent
;
6031 rc_node_hold_other(ent
);
6035 if (res
!= REP_PROTOCOL_SUCCESS
) {
6036 (void) pthread_mutex_unlock(&ent
->rn_lock
);
6041 (void) pthread_mutex_unlock(&ent
->rn_lock
);
6045 rc_node_hold(np
); /* released by rc_iter_end() */
6046 nip
->rni_parent
= np
;
6047 nip
->rni_type
= type
;
6048 nip
->rni_filter
= (filter
!= NULL
)? filter
: rc_iter_null_filter
;
6049 nip
->rni_filter_arg
= arg
;
6051 return (REP_PROTOCOL_SUCCESS
);
6055 rc_iter_end(rc_node_iter_t
*iter
)
6057 rc_node_t
*np
= iter
->rni_parent
;
6059 if (iter
->rni_clevel
>= 0)
6060 np
= np
->rn_cchain
[iter
->rni_clevel
];
6062 assert(MUTEX_HELD(&np
->rn_lock
));
6063 if (iter
->rni_iter
!= NULL
)
6064 uu_list_walk_end(iter
->rni_iter
);
6065 iter
->rni_iter
= NULL
;
6067 (void) pthread_mutex_unlock(&np
->rn_lock
);
6068 rc_node_rele(iter
->rni_parent
);
6069 if (iter
->rni_iter_node
!= NULL
)
6070 rc_node_rele_other(iter
->rni_iter_node
);
6075 * _NOT_SET - npp is reset
6076 * _DELETED - npp's node has been deleted
6077 * _NOT_APPLICABLE - npp's node is not a property
6078 * _NO_RESOURCES - out of memory
6081 rc_node_setup_value_iter(rc_node_ptr_t
*npp
, rc_node_iter_t
**iterp
)
6085 rc_node_iter_t
*nip
;
6087 assert(*iterp
== NULL
);
6089 RC_NODE_PTR_GET_CHECK_AND_LOCK(np
, npp
);
6091 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_PROPERTY
) {
6092 (void) pthread_mutex_unlock(&np
->rn_lock
);
6093 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE
);
6096 nip
= uu_zalloc(sizeof (*nip
));
6098 (void) pthread_mutex_unlock(&np
->rn_lock
);
6099 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
6102 nip
->rni_parent
= np
;
6103 nip
->rni_iter
= NULL
;
6104 nip
->rni_clevel
= -1;
6105 nip
->rni_type
= REP_PROTOCOL_ENTITY_VALUE
;
6106 nip
->rni_offset
= 0;
6107 nip
->rni_last_offset
= 0;
6109 rc_node_hold_locked(np
);
6112 (void) pthread_mutex_unlock(&np
->rn_lock
);
6114 return (REP_PROTOCOL_SUCCESS
);
6119 * _NO_RESOURCES - out of memory
6120 * _NOT_SET - npp is reset
6121 * _DELETED - npp's node has been deleted
6122 * _TYPE_MISMATCH - npp's node is not a property
6123 * _NOT_FOUND - property has no values
6124 * _TRUNCATED - property has >1 values (first is written into out)
6125 * _SUCCESS - property has 1 value (which is written into out)
6126 * _PERMISSION_DENIED - no authorization to read property value(s)
6128 * We shorten *sz_out to not include anything after the final '\0'.
6131 rc_node_get_property_value(rc_node_ptr_t
*npp
,
6132 struct rep_protocol_value_response
*out
, size_t *sz_out
)
6138 assert(*sz_out
== sizeof (*out
));
6140 RC_NODE_PTR_GET_CHECK_AND_HOLD(np
, npp
);
6141 ret
= rc_node_property_may_read(np
);
6144 if (ret
!= REP_PROTOCOL_SUCCESS
)
6147 RC_NODE_PTR_GET_CHECK_AND_LOCK(np
, npp
);
6149 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_PROPERTY
) {
6150 (void) pthread_mutex_unlock(&np
->rn_lock
);
6151 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH
);
6154 if (np
->rn_values_size
== 0) {
6155 (void) pthread_mutex_unlock(&np
->rn_lock
);
6156 return (REP_PROTOCOL_FAIL_NOT_FOUND
);
6158 out
->rpr_type
= np
->rn_valtype
;
6159 w
= strlcpy(out
->rpr_value
, &np
->rn_values
[0],
6160 sizeof (out
->rpr_value
));
6162 if (w
>= sizeof (out
->rpr_value
))
6163 backend_panic("value too large");
6165 *sz_out
= offsetof(struct rep_protocol_value_response
,
6168 ret
= (np
->rn_values_count
!= 1)? REP_PROTOCOL_FAIL_TRUNCATED
:
6169 REP_PROTOCOL_SUCCESS
;
6170 (void) pthread_mutex_unlock(&np
->rn_lock
);
6175 rc_iter_next_value(rc_node_iter_t
*iter
,
6176 struct rep_protocol_value_response
*out
, size_t *sz_out
, int repeat
)
6178 rc_node_t
*np
= iter
->rni_parent
;
6186 rep_protocol_responseid_t result
;
6188 assert(*sz_out
== sizeof (*out
));
6190 (void) memset(out
, '\0', *sz_out
);
6192 if (iter
->rni_type
!= REP_PROTOCOL_ENTITY_VALUE
)
6193 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
6196 ret
= rc_node_property_may_read(np
);
6198 if (ret
!= REP_PROTOCOL_SUCCESS
)
6201 RC_NODE_CHECK_AND_LOCK(np
);
6203 vals
= np
->rn_values
;
6204 len
= np
->rn_values_size
;
6206 out
->rpr_type
= np
->rn_valtype
;
6208 start
= (repeat
)? iter
->rni_last_offset
: iter
->rni_offset
;
6210 if (len
== 0 || start
>= len
) {
6211 result
= REP_PROTOCOL_DONE
;
6212 *sz_out
-= sizeof (out
->rpr_value
);
6214 w
= strlcpy(out
->rpr_value
, &vals
[start
],
6215 sizeof (out
->rpr_value
));
6217 if (w
>= sizeof (out
->rpr_value
))
6218 backend_panic("value too large");
6220 *sz_out
= offsetof(struct rep_protocol_value_response
,
6224 * update the offsets if we're not repeating
6227 iter
->rni_last_offset
= iter
->rni_offset
;
6228 iter
->rni_offset
+= (w
+ 1);
6231 result
= REP_PROTOCOL_SUCCESS
;
6234 (void) pthread_mutex_unlock(&np
->rn_lock
);
6239 * Entry point for ITER_START from client.c. Validate the arguments & call
6245 * _TYPE_MISMATCH - np cannot carry type children
6246 * _BAD_REQUEST - flags is invalid
6247 * pattern is invalid
6250 * _TYPE_MISMATCH - *npp cannot have children of type
6254 rc_node_setup_iter(rc_node_ptr_t
*npp
, rc_node_iter_t
**iterp
,
6255 uint32_t type
, uint32_t flags
, const char *pattern
)
6258 rc_iter_filter_func
*f
= NULL
;
6261 RC_NODE_PTR_GET_CHECK(np
, npp
);
6263 if (pattern
!= NULL
&& pattern
[0] == '\0')
6266 if (type
== REP_PROTOCOL_ENTITY_VALUE
) {
6267 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_PROPERTY
)
6268 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH
);
6269 if (flags
!= RP_ITER_START_ALL
|| pattern
!= NULL
)
6270 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
6272 rc
= rc_node_setup_value_iter(npp
, iterp
);
6273 assert(rc
!= REP_PROTOCOL_FAIL_NOT_APPLICABLE
);
6277 if ((rc
= rc_check_parent_child(np
->rn_id
.rl_type
, type
)) !=
6278 REP_PROTOCOL_SUCCESS
)
6281 if (((flags
& RP_ITER_START_FILT_MASK
) == RP_ITER_START_ALL
) ^
6283 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
6285 /* Composition only works for instances & snapshots. */
6286 if ((flags
& RP_ITER_START_COMPOSED
) &&
6287 (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_INSTANCE
&&
6288 np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_SNAPSHOT
))
6289 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
6291 if (pattern
!= NULL
) {
6292 if ((rc
= rc_check_type_name(type
, pattern
)) !=
6293 REP_PROTOCOL_SUCCESS
)
6295 pattern
= strdup(pattern
);
6296 if (pattern
== NULL
)
6297 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
6300 switch (flags
& RP_ITER_START_FILT_MASK
) {
6301 case RP_ITER_START_ALL
:
6304 case RP_ITER_START_EXACT
:
6305 f
= rc_iter_filter_name
;
6307 case RP_ITER_START_PGTYPE
:
6308 if (type
!= REP_PROTOCOL_ENTITY_PROPERTYGRP
) {
6309 free((void *)pattern
);
6310 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
6312 f
= rc_iter_filter_type
;
6315 free((void *)pattern
);
6316 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
6319 rc
= rc_iter_create(iterp
, np
, type
, f
, (void *)pattern
,
6320 flags
& RP_ITER_START_COMPOSED
);
6321 if (rc
!= REP_PROTOCOL_SUCCESS
&& pattern
!= NULL
)
6322 free((void *)pattern
);
6328 * Do uu_list_walk_next(iter->rni_iter) until we find a child which matches
6330 * For composed iterators, then check to see if there's an overlapping entity
6331 * (see embedded comments). If we reach the end of the list, start over at
6335 * _BAD_REQUEST - iter walks values
6336 * _TYPE_MISMATCH - iter does not walk type entities
6337 * _DELETED - parent was deleted
6339 * _INVALID_TYPE - type is invalid
6343 * For composed property group iterators, can also return
6344 * _TYPE_MISMATCH - parent cannot have type children
6347 rc_iter_next(rc_node_iter_t
*iter
, rc_node_ptr_t
*out
, uint32_t type
)
6349 rc_node_t
*np
= iter
->rni_parent
;
6353 if (iter
->rni_type
== REP_PROTOCOL_ENTITY_VALUE
)
6354 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
6356 if (iter
->rni_iter
== NULL
) {
6357 rc_node_clear(out
, 0);
6358 return (REP_PROTOCOL_DONE
);
6361 if (iter
->rni_type
!= type
) {
6362 rc_node_clear(out
, 0);
6363 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH
);
6366 (void) pthread_mutex_lock(&np
->rn_lock
); /* held by _iter_create() */
6368 if (!rc_node_wait_flag(np
, RC_NODE_CHILDREN_CHANGING
)) {
6369 (void) pthread_mutex_unlock(&np
->rn_lock
);
6370 rc_node_clear(out
, 1);
6371 return (REP_PROTOCOL_FAIL_DELETED
);
6374 if (iter
->rni_clevel
>= 0) {
6375 /* Composed iterator. Iterate over appropriate level. */
6376 (void) pthread_mutex_unlock(&np
->rn_lock
);
6377 np
= np
->rn_cchain
[iter
->rni_clevel
];
6379 * If iter->rni_parent is an instance or a snapshot, np must
6380 * be valid since iter holds iter->rni_parent & possible
6381 * levels (service, instance, snaplevel) cannot be destroyed
6382 * while rni_parent is held. If iter->rni_parent is
6383 * a composed property group then rc_node_setup_cpg() put
6387 (void) pthread_mutex_lock(&np
->rn_lock
);
6389 if (!rc_node_wait_flag(np
, RC_NODE_CHILDREN_CHANGING
)) {
6390 (void) pthread_mutex_unlock(&np
->rn_lock
);
6391 rc_node_clear(out
, 1);
6392 return (REP_PROTOCOL_FAIL_DELETED
);
6396 assert(np
->rn_flags
& RC_NODE_HAS_CHILDREN
);
6399 res
= uu_list_walk_next(iter
->rni_iter
);
6401 rc_node_t
*parent
= iter
->rni_parent
;
6403 #if COMPOSITION_DEPTH == 2
6404 if (iter
->rni_clevel
< 0 || iter
->rni_clevel
== 1) {
6405 /* release walker and lock */
6410 /* Stop walking current level. */
6411 uu_list_walk_end(iter
->rni_iter
);
6412 iter
->rni_iter
= NULL
;
6413 (void) pthread_mutex_unlock(&np
->rn_lock
);
6414 rc_node_rele_other(iter
->rni_iter_node
);
6415 iter
->rni_iter_node
= NULL
;
6417 /* Start walking next level. */
6419 np
= parent
->rn_cchain
[iter
->rni_clevel
];
6422 #error This code must be updated.
6425 (void) pthread_mutex_lock(&np
->rn_lock
);
6427 rc
= rc_node_fill_children(np
, iter
->rni_type
);
6429 if (rc
== REP_PROTOCOL_SUCCESS
) {
6431 uu_list_walk_start(np
->rn_children
,
6434 if (iter
->rni_iter
== NULL
)
6435 rc
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
6437 iter
->rni_iter_node
= np
;
6438 rc_node_hold_other(np
);
6442 if (rc
!= REP_PROTOCOL_SUCCESS
) {
6443 (void) pthread_mutex_unlock(&np
->rn_lock
);
6444 rc_node_clear(out
, 0);
6451 if (res
->rn_id
.rl_type
!= type
||
6452 !iter
->rni_filter(res
, iter
->rni_filter_arg
))
6456 * If we're composed and not at the top level, check to see if
6457 * there's an entity at a higher level with the same name. If
6458 * so, skip this one.
6460 if (iter
->rni_clevel
> 0) {
6461 rc_node_t
*ent
= iter
->rni_parent
->rn_cchain
[0];
6464 #if COMPOSITION_DEPTH == 2
6465 assert(iter
->rni_clevel
== 1);
6467 (void) pthread_mutex_unlock(&np
->rn_lock
);
6468 (void) pthread_mutex_lock(&ent
->rn_lock
);
6469 rc
= rc_node_find_named_child(ent
, res
->rn_name
, type
,
6471 if (rc
== REP_PROTOCOL_SUCCESS
&& pg
!= NULL
)
6473 (void) pthread_mutex_unlock(&ent
->rn_lock
);
6474 if (rc
!= REP_PROTOCOL_SUCCESS
) {
6475 rc_node_clear(out
, 0);
6478 (void) pthread_mutex_lock(&np
->rn_lock
);
6480 /* Make sure np isn't being deleted all of a sudden. */
6481 if (!rc_node_wait_flag(np
, RC_NODE_DYING
)) {
6482 (void) pthread_mutex_unlock(&np
->rn_lock
);
6483 rc_node_clear(out
, 1);
6484 return (REP_PROTOCOL_FAIL_DELETED
);
6491 #error This code must be updated.
6496 * If we're composed, iterating over property groups, and not
6497 * at the bottom level, check to see if there's a pg at lower
6498 * level with the same name. If so, return a cpg.
6500 if (iter
->rni_clevel
>= 0 &&
6501 type
== REP_PROTOCOL_ENTITY_PROPERTYGRP
&&
6502 iter
->rni_clevel
< COMPOSITION_DEPTH
- 1) {
6503 #if COMPOSITION_DEPTH == 2
6505 rc_node_t
*ent
= iter
->rni_parent
->rn_cchain
[1];
6507 rc_node_hold(res
); /* While we drop np->rn_lock */
6509 (void) pthread_mutex_unlock(&np
->rn_lock
);
6510 (void) pthread_mutex_lock(&ent
->rn_lock
);
6511 rc
= rc_node_find_named_child(ent
, res
->rn_name
, type
,
6513 /* holds pg if not NULL */
6514 (void) pthread_mutex_unlock(&ent
->rn_lock
);
6515 if (rc
!= REP_PROTOCOL_SUCCESS
) {
6517 rc_node_clear(out
, 0);
6521 (void) pthread_mutex_lock(&np
->rn_lock
);
6522 if (!rc_node_wait_flag(np
, RC_NODE_DYING
)) {
6523 (void) pthread_mutex_unlock(&np
->rn_lock
);
6527 rc_node_clear(out
, 1);
6528 return (REP_PROTOCOL_FAIL_DELETED
);
6532 (void) pthread_mutex_unlock(&np
->rn_lock
);
6534 (void) pthread_mutex_lock(&np
->rn_lock
);
6535 if (!rc_node_wait_flag(np
, RC_NODE_DYING
)) {
6536 (void) pthread_mutex_unlock(&np
->
6538 rc_node_clear(out
, 1);
6539 return (REP_PROTOCOL_FAIL_DELETED
);
6544 /* Keep res held for rc_node_setup_cpg(). */
6546 cpg
= rc_node_alloc();
6548 (void) pthread_mutex_unlock(
6552 rc_node_clear(out
, 0);
6553 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
6556 switch (rc_node_setup_cpg(cpg
, res
, pg
)) {
6557 case REP_PROTOCOL_SUCCESS
:
6561 case REP_PROTOCOL_FAIL_TYPE_MISMATCH
:
6563 (void) pthread_mutex_unlock(&np
->
6565 rc_node_destroy(cpg
);
6568 (void) pthread_mutex_lock(&np
->
6570 if (!rc_node_wait_flag(np
,
6572 (void) pthread_mutex_unlock(&
6574 rc_node_clear(out
, 1);
6576 (REP_PROTOCOL_FAIL_DELETED
);
6580 case REP_PROTOCOL_FAIL_NO_RESOURCES
:
6581 rc_node_destroy(cpg
);
6582 (void) pthread_mutex_unlock(
6586 rc_node_clear(out
, 0);
6587 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
6595 #error This code must be updated.
6600 (void) pthread_mutex_unlock(&np
->rn_lock
);
6603 rc_node_assign(out
, res
);
6606 return (REP_PROTOCOL_DONE
);
6608 return (REP_PROTOCOL_SUCCESS
);
6612 rc_iter_destroy(rc_node_iter_t
**nipp
)
6614 rc_node_iter_t
*nip
= *nipp
;
6618 return; /* already freed */
6620 np
= nip
->rni_parent
;
6622 if (nip
->rni_filter_arg
!= NULL
)
6623 free(nip
->rni_filter_arg
);
6624 nip
->rni_filter_arg
= NULL
;
6626 if (nip
->rni_type
== REP_PROTOCOL_ENTITY_VALUE
||
6627 nip
->rni_iter
!= NULL
) {
6628 if (nip
->rni_clevel
< 0)
6629 (void) pthread_mutex_lock(&np
->rn_lock
);
6631 (void) pthread_mutex_lock(
6632 &np
->rn_cchain
[nip
->rni_clevel
]->rn_lock
);
6633 rc_iter_end(nip
); /* release walker and lock */
6635 nip
->rni_parent
= NULL
;
6642 rc_node_setup_tx(rc_node_ptr_t
*npp
, rc_node_ptr_t
*txp
)
6647 perm_status_t granted
;
6648 rc_auth_state_t authorized
= RC_AUTH_UNKNOWN
;
6649 char *auth_string
= NULL
;
6651 RC_NODE_PTR_GET_CHECK_AND_HOLD(np
, npp
);
6653 if (np
->rn_id
.rl_type
== REP_PROTOCOL_ENTITY_CPROPERTYGRP
) {
6655 np
= np
->rn_cchain
[0];
6656 RC_NODE_CHECK_AND_HOLD(np
);
6659 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_PROPERTYGRP
) {
6661 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH
);
6664 if (np
->rn_id
.rl_ids
[ID_SNAPSHOT
] != 0) {
6666 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED
);
6670 if (client_is_privileged())
6673 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED
);
6675 if (is_main_repository
== 0)
6678 /* permission check */
6682 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
6685 if (np
->rn_id
.rl_ids
[ID_INSTANCE
] != 0 && /* instance pg */
6686 ((strcmp(np
->rn_name
, AUTH_PG_ACTIONS
) == 0 &&
6687 strcmp(np
->rn_type
, AUTH_PG_ACTIONS_TYPE
) == 0) ||
6688 (strcmp(np
->rn_name
, AUTH_PG_GENERAL_OVR
) == 0 &&
6689 strcmp(np
->rn_type
, AUTH_PG_GENERAL_OVR_TYPE
) == 0))) {
6692 /* solaris.smf.modify can be used */
6693 ret
= perm_add_enabling(pcp
, AUTH_MODIFY
);
6694 if (ret
!= REP_PROTOCOL_SUCCESS
) {
6700 /* solaris.smf.manage can be used. */
6701 ret
= perm_add_enabling(pcp
, AUTH_MANAGE
);
6703 if (ret
!= REP_PROTOCOL_SUCCESS
) {
6709 /* general/action_authorization values can be used. */
6710 ret
= rc_node_parent(np
, &instn
);
6711 if (ret
!= REP_PROTOCOL_SUCCESS
) {
6712 assert(ret
== REP_PROTOCOL_FAIL_DELETED
);
6715 return (REP_PROTOCOL_FAIL_DELETED
);
6718 assert(instn
->rn_id
.rl_type
== REP_PROTOCOL_ENTITY_INSTANCE
);
6720 ret
= perm_add_inst_action_auth(pcp
, instn
);
6721 rc_node_rele(instn
);
6723 case REP_PROTOCOL_SUCCESS
:
6726 case REP_PROTOCOL_FAIL_DELETED
:
6727 case REP_PROTOCOL_FAIL_NO_RESOURCES
:
6733 bad_error("perm_add_inst_action_auth", ret
);
6736 if (strcmp(np
->rn_name
, AUTH_PG_ACTIONS
) == 0)
6737 authorized
= RC_AUTH_PASSED
; /* No check on commit. */
6739 ret
= perm_add_enabling(pcp
, AUTH_MODIFY
);
6741 if (ret
== REP_PROTOCOL_SUCCESS
) {
6742 /* propertygroup-type-specific authorization */
6743 /* no locking because rn_type won't change anyway */
6744 const char * const auth
=
6745 perm_auth_for_pgtype(np
->rn_type
);
6748 ret
= perm_add_enabling(pcp
, auth
);
6751 if (ret
== REP_PROTOCOL_SUCCESS
)
6752 /* propertygroup/transaction-type-specific auths */
6754 perm_add_enabling_values(pcp
, np
, AUTH_PROP_VALUE
);
6756 if (ret
== REP_PROTOCOL_SUCCESS
)
6758 perm_add_enabling_values(pcp
, np
, AUTH_PROP_MODIFY
);
6760 /* AUTH_MANAGE can manipulate general/AUTH_PROP_ACTION */
6761 if (ret
== REP_PROTOCOL_SUCCESS
&&
6762 strcmp(np
->rn_name
, AUTH_PG_GENERAL
) == 0 &&
6763 strcmp(np
->rn_type
, AUTH_PG_GENERAL_TYPE
) == 0)
6764 ret
= perm_add_enabling(pcp
, AUTH_MANAGE
);
6766 if (ret
!= REP_PROTOCOL_SUCCESS
) {
6773 granted
= perm_granted(pcp
);
6774 ret
= map_granted_status(granted
, pcp
, &auth_string
);
6777 if ((granted
== PERM_GONE
) || (granted
== PERM_FAIL
) ||
6778 (ret
== REP_PROTOCOL_FAIL_NO_RESOURCES
)) {
6784 if (granted
== PERM_DENIED
) {
6786 * If we get here, the authorization failed.
6787 * Unfortunately, we don't have enough information at this
6788 * point to generate the security audit events. We'll only
6789 * get that information when the client tries to commit the
6790 * event. Thus, we'll remember the failed authorization,
6791 * so that we can generate the audit events later.
6793 authorized
= RC_AUTH_FAILED
;
6795 #endif /* NATIVE_BUILD */
6798 rc_node_assign(txp
, np
);
6799 txp
->rnp_authorized
= authorized
;
6800 if (authorized
!= RC_AUTH_UNKNOWN
) {
6801 /* Save the authorization string. */
6802 if (txp
->rnp_auth_string
!= NULL
)
6803 free((void *)txp
->rnp_auth_string
);
6804 txp
->rnp_auth_string
= auth_string
;
6805 auth_string
= NULL
; /* Don't free until done with txp. */
6809 if (auth_string
!= NULL
)
6811 return (REP_PROTOCOL_SUCCESS
);
6815 * Return 1 if the given transaction commands only modify the values of
6816 * properties other than "modify_authorization". Return -1 if any of the
6817 * commands are invalid, and 0 otherwise.
6820 tx_allow_value(const void *cmds_arg
, size_t cmds_sz
, rc_node_t
*pg
)
6822 const struct rep_protocol_transaction_cmd
*cmds
;
6828 assert(!MUTEX_HELD(&pg
->rn_lock
));
6830 loc
= (uintptr_t)cmds_arg
;
6832 while (cmds_sz
> 0) {
6833 cmds
= (struct rep_protocol_transaction_cmd
*)loc
;
6835 if (cmds_sz
<= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE
)
6838 sz
= cmds
->rptc_size
;
6839 if (sz
<= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE
)
6846 switch (cmds
[0].rptc_action
) {
6847 case REP_PROTOCOL_TX_ENTRY_CLEAR
:
6850 case REP_PROTOCOL_TX_ENTRY_REPLACE
:
6852 (void) pthread_mutex_lock(&pg
->rn_lock
);
6854 if (rc_node_find_named_child(pg
,
6855 (const char *)cmds
[0].rptc_data
,
6856 REP_PROTOCOL_ENTITY_PROPERTY
, &prop
) ==
6857 REP_PROTOCOL_SUCCESS
) {
6859 ok
= prop
->rn_valtype
==
6862 * rc_node_find_named_child()
6863 * places a hold on prop which we
6864 * do not need to hang on to.
6869 (void) pthread_mutex_unlock(&pg
->rn_lock
);
6878 if (strcmp((const char *)cmds
[0].rptc_data
, AUTH_PROP_MODIFY
)
6890 * Return 1 if any of the given transaction commands affect
6891 * "action_authorization". Return -1 if any of the commands are invalid and
6892 * 0 in all other cases.
6895 tx_modifies_action(const void *cmds_arg
, size_t cmds_sz
)
6897 const struct rep_protocol_transaction_cmd
*cmds
;
6901 loc
= (uintptr_t)cmds_arg
;
6903 while (cmds_sz
> 0) {
6904 cmds
= (struct rep_protocol_transaction_cmd
*)loc
;
6906 if (cmds_sz
<= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE
)
6909 sz
= cmds
->rptc_size
;
6910 if (sz
<= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE
)
6917 if (strcmp((const char *)cmds
[0].rptc_data
, AUTH_PROP_ACTION
)
6929 * Returns 1 if the transaction commands only modify properties named
6933 tx_only_enabled(const void *cmds_arg
, size_t cmds_sz
)
6935 const struct rep_protocol_transaction_cmd
*cmd
;
6939 loc
= (uintptr_t)cmds_arg
;
6941 while (cmds_sz
> 0) {
6942 cmd
= (struct rep_protocol_transaction_cmd
*)loc
;
6944 if (cmds_sz
<= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE
)
6947 sz
= cmd
->rptc_size
;
6948 if (sz
<= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE
)
6955 if (strcmp((const char *)cmd
->rptc_data
, AUTH_PROP_ENABLED
)
6967 rc_tx_commit(rc_node_ptr_t
*txp
, const void *cmds
, size_t cmds_sz
)
6969 rc_node_t
*np
= txp
->rnp_node
;
6972 rc_node_pg_notify_t
*pnp
;
6975 perm_status_t granted
;
6977 char *pg_fmri
= NULL
;
6978 char *auth_string
= NULL
;
6979 int auth_status
= ADT_SUCCESS
;
6980 int auth_ret_value
= ADT_SUCCESS
;
6983 tx_commit_data_t
*tx_data
= NULL
;
6987 if ((txp
->rnp_authorized
!= RC_AUTH_UNKNOWN
) &&
6988 (txp
->rnp_auth_string
!= NULL
)) {
6989 auth_string
= strdup(txp
->rnp_auth_string
);
6990 if (auth_string
== NULL
)
6991 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
6994 if ((txp
->rnp_authorized
== RC_AUTH_UNKNOWN
) &&
6995 is_main_repository
) {
6997 if (!client_is_privileged()) {
6998 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED
);
7001 /* permission check: depends on contents of transaction */
7004 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
7006 /* If normal is cleared, we won't do the normal checks. */
7008 rc
= REP_PROTOCOL_SUCCESS
;
7010 if (strcmp(np
->rn_name
, AUTH_PG_GENERAL
) == 0 &&
7011 strcmp(np
->rn_type
, AUTH_PG_GENERAL_TYPE
) == 0) {
7012 /* Touching general[framework]/action_authorization? */
7013 rc
= tx_modifies_action(cmds
, cmds_sz
);
7016 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
7021 * Yes: only AUTH_MODIFY and AUTH_MANAGE
7024 rc
= perm_add_enabling(pcp
, AUTH_MODIFY
);
7026 if (rc
== REP_PROTOCOL_SUCCESS
)
7027 rc
= perm_add_enabling(pcp
,
7032 rc
= REP_PROTOCOL_SUCCESS
;
7034 } else if (np
->rn_id
.rl_ids
[ID_INSTANCE
] != 0 &&
7035 strcmp(np
->rn_name
, AUTH_PG_GENERAL_OVR
) == 0 &&
7036 strcmp(np
->rn_type
, AUTH_PG_GENERAL_OVR_TYPE
) == 0) {
7039 rc
= tx_only_enabled(cmds
, cmds_sz
);
7042 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
7046 rc
= rc_node_parent(np
, &instn
);
7047 if (rc
!= REP_PROTOCOL_SUCCESS
) {
7048 assert(rc
== REP_PROTOCOL_FAIL_DELETED
);
7053 assert(instn
->rn_id
.rl_type
==
7054 REP_PROTOCOL_ENTITY_INSTANCE
);
7056 rc
= perm_add_inst_action_auth(pcp
, instn
);
7057 rc_node_rele(instn
);
7059 case REP_PROTOCOL_SUCCESS
:
7062 case REP_PROTOCOL_FAIL_DELETED
:
7063 case REP_PROTOCOL_FAIL_NO_RESOURCES
:
7068 bad_error("perm_add_inst_action_auth",
7072 rc
= REP_PROTOCOL_SUCCESS
;
7076 if (rc
== REP_PROTOCOL_SUCCESS
&& normal
) {
7077 rc
= perm_add_enabling(pcp
, AUTH_MODIFY
);
7079 if (rc
== REP_PROTOCOL_SUCCESS
) {
7080 /* Add pgtype-specific authorization. */
7081 const char * const auth
=
7082 perm_auth_for_pgtype(np
->rn_type
);
7085 rc
= perm_add_enabling(pcp
, auth
);
7088 /* Add pg-specific modify_authorization auths. */
7089 if (rc
== REP_PROTOCOL_SUCCESS
)
7090 rc
= perm_add_enabling_values(pcp
, np
,
7093 /* If value_authorization values are ok, add them. */
7094 if (rc
== REP_PROTOCOL_SUCCESS
) {
7095 rc
= tx_allow_value(cmds
, cmds_sz
, np
);
7097 rc
= REP_PROTOCOL_FAIL_BAD_REQUEST
;
7099 rc
= perm_add_enabling_values(pcp
, np
,
7104 if (rc
== REP_PROTOCOL_SUCCESS
) {
7105 granted
= perm_granted(pcp
);
7106 rc
= map_granted_status(granted
, pcp
, &auth_string
);
7107 if ((granted
== PERM_DENIED
) && auth_string
) {
7109 * _PERMISSION_DENIED should not cause us
7110 * to exit at this point, because we still
7111 * want to generate an audit event.
7113 rc
= REP_PROTOCOL_SUCCESS
;
7119 if (rc
!= REP_PROTOCOL_SUCCESS
)
7122 if (granted
== PERM_DENIED
) {
7123 auth_status
= ADT_FAILURE
;
7124 auth_ret_value
= ADT_FAIL_VALUE_AUTH
;
7127 #endif /* NATIVE_BUILD */
7128 } else if (txp
->rnp_authorized
== RC_AUTH_FAILED
) {
7129 auth_status
= ADT_FAILURE
;
7130 auth_ret_value
= ADT_FAIL_VALUE_AUTH
;
7134 pg_fmri
= malloc(REP_PROTOCOL_FMRI_LEN
);
7135 if (pg_fmri
== NULL
) {
7136 rc
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
7139 if ((rc
= rc_node_get_fmri_or_fragment(np
, pg_fmri
,
7140 REP_PROTOCOL_FMRI_LEN
, &sz_out
)) != REP_PROTOCOL_SUCCESS
) {
7145 * Parse the transaction commands into a useful form.
7147 if ((rc
= tx_commit_data_new(cmds
, cmds_sz
, &tx_data
)) !=
7148 REP_PROTOCOL_SUCCESS
) {
7153 /* Authorization failed. Generate audit events. */
7154 generate_property_events(tx_data
, pg_fmri
, auth_string
,
7155 auth_status
, auth_ret_value
);
7156 rc
= REP_PROTOCOL_FAIL_PERMISSION_DENIED
;
7160 nnp
= rc_node_alloc();
7162 rc
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
7166 nnp
->rn_id
= np
->rn_id
; /* structure assignment */
7167 nnp
->rn_hash
= np
->rn_hash
;
7168 nnp
->rn_name
= strdup(np
->rn_name
);
7169 nnp
->rn_type
= strdup(np
->rn_type
);
7170 nnp
->rn_pgflags
= np
->rn_pgflags
;
7172 nnp
->rn_flags
= RC_NODE_IN_TX
| RC_NODE_USING_PARENT
;
7174 if (nnp
->rn_name
== NULL
|| nnp
->rn_type
== NULL
) {
7175 rc_node_destroy(nnp
);
7176 rc
= REP_PROTOCOL_FAIL_NO_RESOURCES
;
7180 (void) pthread_mutex_lock(&np
->rn_lock
);
7183 * We must have all of the old properties in the cache, or the
7184 * database deletions could cause inconsistencies.
7186 if ((rc
= rc_node_fill_children(np
, REP_PROTOCOL_ENTITY_PROPERTY
)) !=
7187 REP_PROTOCOL_SUCCESS
) {
7188 (void) pthread_mutex_unlock(&np
->rn_lock
);
7189 rc_node_destroy(nnp
);
7193 if (!rc_node_hold_flag(np
, RC_NODE_USING_PARENT
)) {
7194 (void) pthread_mutex_unlock(&np
->rn_lock
);
7195 rc_node_destroy(nnp
);
7196 rc
= REP_PROTOCOL_FAIL_DELETED
;
7200 if (np
->rn_flags
& RC_NODE_OLD
) {
7201 rc_node_rele_flag(np
, RC_NODE_USING_PARENT
);
7202 (void) pthread_mutex_unlock(&np
->rn_lock
);
7203 rc_node_destroy(nnp
);
7204 rc
= REP_PROTOCOL_FAIL_NOT_LATEST
;
7208 pp
= rc_node_hold_parent_flag(np
, RC_NODE_CHILDREN_CHANGING
);
7210 /* our parent is gone, we're going next... */
7211 rc_node_destroy(nnp
);
7212 (void) pthread_mutex_lock(&np
->rn_lock
);
7213 if (np
->rn_flags
& RC_NODE_OLD
) {
7214 (void) pthread_mutex_unlock(&np
->rn_lock
);
7215 rc
= REP_PROTOCOL_FAIL_NOT_LATEST
;
7218 (void) pthread_mutex_unlock(&np
->rn_lock
);
7219 rc
= REP_PROTOCOL_FAIL_DELETED
;
7222 (void) pthread_mutex_unlock(&pp
->rn_lock
);
7225 * prepare for the transaction
7227 (void) pthread_mutex_lock(&np
->rn_lock
);
7228 if (!rc_node_hold_flag(np
, RC_NODE_IN_TX
)) {
7229 (void) pthread_mutex_unlock(&np
->rn_lock
);
7230 (void) pthread_mutex_lock(&pp
->rn_lock
);
7231 rc_node_rele_flag(pp
, RC_NODE_CHILDREN_CHANGING
);
7232 (void) pthread_mutex_unlock(&pp
->rn_lock
);
7233 rc_node_destroy(nnp
);
7234 rc
= REP_PROTOCOL_FAIL_DELETED
;
7237 nnp
->rn_gen_id
= np
->rn_gen_id
;
7238 (void) pthread_mutex_unlock(&np
->rn_lock
);
7240 /* Sets nnp->rn_gen_id on success. */
7241 rc
= object_tx_commit(&np
->rn_id
, tx_data
, &nnp
->rn_gen_id
);
7243 (void) pthread_mutex_lock(&np
->rn_lock
);
7244 if (rc
!= REP_PROTOCOL_SUCCESS
) {
7245 rc_node_rele_flag(np
, RC_NODE_IN_TX
);
7246 (void) pthread_mutex_unlock(&np
->rn_lock
);
7247 (void) pthread_mutex_lock(&pp
->rn_lock
);
7248 rc_node_rele_flag(pp
, RC_NODE_CHILDREN_CHANGING
);
7249 (void) pthread_mutex_unlock(&pp
->rn_lock
);
7250 rc_node_destroy(nnp
);
7251 rc_node_clear(txp
, 0);
7252 if (rc
== REP_PROTOCOL_DONE
)
7253 rc
= REP_PROTOCOL_SUCCESS
; /* successful empty tx */
7260 (void) pthread_mutex_lock(&rc_pg_notify_lock
);
7261 while ((pnp
= uu_list_first(np
->rn_pg_notify_list
)) != NULL
)
7262 rc_pg_notify_fire(pnp
);
7263 (void) pthread_mutex_unlock(&rc_pg_notify_lock
);
7265 np
->rn_flags
|= RC_NODE_OLD
;
7266 (void) pthread_mutex_unlock(&np
->rn_lock
);
7268 rc_notify_remove_node(np
);
7271 * replace np with nnp
7273 rc_node_relink_child(pp
, np
, nnp
);
7276 * all done -- clear the transaction.
7278 rc_node_clear(txp
, 0);
7279 generate_property_events(tx_data
, pg_fmri
, auth_string
,
7280 auth_status
, auth_ret_value
);
7282 rc
= REP_PROTOCOL_SUCCESS
;
7287 tx_commit_data_free(tx_data
);
7292 rc_pg_notify_init(rc_node_pg_notify_t
*pnp
)
7294 uu_list_node_init(pnp
, &pnp
->rnpn_node
, rc_pg_notify_pool
);
7295 pnp
->rnpn_pg
= NULL
;
7300 rc_pg_notify_setup(rc_node_pg_notify_t
*pnp
, rc_node_ptr_t
*npp
, int fd
)
7304 RC_NODE_PTR_GET_CHECK_AND_LOCK(np
, npp
);
7306 if (np
->rn_id
.rl_type
!= REP_PROTOCOL_ENTITY_PROPERTYGRP
) {
7307 (void) pthread_mutex_unlock(&np
->rn_lock
);
7308 return (REP_PROTOCOL_FAIL_BAD_REQUEST
);
7312 * wait for any transaction in progress to complete
7314 if (!rc_node_wait_flag(np
, RC_NODE_IN_TX
)) {
7315 (void) pthread_mutex_unlock(&np
->rn_lock
);
7316 return (REP_PROTOCOL_FAIL_DELETED
);
7319 if (np
->rn_flags
& RC_NODE_OLD
) {
7320 (void) pthread_mutex_unlock(&np
->rn_lock
);
7321 return (REP_PROTOCOL_FAIL_NOT_LATEST
);
7324 (void) pthread_mutex_lock(&rc_pg_notify_lock
);
7325 rc_pg_notify_fire(pnp
);
7328 (void) uu_list_insert_after(np
->rn_pg_notify_list
, NULL
, pnp
);
7329 (void) pthread_mutex_unlock(&rc_pg_notify_lock
);
7331 (void) pthread_mutex_unlock(&np
->rn_lock
);
7332 return (REP_PROTOCOL_SUCCESS
);
7336 rc_pg_notify_fini(rc_node_pg_notify_t
*pnp
)
7338 (void) pthread_mutex_lock(&rc_pg_notify_lock
);
7339 rc_pg_notify_fire(pnp
);
7340 (void) pthread_mutex_unlock(&rc_pg_notify_lock
);
7342 uu_list_node_fini(pnp
, &pnp
->rnpn_node
, rc_pg_notify_pool
);
7346 rc_notify_info_init(rc_notify_info_t
*rnip
)
7350 uu_list_node_init(rnip
, &rnip
->rni_list_node
, rc_notify_info_pool
);
7351 uu_list_node_init(&rnip
->rni_notify
, &rnip
->rni_notify
.rcn_list_node
,
7354 rnip
->rni_notify
.rcn_node
= NULL
;
7355 rnip
->rni_notify
.rcn_info
= rnip
;
7357 bzero(rnip
->rni_namelist
, sizeof (rnip
->rni_namelist
));
7358 bzero(rnip
->rni_typelist
, sizeof (rnip
->rni_typelist
));
7360 (void) pthread_cond_init(&rnip
->rni_cv
, NULL
);
7362 for (i
= 0; i
< RC_NOTIFY_MAX_NAMES
; i
++) {
7363 rnip
->rni_namelist
[i
] = NULL
;
7364 rnip
->rni_typelist
[i
] = NULL
;
7369 rc_notify_info_insert_locked(rc_notify_info_t
*rnip
)
7371 assert(MUTEX_HELD(&rc_pg_notify_lock
));
7373 assert(!(rnip
->rni_flags
& RC_NOTIFY_ACTIVE
));
7375 rnip
->rni_flags
|= RC_NOTIFY_ACTIVE
;
7376 (void) uu_list_insert_after(rc_notify_info_list
, NULL
, rnip
);
7377 (void) uu_list_insert_before(rc_notify_list
, NULL
, &rnip
->rni_notify
);
7381 rc_notify_info_remove_locked(rc_notify_info_t
*rnip
)
7383 rc_notify_t
*me
= &rnip
->rni_notify
;
7386 assert(MUTEX_HELD(&rc_pg_notify_lock
));
7388 assert(rnip
->rni_flags
& RC_NOTIFY_ACTIVE
);
7390 assert(!(rnip
->rni_flags
& RC_NOTIFY_DRAIN
));
7391 rnip
->rni_flags
|= RC_NOTIFY_DRAIN
;
7392 (void) pthread_cond_broadcast(&rnip
->rni_cv
);
7394 (void) uu_list_remove(rc_notify_info_list
, rnip
);
7397 * clean up any notifications at the beginning of the list
7399 if (uu_list_first(rc_notify_list
) == me
) {
7401 * We can't call rc_notify_remove_locked() unless
7402 * rc_notify_in_use is 0.
7404 while (rc_notify_in_use
) {
7405 (void) pthread_cond_wait(&rc_pg_notify_cv
,
7406 &rc_pg_notify_lock
);
7408 while ((np
= uu_list_next(rc_notify_list
, me
)) != NULL
&&
7409 np
->rcn_info
== NULL
)
7410 rc_notify_remove_locked(np
);
7412 (void) uu_list_remove(rc_notify_list
, me
);
7414 while (rnip
->rni_waiters
) {
7415 (void) pthread_cond_broadcast(&rc_pg_notify_cv
);
7416 (void) pthread_cond_broadcast(&rnip
->rni_cv
);
7417 (void) pthread_cond_wait(&rnip
->rni_cv
, &rc_pg_notify_lock
);
7420 rnip
->rni_flags
&= ~(RC_NOTIFY_DRAIN
| RC_NOTIFY_ACTIVE
);
7424 rc_notify_info_add_watch(rc_notify_info_t
*rnip
, const char **arr
,
7431 rc
= rc_check_type_name(REP_PROTOCOL_ENTITY_PROPERTYGRP
, name
);
7432 if (rc
!= REP_PROTOCOL_SUCCESS
)
7437 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
7439 (void) pthread_mutex_lock(&rc_pg_notify_lock
);
7441 while (rnip
->rni_flags
& RC_NOTIFY_EMPTYING
)
7442 (void) pthread_cond_wait(&rnip
->rni_cv
, &rc_pg_notify_lock
);
7444 for (i
= 0; i
< RC_NOTIFY_MAX_NAMES
; i
++) {
7449 * Don't add name if it's already being tracked.
7451 if (strcmp(arr
[i
], f
) == 0) {
7457 if (i
== RC_NOTIFY_MAX_NAMES
) {
7458 (void) pthread_mutex_unlock(&rc_pg_notify_lock
);
7460 return (REP_PROTOCOL_FAIL_NO_RESOURCES
);
7466 if (!(rnip
->rni_flags
& RC_NOTIFY_ACTIVE
))
7467 rc_notify_info_insert_locked(rnip
);
7469 (void) pthread_mutex_unlock(&rc_pg_notify_lock
);
7470 return (REP_PROTOCOL_SUCCESS
);
7474 rc_notify_info_add_name(rc_notify_info_t
*rnip
, const char *name
)
7476 return (rc_notify_info_add_watch(rnip
, rnip
->rni_namelist
, name
));
7480 rc_notify_info_add_type(rc_notify_info_t
*rnip
, const char *type
)
7482 return (rc_notify_info_add_watch(rnip
, rnip
->rni_typelist
, type
));
7486 * Wait for and report an event of interest to rnip, a notification client
7489 rc_notify_info_wait(rc_notify_info_t
*rnip
, rc_node_ptr_t
*out
,
7490 char *outp
, size_t sz
)
7493 rc_notify_t
*me
= &rnip
->rni_notify
;
7495 rc_notify_delete_t
*ndp
;
7502 (void) pthread_mutex_lock(&rc_pg_notify_lock
);
7504 while ((rnip
->rni_flags
& (RC_NOTIFY_ACTIVE
| RC_NOTIFY_DRAIN
)) ==
7507 * If I'm first on the notify list, it is my job to
7508 * clean up any notifications I pass by. I can't do that
7509 * if someone is blocking the list from removals, so I
7510 * have to wait until they have all drained.
7512 am_first_info
= (uu_list_first(rc_notify_list
) == me
);
7513 if (am_first_info
&& rc_notify_in_use
) {
7514 rnip
->rni_waiters
++;
7515 (void) pthread_cond_wait(&rc_pg_notify_cv
,
7516 &rc_pg_notify_lock
);
7517 rnip
->rni_waiters
--;
7522 * Search the list for a node of interest.
7524 np
= uu_list_next(rc_notify_list
, me
);
7525 while (np
!= NULL
&& !rc_notify_info_interested(rnip
, np
)) {
7526 rc_notify_t
*next
= uu_list_next(rc_notify_list
, np
);
7528 if (am_first_info
) {
7531 * Passing another client -- stop
7532 * cleaning up notifications
7536 rc_notify_remove_locked(np
);
7543 * Nothing of interest -- wait for notification
7546 rnip
->rni_waiters
++;
7547 (void) pthread_cond_wait(&rnip
->rni_cv
,
7548 &rc_pg_notify_lock
);
7549 rnip
->rni_waiters
--;
7554 * found something to report -- move myself after the
7555 * notification and process it.
7557 (void) uu_list_remove(rc_notify_list
, me
);
7558 (void) uu_list_insert_after(rc_notify_list
, np
, me
);
7560 if ((ndp
= np
->rcn_delete
) != NULL
) {
7561 (void) strlcpy(outp
, ndp
->rnd_fmri
, sz
);
7563 rc_notify_remove_locked(np
);
7564 (void) pthread_mutex_unlock(&rc_pg_notify_lock
);
7565 rc_node_clear(out
, 0);
7566 return (REP_PROTOCOL_SUCCESS
);
7570 assert(nnp
!= NULL
);
7573 * We can't bump nnp's reference count without grabbing its
7574 * lock, and rc_pg_notify_lock is a leaf lock. So we
7575 * temporarily block all removals to keep nnp from
7579 assert(rc_notify_in_use
> 0);
7580 (void) pthread_mutex_unlock(&rc_pg_notify_lock
);
7582 rc_node_assign(out
, nnp
);
7584 (void) pthread_mutex_lock(&rc_pg_notify_lock
);
7585 assert(rc_notify_in_use
> 0);
7588 if (am_first_info
) {
7590 * While we had the lock dropped, another thread
7591 * may have also incremented rc_notify_in_use. We
7592 * need to make sure that we're back to 0 before
7593 * removing the node.
7595 while (rc_notify_in_use
) {
7596 (void) pthread_cond_wait(&rc_pg_notify_cv
,
7597 &rc_pg_notify_lock
);
7599 rc_notify_remove_locked(np
);
7601 if (rc_notify_in_use
== 0)
7602 (void) pthread_cond_broadcast(&rc_pg_notify_cv
);
7603 (void) pthread_mutex_unlock(&rc_pg_notify_lock
);
7605 return (REP_PROTOCOL_SUCCESS
);
7608 * If we're the last one out, let people know it's clear.
7610 if (rnip
->rni_waiters
== 0)
7611 (void) pthread_cond_broadcast(&rnip
->rni_cv
);
7612 (void) pthread_mutex_unlock(&rc_pg_notify_lock
);
7613 return (REP_PROTOCOL_DONE
);
7617 rc_notify_info_reset(rc_notify_info_t
*rnip
)
7621 (void) pthread_mutex_lock(&rc_pg_notify_lock
);
7622 if (rnip
->rni_flags
& RC_NOTIFY_ACTIVE
)
7623 rc_notify_info_remove_locked(rnip
);
7624 assert(!(rnip
->rni_flags
& (RC_NOTIFY_DRAIN
| RC_NOTIFY_EMPTYING
)));
7625 rnip
->rni_flags
|= RC_NOTIFY_EMPTYING
;
7626 (void) pthread_mutex_unlock(&rc_pg_notify_lock
);
7628 for (i
= 0; i
< RC_NOTIFY_MAX_NAMES
; i
++) {
7629 if (rnip
->rni_namelist
[i
] != NULL
) {
7630 free((void *)rnip
->rni_namelist
[i
]);
7631 rnip
->rni_namelist
[i
] = NULL
;
7633 if (rnip
->rni_typelist
[i
] != NULL
) {
7634 free((void *)rnip
->rni_typelist
[i
]);
7635 rnip
->rni_typelist
[i
] = NULL
;
7639 (void) pthread_mutex_lock(&rc_pg_notify_lock
);
7640 rnip
->rni_flags
&= ~RC_NOTIFY_EMPTYING
;
7641 (void) pthread_mutex_unlock(&rc_pg_notify_lock
);
7645 rc_notify_info_fini(rc_notify_info_t
*rnip
)
7647 rc_notify_info_reset(rnip
);
7649 uu_list_node_fini(rnip
, &rnip
->rni_list_node
, rc_notify_info_pool
);
7650 uu_list_node_fini(&rnip
->rni_notify
, &rnip
->rni_notify
.rcn_list_node
,