/*
 * Copyright (c) 2010 Kungliga Tekniska Högskolan
 * (Royal Institute of Technology, Stockholm, Sweden).
 * All rights reserved.
 *
 * Portions Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "baselocl.h"
#include <syslog.h>

static heim_base_atomic_integer_type tidglobal = HEIM_TID_USER;
struct heim_base {
    heim_type_t isa;
    heim_base_atomic_integer_type ref_cnt;
    HEIM_TAILQ_ENTRY(heim_base) autorel;
    heim_auto_release_t autorelpool;
    uintptr_t isaextra[3];
};
/* specialized version of base */
struct heim_base_mem {
    heim_type_t isa;
    heim_base_atomic_integer_type ref_cnt;
    HEIM_TAILQ_ENTRY(heim_base) autorel;
    heim_auto_release_t autorelpool;
    const char *name;
    void (*dealloc)(void *);
    uintptr_t isaextra[1];
};
#define PTR2BASE(ptr) (((struct heim_base *)ptr) - 1)
#define BASE2PTR(ptr) ((void *)(((struct heim_base *)ptr) + 1))

#ifdef HEIM_BASE_NEED_ATOMIC_MUTEX
HEIMDAL_MUTEX _heim_base_mutex = HEIMDAL_MUTEX_INITIALIZER;
#endif
/*
 * Auto release structure
 */

struct heim_auto_release {
    HEIM_TAILQ_HEAD(, heim_base) pool;
    HEIMDAL_MUTEX pool_mutex;
    struct heim_auto_release *parent;
};
/**
 * Retain object (i.e., take a reference)
 *
 * @param object object to be retained, NULL is ok
 *
 * @return the same object as passed in
 */

void *
heim_retain(void *ptr)
{
    struct heim_base *p = PTR2BASE(ptr);

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return ptr;

    if (p->ref_cnt == heim_base_atomic_integer_max)
        return ptr;

    if ((heim_base_atomic_inc(&p->ref_cnt) - 1) == 0)
        heim_abort("resurrection");
    return ptr;
}
/**
 * Release object, free if reference count reaches zero
 *
 * @param object to be released
 */

void
heim_release(void *ptr)
{
    heim_base_atomic_integer_type old;
    struct heim_base *p = PTR2BASE(ptr);

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return;

    if (p->ref_cnt == heim_base_atomic_integer_max)
        return;

    old = heim_base_atomic_dec(&p->ref_cnt) + 1;

    if (old > 1)
        return;

    if (old == 1) {
        heim_auto_release_t ar = p->autorelpool;
        /* remove from autorel pool list */
        if (ar) {
            p->autorelpool = NULL;
            HEIMDAL_MUTEX_lock(&ar->pool_mutex);
            HEIM_TAILQ_REMOVE(&ar->pool, p, autorel);
            HEIMDAL_MUTEX_unlock(&ar->pool_mutex);
        }
        if (p->isa->dealloc)
            p->isa->dealloc(ptr);
        free(p);
    } else
        heim_abort("over release");
}
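
/*
 * Usage sketch (editor's illustration, not part of the library source):
 * every heim_retain() must be balanced by a heim_release(); the object is
 * freed when the last reference is dropped.  Assumes heim_string_create()
 * from this library.
 *
 *     heim_string_t s = heim_string_create("example");
 *     heim_retain(s);        // take an extra reference
 *     heim_release(s);       // drop it again
 *     heim_release(s);       // last reference: object is freed here
 */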
/**
 * Get a description of the object.  If used, the call must be wrapped in
 * an autorelease pool, since the returned string is placed on it.
 */

heim_string_t
heim_description(heim_object_t ptr)
{
    struct heim_base *p = PTR2BASE(ptr);
    if (p->isa->desc == NULL)
        return heim_auto_release(heim_string_ref_create(p->isa->name, NULL));
    return heim_auto_release(p->isa->desc(ptr));
}
void
_heim_make_permanent(heim_object_t ptr)
{
    struct heim_base *p = PTR2BASE(ptr);
    p->ref_cnt = heim_base_atomic_integer_max;
}
static heim_type_t tagged_isa[9] = {
    &_heim_number_object,
    &_heim_null_object,
    &_heim_bool_object,

    NULL,
    NULL,
    NULL,

    NULL,
    NULL,
    NULL
};
heim_type_t
_heim_get_isa(heim_object_t ptr)
{
    struct heim_base *p;
    if (heim_base_is_tagged(ptr)) {
        if (heim_base_is_tagged_object(ptr))
            return tagged_isa[heim_base_tagged_object_tid(ptr)];
        heim_abort("not a supported tagged type");
    }
    p = PTR2BASE(ptr);
    return p->isa;
}
/**
 * Get type ID of object
 *
 * @param object object to get type id of
 *
 * @return type id of object
 */

heim_tid_t
heim_get_tid(heim_object_t ptr)
{
    heim_type_t isa = _heim_get_isa(ptr);
    return isa->tid;
}
/**
 * Get hash value of object
 *
 * @param object object to get hash value for
 *
 * @return a hash value
 */

unsigned long
heim_get_hash(heim_object_t ptr)
{
    heim_type_t isa = _heim_get_isa(ptr);
    if (isa->hash)
        return isa->hash(ptr);
    return (unsigned long)ptr;
}
/**
 * Compare two objects, returns 0 if equal, can be used for qsort()
 * and friends.
 *
 * @param a first object to compare
 * @param b second object to compare
 *
 * @return 0 if objects are equal
 */

int
heim_cmp(heim_object_t a, heim_object_t b)
{
    heim_tid_t ta, tb;
    heim_type_t isa;

    ta = heim_get_tid(a);
    tb = heim_get_tid(b);

    if (ta != tb)
        return ta - tb;

    isa = _heim_get_isa(a);

    if (isa->cmp)
        return isa->cmp(a, b);

    return (uintptr_t)a - (uintptr_t)b;
}
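
/*
 * Usage sketch (editor's illustration, not part of the library source):
 * heim_cmp() compares two objects directly; to sort a C array of
 * heim_object_t with qsort() a small thunk adapts the signature.
 * cmp_thunk and objs are hypothetical names.
 *
 *     static int
 *     cmp_thunk(const void *a, const void *b)
 *     {
 *         return heim_cmp(*(heim_object_t *)a, *(heim_object_t *)b);
 *     }
 *
 *     heim_object_t objs[2] = { heim_number_create(2), heim_number_create(1) };
 *     qsort(objs, 2, sizeof(objs[0]), cmp_thunk);
 */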
/*
 * Private - allocates a memory object
 */

static void
memory_dealloc(void *ptr)
{
    struct heim_base_mem *p = (struct heim_base_mem *)PTR2BASE(ptr);
    if (p->dealloc)
        p->dealloc(ptr);
}
struct heim_type_data memory_object = {
    HEIM_TID_MEMORY,
    "memory-object",
    NULL,
    memory_dealloc,
    NULL,
    NULL,
    NULL,
    NULL
};
/**
 * Allocate memory for an object of anonymous type
 *
 * @param size size of object to be allocated
 * @param name name of ad-hoc type
 * @param dealloc destructor function
 *
 * Objects allocated with this interface do not serialize.
 *
 * @return allocated object
 */

void *
heim_alloc(size_t size, const char *name, heim_type_dealloc dealloc)
{
    /* XXX use posix_memalign */

    struct heim_base_mem *p = calloc(1, size + sizeof(*p));
    if (p == NULL)
        return NULL;
    p->isa = &memory_object;
    p->ref_cnt = 1;
    p->name = name;
    p->dealloc = dealloc;
    return BASE2PTR(p);
}
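
/*
 * Usage sketch (editor's illustration, not part of the library source):
 * allocate a reference-counted wrapper around an ad-hoc struct; the
 * destructor runs when the last reference is released.  struct my_ctx and
 * my_ctx_dealloc() are hypothetical.
 *
 *     struct my_ctx { FILE *f; };
 *
 *     static void
 *     my_ctx_dealloc(void *ptr)
 *     {
 *         struct my_ctx *ctx = ptr;
 *         if (ctx->f)
 *             fclose(ctx->f);
 *     }
 *
 *     struct my_ctx *ctx = heim_alloc(sizeof(*ctx), "my-ctx", my_ctx_dealloc);
 *     ctx->f = fopen("/dev/null", "r");
 *     heim_release(ctx);     // my_ctx_dealloc() is called here
 */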
heim_type_t
_heim_create_type(const char *name,
                  heim_type_init init,
                  heim_type_dealloc dealloc,
                  heim_type_copy copy,
                  heim_type_cmp cmp,
                  heim_type_hash hash,
                  heim_type_description desc)
{
    heim_type_t type;

    type = calloc(1, sizeof(*type));
    if (type == NULL)
        return NULL;

    type->tid = heim_base_atomic_inc(&tidglobal);
    type->name = name;
    type->init = init;
    type->dealloc = dealloc;
    type->copy = copy;
    type->cmp = cmp;
    type->hash = hash;
    type->desc = desc;

    return type;
}
heim_object_t
_heim_alloc_object(heim_type_t type, size_t size)
{
    /* XXX should use posix_memalign */
    struct heim_base *p = calloc(1, size + sizeof(*p));
    if (p == NULL)
        return NULL;
    p->isa = type;
    p->ref_cnt = 1;

    return BASE2PTR(p);
}
void *
_heim_get_isaextra(heim_object_t ptr, size_t idx)
{
    struct heim_base *p = (struct heim_base *)PTR2BASE(ptr);

    heim_assert(ptr != NULL, "internal error");
    if (p->isa == &memory_object)
        return NULL;
    heim_assert(idx < 3, "invalid private heim_base extra data index");
    return &p->isaextra[idx];
}
heim_tid_t
_heim_type_get_tid(heim_type_t type)
{
    return type->tid;
}
#if !defined(WIN32) && !defined(HAVE_DISPATCH_DISPATCH_H) && defined(ENABLE_PTHREAD_SUPPORT)
static pthread_once_t once_arg_key_once = PTHREAD_ONCE_INIT;
static pthread_key_t once_arg_key;

static void
once_arg_key_once_init(void)
{
    errno = pthread_key_create(&once_arg_key, NULL);
    if (errno != 0) {
        fprintf(stderr,
                "Error: pthread_key_create() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
}

struct once_callback {
    void (*fn)(void *);
    void *data;
};

static void
once_callback_caller(void)
{
    struct once_callback *once_callback = pthread_getspecific(once_arg_key);

    if (once_callback == NULL) {
        fprintf(stderr, "Error: pthread_once() calls callback on "
                "different thread?! Cannot continue.\n");
        abort();
    }
    once_callback->fn(once_callback->data);
}
#endif
/**
 * Call func once and only once
 *
 * @param once pointer to a heim_base_once_t
 * @param ctx context passed to func
 * @param func function to be called
 */

void
heim_base_once_f(heim_base_once_t *once, void *ctx, void (*func)(void *))
{
#if defined(WIN32)
    /*
     * With a libroken wrapper for some CAS function and a libroken yield()
     * wrapper we could make this the default implementation when we have
     * neither Grand Central nor POSIX threads.
     *
     * We could also adapt the double-checked lock pattern with CAS
     * providing the necessary memory barriers in the absence of
     * portable explicit memory barrier APIs.
     */

    /*
     * We use CAS operations in large part to provide implied memory
     * barriers.
     *
     * State 0 means that func() has never executed.
     * State 1 means that func() is executing.
     * State 2 means that func() has completed execution.
     */
    if (InterlockedCompareExchange(once, 1L, 0L) == 0L) {
        /* State is now 1 */
        (*func)(ctx);
        (void)InterlockedExchange(once, 2L);
        /* State is now 2 */
    } else {
        /*
         * The InterlockedCompareExchange is being used to fetch
         * the current state under a full memory barrier. As long
         * as the current state is 1 continue to spin.
         */
        while (InterlockedCompareExchange(once, 2L, 0L) == 1L)
            SwitchToThread();
    }
#elif defined(HAVE_DISPATCH_DISPATCH_H)
    dispatch_once_f(once, ctx, func);
#elif defined(ENABLE_PTHREAD_SUPPORT)
    struct once_callback once_callback;

    once_callback.fn = func;
    once_callback.data = ctx;

    errno = pthread_once(&once_arg_key_once, once_arg_key_once_init);
    if (errno != 0) {
        fprintf(stderr, "Error: pthread_once() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
    errno = pthread_setspecific(once_arg_key, &once_callback);
    if (errno != 0) {
        fprintf(stderr,
                "Error: pthread_setspecific() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
    errno = pthread_once(once, once_callback_caller);
    if (errno != 0) {
        fprintf(stderr, "Error: pthread_once() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
#else
    static HEIMDAL_MUTEX mutex = HEIMDAL_MUTEX_INITIALIZER;
    HEIMDAL_MUTEX_lock(&mutex);
    if (*once == 0) {
        *once = 1;
        HEIMDAL_MUTEX_unlock(&mutex);
        func(ctx);
        HEIMDAL_MUTEX_lock(&mutex);
        *once = 2;
        HEIMDAL_MUTEX_unlock(&mutex);
    } else if (*once == 2) {
        HEIMDAL_MUTEX_unlock(&mutex);
    } else {
        HEIMDAL_MUTEX_unlock(&mutex);
        while (1) {
            struct timeval tv = { 0, 1000 };
            select(0, NULL, NULL, NULL, &tv);
            HEIMDAL_MUTEX_lock(&mutex);
            if (*once == 2)
                break;
            HEIMDAL_MUTEX_unlock(&mutex);
        }
        HEIMDAL_MUTEX_unlock(&mutex);
    }
#endif
}
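
/*
 * Usage sketch (editor's illustration, not part of the library source):
 * a lazily initialized global, set up exactly once even if use_foo() is
 * reached from several threads.  foo_once, foo_state, init_foo() and
 * use_foo() are hypothetical names.
 *
 *     static heim_base_once_t foo_once = HEIM_BASE_ONCE_INIT;
 *     static heim_dict_t foo_state;
 *
 *     static void
 *     init_foo(void *ctx)
 *     {
 *         foo_state = heim_dict_create(11);
 *     }
 *
 *     void
 *     use_foo(void)
 *     {
 *         heim_base_once_f(&foo_once, NULL, init_foo);
 *         // foo_state is now safe to use
 *     }
 */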
/**
 * Abort and log the failure (using syslog)
 */

void
heim_abort(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    heim_abortv(fmt, ap);
    va_end(ap);
}
/**
 * Abort and log the failure (using syslog)
 */

void
heim_abortv(const char *fmt, va_list ap)
{
    static char str[1024];

    vsnprintf(str, sizeof(str), fmt, ap);
    syslog(LOG_ERR, "heim_abort: %s", str);
    abort();
}
static int ar_created = 0;
static HEIMDAL_thread_key ar_key;

struct ar_tls {
    struct heim_auto_release *head;
    struct heim_auto_release *current;
    HEIMDAL_MUTEX tls_mutex;
};
static void
ar_tls_delete(void *ptr)
{
    struct ar_tls *tls = ptr;
    heim_auto_release_t next = NULL;

    if (tls == NULL)
        return;
    for (; tls->current != NULL; tls->current = next) {
        next = tls->current->parent;
        heim_release(tls->current);
    }
    free(tls);
}
static void
init_ar_tls(void *ptr)
{
    int ret;
    HEIMDAL_key_create(&ar_key, ar_tls_delete, ret);
    if (ret == 0)
        ar_created = 1;
}
static struct ar_tls *
autorel_tls(void)
{
    static heim_base_once_t once = HEIM_BASE_ONCE_INIT;
    struct ar_tls *arp;
    int ret;

    heim_base_once_f(&once, NULL, init_ar_tls);
    if (!ar_created)
        return NULL;

    arp = HEIMDAL_getspecific(ar_key);
    if (arp == NULL) {

        arp = calloc(1, sizeof(*arp));
        if (arp == NULL)
            return NULL;
        HEIMDAL_setspecific(ar_key, arp, ret);
        if (ret) {
            free(arp);
            return NULL;
        }
    }
    return arp;
}
static void
autorel_dealloc(void *ptr)
{
    heim_auto_release_t ar = ptr;
    struct ar_tls *tls;

    tls = autorel_tls();
    if (tls == NULL)
        heim_abort("autorelease pool released on thread w/o autorelease inited");

    heim_auto_release_drain(ar);

    if (!HEIM_TAILQ_EMPTY(&ar->pool))
        heim_abort("pool not empty after draining");

    HEIMDAL_MUTEX_lock(&tls->tls_mutex);
    if (tls->current != ptr)
        heim_abort("autorelease not releasing top pool");

    tls->current = ar->parent;
    HEIMDAL_MUTEX_unlock(&tls->tls_mutex);
}
static int
autorel_cmp(void *a, void *b)
{
    return (a == b);
}

static unsigned long
autorel_hash(void *ptr)
{
    return (unsigned long)ptr;
}
static struct heim_type_data _heim_autorel_object = {
    HEIM_TID_AUTORELEASE,
    "autorelease-pool",
    NULL,
    autorel_dealloc,
    NULL,
    autorel_cmp,
    autorel_hash,
    NULL
};
/**
 * Create thread-specific object auto-release pool
 *
 * Objects placed on the per-thread auto-release pool (with
 * heim_auto_release()) can be released in one fell swoop by calling
 * heim_auto_release_drain().
 */

heim_auto_release_t
heim_auto_release_create(void)
{
    struct ar_tls *tls = autorel_tls();
    heim_auto_release_t ar;

    if (tls == NULL)
        heim_abort("Failed to create/get autorelease head");

    ar = _heim_alloc_object(&_heim_autorel_object, sizeof(struct heim_auto_release));
    if (ar) {
        HEIMDAL_MUTEX_lock(&tls->tls_mutex);
        if (tls->head == NULL)
            tls->head = ar;
        ar->parent = tls->current;
        tls->current = ar;
        HEIMDAL_MUTEX_unlock(&tls->tls_mutex);
    }

    return ar;
}
/**
 * Place the given object on the thread's auto-release pool
 *
 * @param ptr object
 */

heim_object_t
heim_auto_release(heim_object_t ptr)
{
    struct heim_base *p = PTR2BASE(ptr);
    struct ar_tls *tls = autorel_tls();
    heim_auto_release_t ar;

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return ptr;

    /* drop from old pool */
    if ((ar = p->autorelpool) != NULL) {
        HEIMDAL_MUTEX_lock(&ar->pool_mutex);
        HEIM_TAILQ_REMOVE(&ar->pool, p, autorel);
        p->autorelpool = NULL;
        HEIMDAL_MUTEX_unlock(&ar->pool_mutex);
    }

    if (tls == NULL || (ar = tls->current) == NULL)
        heim_abort("no auto release pool in place, would leak");

    HEIMDAL_MUTEX_lock(&ar->pool_mutex);
    HEIM_TAILQ_INSERT_HEAD(&ar->pool, p, autorel);
    p->autorelpool = ar;
    HEIMDAL_MUTEX_unlock(&ar->pool_mutex);

    return ptr;
}
/**
 * Release all objects on the given auto-release pool
 */

void
heim_auto_release_drain(heim_auto_release_t autorel)
{
    heim_object_t obj;

    /* release all elements on the tail queue */

    HEIMDAL_MUTEX_lock(&autorel->pool_mutex);
    while (!HEIM_TAILQ_EMPTY(&autorel->pool)) {
        obj = HEIM_TAILQ_FIRST(&autorel->pool);
        HEIMDAL_MUTEX_unlock(&autorel->pool_mutex);
        heim_release(BASE2PTR(obj));
        HEIMDAL_MUTEX_lock(&autorel->pool_mutex);
    }
    HEIMDAL_MUTEX_unlock(&autorel->pool_mutex);
}
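
/*
 * Usage sketch (editor's illustration, not part of the library source):
 * create a pool, park temporary objects on it, and release them together.
 * Releasing the pool itself drains it and pops it off the thread's stack
 * of pools.
 *
 *     heim_auto_release_t pool = heim_auto_release_create();
 *
 *     heim_string_t tmp = heim_auto_release(heim_string_create("temporary"));
 *     // use tmp; no explicit heim_release(tmp) is needed
 *
 *     heim_release(pool);    // drains the pool, releasing tmp
 */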
/**
 * Helper for heim_path_vget() and heim_path_delete(). On success
 * outputs the node named by the path and the parent node and key
 * (useful for heim_path_delete()).
 */

static heim_object_t
heim_path_vget2(heim_object_t ptr, heim_object_t *parent, heim_object_t *key,
                heim_error_t *error, va_list ap)
{
    heim_object_t path_element;
    heim_object_t node, next_node;
    heim_tid_t node_type;

    *parent = NULL;
    *key = NULL;
    if (ptr == NULL)
        return NULL;

    for (node = ptr; node != NULL; ) {
        path_element = va_arg(ap, heim_object_t);
        if (path_element == NULL) {
            *parent = node;
            *key = path_element;
            return node;
        }

        node_type = heim_get_tid(node);
        switch (node_type) {
        case HEIM_TID_ARRAY:
        case HEIM_TID_DICT:
        case HEIM_TID_DB:
            break;
        default:
            if (node == ptr)
                heim_abort("heim_path_get() only operates on container types");
            return NULL;
        }

        if (node_type == HEIM_TID_DICT) {
            next_node = heim_dict_get_value(node, path_element);
        } else if (node_type == HEIM_TID_DB) {
            next_node = _heim_db_get_value(node, NULL, path_element, NULL);
        } else if (node_type == HEIM_TID_ARRAY) {
            int idx = -1;

            if (heim_get_tid(path_element) == HEIM_TID_NUMBER)
                idx = heim_number_get_int(path_element);
            if (idx < 0) {
                if (error)
                    *error = heim_error_create(EINVAL,
                                               "heim_path_get() path elements "
                                               "for array nodes must be "
                                               "numeric and positive");
                return NULL;
            }
            next_node = heim_array_get_value(node, idx);
        } else {
            if (error)
                *error = heim_error_create(EINVAL,
                                           "heim_path_get() node in path "
                                           "not a container type");
            return NULL;
        }
        node = next_node;
    }
    return NULL;
}
/**
 * Get a node in a heim_object tree by path
 *
 * @param ptr tree
 * @param error error (output)
 * @param ap NULL-terminated va_list of heim_object_ts that form a path
 *
 * @return object (not retained) if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_vget(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t p, k;

    return heim_path_vget2(ptr, &p, &k, error, ap);
}
/**
 * Get a node in a tree by path, with retained reference
 *
 * @param ptr tree
 * @param error error (output)
 * @param ap NULL-terminated va_list of heim_object_ts that form a path
 *
 * @return retained object if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_vcopy(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t p, k;

    return heim_retain(heim_path_vget2(ptr, &p, &k, error, ap));
}
/**
 * Get a node in a tree by path
 *
 * @param ptr tree
 * @param error error (output)
 * @param ... NULL-terminated list of heim_object_ts that form a path
 *
 * @return object (not retained) if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_get(heim_object_t ptr, heim_error_t *error, ...)
{
    heim_object_t o;
    heim_object_t p, k;
    va_list ap;

    if (ptr == NULL)
        return NULL;

    va_start(ap, error);
    o = heim_path_vget2(ptr, &p, &k, error, ap);
    va_end(ap);
    return o;
}
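
/*
 * Usage sketch (editor's illustration, not part of the library source):
 * look up a nested value with a NULL-terminated key path.  Keys here are
 * heim_string_t objects; array indices would be heim_number_t objects.
 *
 *     heim_dict_t top = heim_dict_create(11);
 *     heim_dict_t inner = heim_dict_create(11);
 *     heim_string_t ka = heim_string_create("a");
 *     heim_string_t kb = heim_string_create("b");
 *
 *     heim_dict_set_value(inner, kb, heim_number_create(42));
 *     heim_dict_set_value(top, ka, inner);
 *
 *     heim_error_t err = NULL;
 *     heim_object_t v = heim_path_get(top, &err, ka, kb, NULL);
 *     // v is the number 42 (not retained)
 */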
/**
 * Get a node in a tree by path, with retained reference
 *
 * @param ptr tree
 * @param error error (output)
 * @param ... NULL-terminated list of heim_object_ts that form a path
 *
 * @return retained object if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_copy(heim_object_t ptr, heim_error_t *error, ...)
{
    heim_object_t o;
    heim_object_t p, k;
    va_list ap;

    if (ptr == NULL)
        return NULL;

    va_start(ap, error);
    o = heim_retain(heim_path_vget2(ptr, &p, &k, error, ap));
    va_end(ap);
    return o;
}
/**
 * Create a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param size the size of the heim_dict_t nodes to be created
 * @param leaf leaf node to be added, if any
 * @param error error (output)
 * @param ap NULL-terminated list of path component objects
 *
 * Create a path of heim_dict_t interior nodes in a given heim_object_t
 * tree, as necessary, and set/replace a leaf, if given (if leaf is NULL,
 * no leaf is set).
 *
 * @return 0 on success, else a system error
 *
 * @addtogroup heimbase
 */

int
heim_path_vcreate(heim_object_t ptr, size_t size, heim_object_t leaf,
                  heim_error_t *error, va_list ap)
{
    heim_object_t path_element = va_arg(ap, heim_object_t);
    heim_object_t next_path_element = NULL;
    heim_object_t node = ptr;
    heim_object_t next_node = NULL;
    heim_tid_t node_type;
    int ret = 0;

    if (ptr == NULL)
        heim_abort("heim_path_vcreate() does not create root nodes");

    while (path_element != NULL) {
        next_path_element = va_arg(ap, heim_object_t);
        node_type = heim_get_tid(node);

        if (node_type == HEIM_TID_DICT) {
            next_node = heim_dict_get_value(node, path_element);
        } else if (node_type == HEIM_TID_ARRAY) {
            int idx = -1;

            if (heim_get_tid(path_element) == HEIM_TID_NUMBER)
                idx = heim_number_get_int(path_element);
            if (idx < 0) {
                if (error)
                    *error = heim_error_create(EINVAL,
                                               "heim_path() path elements for "
                                               "array nodes must be numeric "
                                               "and positive");
                return EINVAL;
            }
            if (idx < heim_array_get_length(node))
                next_node = heim_array_get_value(node, idx);
            else
                next_node = NULL;
        } else if (node_type == HEIM_TID_DB && next_path_element != NULL) {
            if (error)
                *error = heim_error_create(EINVAL, "Interior node is a DB");
            return EINVAL;
        }

        if (next_path_element == NULL)
            break;

        /* Create missing interior node */
        if (next_node == NULL) {
            next_node = heim_dict_create(size); /* no arrays or DBs, just dicts */
            if (next_node == NULL) {
                ret = ENOMEM;
                goto err;
            }

            if (node_type == HEIM_TID_DICT) {
                ret = heim_dict_set_value(node, path_element, next_node);
            } else if (node_type == HEIM_TID_ARRAY &&
                       heim_number_get_int(path_element) <= heim_array_get_length(node)) {
                ret = heim_array_insert_value(node,
                                              heim_number_get_int(path_element),
                                              next_node);
            } else {
                ret = EINVAL;
                if (error)
                    *error = heim_error_create(ret, "Node in path not a "
                                               "container");
            }
            heim_release(next_node);
            if (ret)
                goto err;
        }

        path_element = next_path_element;
        node = next_node;
        next_node = NULL;
    }

    if (path_element == NULL)
        goto err;

    /* Add the leaf */
    if (leaf != NULL) {
        if (node_type == HEIM_TID_DICT)
            ret = heim_dict_set_value(node, path_element, leaf);
        else
            ret = heim_array_insert_value(node,
                                          heim_number_get_int(path_element),
                                          leaf);
    }
    return ret;

err:
    if (error && !*error) {
        if (ret == ENOMEM)
            *error = heim_error_create_enomem();
        else
            *error = heim_error_create(ret, "Could not set "
                                       "dict value");
    }
    return ret;
}
/**
 * Create a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param size the size of the heim_dict_t nodes to be created
 * @param leaf leaf node to be added, if any
 * @param error error (output)
 * @param ... NULL-terminated list of path component objects
 *
 * Create a path of heim_dict_t interior nodes in a given heim_object_t
 * tree, as necessary, and set/replace a leaf, if given (if leaf is NULL,
 * no leaf is set).
 *
 * @return 0 on success, else a system error
 *
 * @addtogroup heimbase
 */

int
heim_path_create(heim_object_t ptr, size_t size, heim_object_t leaf,
                 heim_error_t *error, ...)
{
    va_list ap;
    int ret;

    va_start(ap, error);
    ret = heim_path_vcreate(ptr, size, leaf, error, ap);
    va_end(ap);
    return ret;
}
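
/*
 * Usage sketch (editor's illustration, not part of the library source):
 * build interior dictionaries on demand and set a leaf in one call.
 *
 *     heim_dict_t top = heim_dict_create(11);
 *     heim_string_t ka = heim_string_create("a");
 *     heim_string_t kb = heim_string_create("b");
 *     heim_error_t err = NULL;
 *
 *     // Creates top["a"] as a dict (if missing) and sets top["a"]["b"] = 42
 *     int ret = heim_path_create(top, 11, heim_number_create(42),
 *                                &err, ka, kb, NULL);
 */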
/**
 * Delete leaf node named by a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param error error (output)
 * @param ap NULL-terminated list of path component objects
 *
 * @addtogroup heimbase
 */

void
heim_path_vdelete(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t parent, key, child;

    child = heim_path_vget2(ptr, &parent, &key, error, ap);
    if (child != NULL) {
        if (heim_get_tid(parent) == HEIM_TID_DICT)
            heim_dict_delete_key(parent, key);
        else if (heim_get_tid(parent) == HEIM_TID_DB)
            heim_db_delete_key(parent, NULL, key, error);
        else if (heim_get_tid(parent) == HEIM_TID_ARRAY)
            heim_array_delete_value(parent, heim_number_get_int(key));
        heim_release(child);
    }
}
/**
 * Delete leaf node named by a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param error error (output)
 * @param ... NULL-terminated list of path component objects
 *
 * @addtogroup heimbase
 */

void
heim_path_delete(heim_object_t ptr, heim_error_t *error, ...)
{
    va_list ap;

    va_start(ap, error);
    heim_path_vdelete(ptr, error, ap);
    va_end(ap);
    return;
}
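
/*
 * Usage sketch (editor's illustration, not part of the library source):
 * remove a leaf again, re-using the hypothetical top/ka/kb objects from
 * the heim_path_create() sketch above; interior nodes are left in place.
 *
 *     heim_path_delete(top, NULL, ka, kb, NULL);
 *     // heim_path_get(top, NULL, ka, kb, NULL) now returns NULL
 */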