[Samba.git] / source4 / heimdal / lib / base / heimbase.c
/*
 * Copyright (c) 2010 Kungliga Tekniska Högskolan
 * (Royal Institute of Technology, Stockholm, Sweden).
 * All rights reserved.
 *
 * Portions Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "baselocl.h"
#include <syslog.h>

static heim_base_atomic_integer_type tidglobal = HEIM_TID_USER;
/*
 * Object header that precedes every non-tagged heim object; user pointers
 * are converted with PTR2BASE()/BASE2PTR() below.
 */
struct heim_base {
    heim_type_t isa;
    heim_base_atomic_integer_type ref_cnt;
    HEIM_TAILQ_ENTRY(heim_base) autorel;
    heim_auto_release_t autorelpool;
    uintptr_t isaextra[3];
};

/* specialized version of base */
struct heim_base_mem {
    heim_type_t isa;
    heim_base_atomic_integer_type ref_cnt;
    HEIM_TAILQ_ENTRY(heim_base) autorel;
    heim_auto_release_t autorelpool;
    const char *name;
    void (*dealloc)(void *);
    uintptr_t isaextra[1];
};
#define PTR2BASE(ptr) (((struct heim_base *)ptr) - 1)
#define BASE2PTR(ptr) ((void *)(((struct heim_base *)ptr) + 1))

#ifdef HEIM_BASE_NEED_ATOMIC_MUTEX
HEIMDAL_MUTEX _heim_base_mutex = HEIMDAL_MUTEX_INITIALIZER;
#endif

/*
 * Auto release structure
 */

struct heim_auto_release {
    HEIM_TAILQ_HEAD(, heim_base) pool;
    HEIMDAL_MUTEX pool_mutex;
    struct heim_auto_release *parent;
};
/**
 * Retain object (i.e., take a reference)
 *
 * @param object to be retained, NULL is ok
 *
 * @return the same object as passed in
 */

void *
heim_retain(void *ptr)
{
    struct heim_base *p = NULL;

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return ptr;

    p = PTR2BASE(ptr);

    if (heim_base_atomic_load(&p->ref_cnt) == heim_base_atomic_integer_max)
        return ptr;

    if ((heim_base_atomic_inc(&p->ref_cnt) - 1) == 0)
        heim_abort("resurrection");
    return ptr;
}
/**
 * Release object, free if reference count reaches zero
 *
 * @param object to be released
 */

void
heim_release(void *ptr)
{
    heim_base_atomic_integer_type old;
    struct heim_base *p = NULL;

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return;

    p = PTR2BASE(ptr);

    if (heim_base_atomic_load(&p->ref_cnt) == heim_base_atomic_integer_max)
        return;

    old = heim_base_atomic_dec(&p->ref_cnt) + 1;

    if (old > 1)
        return;

    if (old == 1) {
        heim_auto_release_t ar = p->autorelpool;
        /* remove from autorel pool list */
        if (ar) {
            p->autorelpool = NULL;
            HEIMDAL_MUTEX_lock(&ar->pool_mutex);
            HEIM_TAILQ_REMOVE(&ar->pool, p, autorel);
            HEIMDAL_MUTEX_unlock(&ar->pool_mutex);
        }
        if (p->isa->dealloc)
            p->isa->dealloc(ptr);
        free(p);
    } else
        heim_abort("over release");
}
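/*
 * Usage sketch (editor's illustration, not part of the upstream file),
 * assuming the public declarations in heimbase.h: every heim_retain()
 * is balanced by a heim_release(), and tagged or permanent objects are
 * unaffected by either call.
 *
 *   heim_string_t s = heim_string_create("example");   // refcount 1
 *   heim_retain(s);                                     // refcount 2
 *   heim_release(s);                                    // refcount 1
 *   heim_release(s);                                    // refcount 0, freed
 */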
/**
 * Describe an object. The returned string is placed on the thread's
 * autorelease pool, so calls must be wrapped in one.
 */

heim_string_t
heim_description(heim_object_t ptr)
{
    struct heim_base *p = PTR2BASE(ptr);
    if (p->isa->desc == NULL)
        return heim_auto_release(heim_string_ref_create(p->isa->name, NULL));
    return heim_auto_release(p->isa->desc(ptr));
}
void
_heim_make_permanent(heim_object_t ptr)
{
    struct heim_base *p = PTR2BASE(ptr);
    heim_base_atomic_store(&p->ref_cnt, heim_base_atomic_integer_max);
}
static heim_type_t tagged_isa[9] = {
    &_heim_number_object,
    &_heim_null_object,
    &_heim_bool_object,

    NULL,
    NULL,
    NULL,

    NULL,
    NULL,
    NULL
};

heim_type_t
_heim_get_isa(heim_object_t ptr)
{
    struct heim_base *p;
    if (heim_base_is_tagged(ptr)) {
        if (heim_base_is_tagged_object(ptr))
            return tagged_isa[heim_base_tagged_object_tid(ptr)];
        heim_abort("not a supported tagged type");
    }
    p = PTR2BASE(ptr);
    return p->isa;
}
/**
 * Get type ID of object
 *
 * @param object object to get type id of
 *
 * @return type id of object
 */

heim_tid_t
heim_get_tid(heim_object_t ptr)
{
    heim_type_t isa = _heim_get_isa(ptr);
    return isa->tid;
}
/**
 * Get hash value of object
 *
 * @param object object to get hash value for
 *
 * @return a hash value
 */

unsigned long
heim_get_hash(heim_object_t ptr)
{
    heim_type_t isa = _heim_get_isa(ptr);
    if (isa->hash)
        return isa->hash(ptr);
    return (unsigned long)ptr;
}
/**
 * Compare two objects, returns 0 if equal, can be used for qsort()
 * and friends.
 *
 * @param a first object to compare
 * @param b second object to compare
 *
 * @return 0 if objects are equal
 */

int
heim_cmp(heim_object_t a, heim_object_t b)
{
    heim_tid_t ta, tb;
    heim_type_t isa;

    ta = heim_get_tid(a);
    tb = heim_get_tid(b);

    if (ta != tb)
        return ta - tb;

    isa = _heim_get_isa(a);

    if (isa->cmp)
        return isa->cmp(a, b);

    return (uintptr_t)a - (uintptr_t)b;
}
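/*
 * Sketch (editor's illustration, not part of the upstream file): heim_cmp()
 * orders by type id first and then by the type's own cmp method, so a thin
 * adapter makes it usable with qsort() on an array of heim_object_t.  The
 * adapter name is invented for the example.
 *
 *   static int
 *   heim_cmp_qsort(const void *a, const void *b)
 *   {
 *       return heim_cmp(*(const heim_object_t *)a, *(const heim_object_t *)b);
 *   }
 *
 *   heim_object_t objs[3] = {
 *       heim_number_create(3), heim_number_create(1), heim_number_create(2)
 *   };
 *   qsort(objs, 3, sizeof(objs[0]), heim_cmp_qsort);
 */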
/*
 * Private - allocates a memory object
 */

static void
memory_dealloc(void *ptr)
{
    struct heim_base_mem *p = (struct heim_base_mem *)PTR2BASE(ptr);
    if (p->dealloc)
        p->dealloc(ptr);
}

struct heim_type_data memory_object = {
    HEIM_TID_MEMORY,
    "memory-object",
    NULL,
    memory_dealloc,
    NULL,
    NULL,
    NULL,
    NULL
};
/**
 * Allocate memory for an object of anonymous type
 *
 * @param size size of object to be allocated
 * @param name name of ad-hoc type
 * @param dealloc destructor function
 *
 * Objects allocated with this interface do not serialize.
 *
 * @return allocated object
 */

void *
heim_alloc(size_t size, const char *name, heim_type_dealloc dealloc)
{
    /* XXX use posix_memalign */

    struct heim_base_mem *p = calloc(1, size + sizeof(*p));
    if (p == NULL)
        return NULL;
    p->isa = &memory_object;
    p->ref_cnt = 1;
    p->name = name;
    p->dealloc = dealloc;
    return BASE2PTR(p);
}
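/*
 * Sketch (editor's illustration, not part of the upstream file): allocating
 * an ad-hoc object with a destructor.  The payload type and its field are
 * invented; the dealloc hook runs from heim_release() when the last
 * reference goes away, and the base layer then frees the allocation.
 *
 *   struct my_conn { int fd; };
 *
 *   static void
 *   my_conn_dealloc(void *ptr)
 *   {
 *       struct my_conn *c = ptr;
 *       if (c->fd != -1)
 *           close(c->fd);    // release payload resources only, not c itself
 *   }
 *
 *   struct my_conn *c = heim_alloc(sizeof(*c), "my-conn", my_conn_dealloc);
 *   if (c != NULL) {
 *       c->fd = -1;
 *       heim_release(c);     // runs my_conn_dealloc(), then frees the object
 *   }
 */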
heim_type_t
_heim_create_type(const char *name,
                  heim_type_init init,
                  heim_type_dealloc dealloc,
                  heim_type_copy copy,
                  heim_type_cmp cmp,
                  heim_type_hash hash,
                  heim_type_description desc)
{
    heim_type_t type;

    type = calloc(1, sizeof(*type));
    if (type == NULL)
        return NULL;

    type->tid = heim_base_atomic_inc(&tidglobal);
    type->name = name;
    type->init = init;
    type->dealloc = dealloc;
    type->copy = copy;
    type->cmp = cmp;
    type->hash = hash;
    type->desc = desc;

    return type;
}

heim_object_t
_heim_alloc_object(heim_type_t type, size_t size)
{
    /* XXX should use posix_memalign */

    struct heim_base *p = calloc(1, size + sizeof(*p));
    if (p == NULL)
        return NULL;
    p->isa = type;
    p->ref_cnt = 1;

    return BASE2PTR(p);
}
void *
_heim_get_isaextra(heim_object_t ptr, size_t idx)
{
    struct heim_base *p = NULL;

    heim_assert(ptr != NULL, "internal error");
    p = (struct heim_base *)PTR2BASE(ptr);
    if (p->isa == &memory_object)
        return NULL;
    heim_assert(idx < 3, "invalid private heim_base extra data index");
    return &p->isaextra[idx];
}

heim_tid_t
_heim_type_get_tid(heim_type_t type)
{
    return type->tid;
}
#if !defined(WIN32) && !defined(HAVE_DISPATCH_DISPATCH_H) && defined(ENABLE_PTHREAD_SUPPORT)
static pthread_once_t once_arg_key_once = PTHREAD_ONCE_INIT;
static pthread_key_t once_arg_key;

static void
once_arg_key_once_init(void)
{
    errno = pthread_key_create(&once_arg_key, NULL);
    if (errno != 0) {
        fprintf(stderr,
                "Error: pthread_key_create() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
}

struct once_callback {
    void (*fn)(void *);
    void *data;
};

static void
once_callback_caller(void)
{
    struct once_callback *once_callback = pthread_getspecific(once_arg_key);

    if (once_callback == NULL) {
        fprintf(stderr, "Error: pthread_once() calls callback on "
                "different thread?! Cannot continue.\n");
        abort();
    }
    once_callback->fn(once_callback->data);
}
#endif
/**
 * Call func once and only once
 *
 * @param once pointer to a heim_base_once_t
 * @param ctx context passed to func
 * @param func function to be called
 */

void
heim_base_once_f(heim_base_once_t *once, void *ctx, void (*func)(void *))
{
#if defined(WIN32)
    /*
     * With a libroken wrapper for some CAS function and a libroken yield()
     * wrapper we could make this the default implementation when we have
     * neither Grand Central nor POSIX threads.
     *
     * We could also adapt the double-checked lock pattern with CAS
     * providing the necessary memory barriers in the absence of
     * portable explicit memory barrier APIs.
     */

    /*
     * We use CAS operations in large part to provide implied memory
     * barriers.
     *
     * State 0 means that func() has never executed.
     * State 1 means that func() is executing.
     * State 2 means that func() has completed execution.
     */
    if (InterlockedCompareExchange(once, 1L, 0L) == 0L) {
        /* State is now 1 */
        (*func)(ctx);
        (void)InterlockedExchange(once, 2L);
        /* State is now 2 */
    } else {
        /*
         * The InterlockedCompareExchange is being used to fetch
         * the current state under a full memory barrier.  As long
         * as the current state is 1 continue to spin.
         */
        while (InterlockedCompareExchange(once, 2L, 0L) == 1L)
            SwitchToThread();
    }
#elif defined(HAVE_DISPATCH_DISPATCH_H)
    dispatch_once_f(once, ctx, func);
#elif defined(ENABLE_PTHREAD_SUPPORT)
    struct once_callback once_callback;

    once_callback.fn = func;
    once_callback.data = ctx;

    errno = pthread_once(&once_arg_key_once, once_arg_key_once_init);
    if (errno != 0) {
        fprintf(stderr, "Error: pthread_once() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
    errno = pthread_setspecific(once_arg_key, &once_callback);
    if (errno != 0) {
        fprintf(stderr,
                "Error: pthread_setspecific() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
    errno = pthread_once(once, once_callback_caller);
    if (errno != 0) {
        fprintf(stderr, "Error: pthread_once() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
#else
    static HEIMDAL_MUTEX mutex = HEIMDAL_MUTEX_INITIALIZER;
    HEIMDAL_MUTEX_lock(&mutex);
    if (*once == 0) {
        *once = 1;
        HEIMDAL_MUTEX_unlock(&mutex);
        func(ctx);
        HEIMDAL_MUTEX_lock(&mutex);
        *once = 2;
        HEIMDAL_MUTEX_unlock(&mutex);
    } else if (*once == 2) {
        HEIMDAL_MUTEX_unlock(&mutex);
    } else {
        HEIMDAL_MUTEX_unlock(&mutex);
        while (1) {
            struct timeval tv = { 0, 1000 };
            select(0, NULL, NULL, NULL, &tv);
            HEIMDAL_MUTEX_lock(&mutex);
            if (*once == 2)
                break;
            HEIMDAL_MUTEX_unlock(&mutex);
        }
        HEIMDAL_MUTEX_unlock(&mutex);
    }
#endif
}
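/*
 * Sketch (editor's illustration, not part of the upstream file): lazy,
 * thread-safe one-time initialization with heim_base_once_f().  The global
 * and helper names are invented; every backend above guarantees that
 * init_table() has completed before heim_base_once_f() returns.
 *
 *   static heim_base_once_t table_once = HEIM_BASE_ONCE_INIT;
 *   static heim_dict_t table;
 *
 *   static void
 *   init_table(void *ctx)
 *   {
 *       table = heim_dict_create(11);
 *   }
 *
 *   heim_dict_t
 *   get_table(void)
 *   {
 *       heim_base_once_f(&table_once, NULL, init_table);
 *       return table;
 *   }
 */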
/**
 * Abort and log the failure (using syslog)
 */

void
heim_abort(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    heim_abortv(fmt, ap);
    va_end(ap);
}

/**
 * Abort and log the failure (using syslog)
 */

void
heim_abortv(const char *fmt, va_list ap)
{
    static char str[1024];

    vsnprintf(str, sizeof(str), fmt, ap);
    syslog(LOG_ERR, "heim_abort: %s", str);
    abort();
}
/*
 * Per-thread autorelease pool state.
 */

static int ar_created = 0;
static HEIMDAL_thread_key ar_key;

struct ar_tls {
    struct heim_auto_release *head;
    struct heim_auto_release *current;
    HEIMDAL_MUTEX tls_mutex;
};
static void
ar_tls_delete(void *ptr)
{
    struct ar_tls *tls = ptr;
    heim_auto_release_t next = NULL;

    if (tls == NULL)
        return;
    for (; tls->current != NULL; tls->current = next) {
        next = tls->current->parent;
        heim_release(tls->current);
    }
    free(tls);
}

static void
init_ar_tls(void *ptr)
{
    int ret;
    HEIMDAL_key_create(&ar_key, ar_tls_delete, ret);
    if (ret == 0)
        ar_created = 1;
}

static struct ar_tls *
autorel_tls(void)
{
    static heim_base_once_t once = HEIM_BASE_ONCE_INIT;
    struct ar_tls *arp;
    int ret;

    heim_base_once_f(&once, NULL, init_ar_tls);
    if (!ar_created)
        return NULL;

    arp = HEIMDAL_getspecific(ar_key);
    if (arp == NULL) {
        arp = calloc(1, sizeof(*arp));
        if (arp == NULL)
            return NULL;
        HEIMDAL_setspecific(ar_key, arp, ret);
        if (ret) {
            free(arp);
            return NULL;
        }
    }
    return arp;
}
static void
autorel_dealloc(void *ptr)
{
    heim_auto_release_t ar = ptr;
    struct ar_tls *tls;

    tls = autorel_tls();
    if (tls == NULL)
        heim_abort("autorelease pool released on thread w/o autorelease inited");

    heim_auto_release_drain(ar);

    if (!HEIM_TAILQ_EMPTY(&ar->pool))
        heim_abort("pool not empty after draining");

    HEIMDAL_MUTEX_lock(&tls->tls_mutex);
    if (tls->current != ptr)
        heim_abort("autorelease not releasing top pool");

    tls->current = ar->parent;
    HEIMDAL_MUTEX_unlock(&tls->tls_mutex);
}

static int
autorel_cmp(void *a, void *b)
{
    return (a == b);
}

static unsigned long
autorel_hash(void *ptr)
{
    return (unsigned long)ptr;
}

static struct heim_type_data _heim_autorel_object = {
    HEIM_TID_AUTORELEASE,
    "autorelease-pool",
    NULL,
    autorel_dealloc,
    NULL,
    autorel_cmp,
    autorel_hash,
    NULL
};
/**
 * Create thread-specific object auto-release pool
 *
 * Objects placed on the per-thread auto-release pool (with
 * heim_auto_release()) can be released in one fell swoop by calling
 * heim_auto_release_drain().
 */

heim_auto_release_t
heim_auto_release_create(void)
{
    struct ar_tls *tls = autorel_tls();
    heim_auto_release_t ar;

    if (tls == NULL)
        heim_abort("Failed to create/get autorelease head");

    ar = _heim_alloc_object(&_heim_autorel_object, sizeof(struct heim_auto_release));
    if (ar) {
        HEIMDAL_MUTEX_lock(&tls->tls_mutex);
        if (tls->head == NULL)
            tls->head = ar;
        ar->parent = tls->current;
        tls->current = ar;
        HEIMDAL_MUTEX_unlock(&tls->tls_mutex);
    }

    return ar;
}
/**
 * Place the current object on the thread's auto-release pool
 *
 * @param ptr object
 */

heim_object_t
heim_auto_release(heim_object_t ptr)
{
    struct heim_base *p = NULL;
    struct ar_tls *tls = autorel_tls();
    heim_auto_release_t ar;

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return ptr;

    p = PTR2BASE(ptr);

    /* drop from old pool */
    if ((ar = p->autorelpool) != NULL) {
        HEIMDAL_MUTEX_lock(&ar->pool_mutex);
        HEIM_TAILQ_REMOVE(&ar->pool, p, autorel);
        p->autorelpool = NULL;
        HEIMDAL_MUTEX_unlock(&ar->pool_mutex);
    }

    if (tls == NULL || (ar = tls->current) == NULL)
        heim_abort("no auto release pool in place, would leak");

    HEIMDAL_MUTEX_lock(&ar->pool_mutex);
    HEIM_TAILQ_INSERT_HEAD(&ar->pool, p, autorel);
    p->autorelpool = ar;
    HEIMDAL_MUTEX_unlock(&ar->pool_mutex);

    return ptr;
}
/**
 * Release all objects on the given auto-release pool
 */

void
heim_auto_release_drain(heim_auto_release_t autorel)
{
    heim_object_t obj;

    /* release all elements on the tail queue */

    HEIMDAL_MUTEX_lock(&autorel->pool_mutex);
    while (!HEIM_TAILQ_EMPTY(&autorel->pool)) {
        obj = HEIM_TAILQ_FIRST(&autorel->pool);
        HEIMDAL_MUTEX_unlock(&autorel->pool_mutex);
        heim_release(BASE2PTR(obj));
        HEIMDAL_MUTEX_lock(&autorel->pool_mutex);
    }
    HEIMDAL_MUTEX_unlock(&autorel->pool_mutex);
}
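/*
 * Sketch (editor's illustration, not part of the upstream file): a typical
 * per-thread autorelease pool lifecycle.  Objects handed to
 * heim_auto_release() stay alive until the innermost pool is drained or
 * released; heim_description() above depends on this, since its result is
 * autoreleased.
 *
 *   heim_auto_release_t pool = heim_auto_release_create();
 *
 *   heim_string_t s = heim_auto_release(heim_string_create("temporary"));
 *   heim_string_t d = heim_description(s);   // also lands on the pool
 *   // ... use s and d without explicit releases ...
 *
 *   heim_release(pool);   // autorel_dealloc() drains the pool, releasing s and d
 */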
/*
 * Helper for heim_path_vget() and heim_path_delete().  On success
 * outputs the node named by the path and the parent node and key
 * (useful for heim_path_delete()).
 */

static heim_object_t
heim_path_vget2(heim_object_t ptr, heim_object_t *parent, heim_object_t *key,
                heim_error_t *error, va_list ap)
{
    heim_object_t path_element;
    heim_object_t node, next_node;
    heim_tid_t node_type;

    *parent = NULL;
    *key = NULL;
    if (ptr == NULL)
        return NULL;

    for (node = ptr; node != NULL; ) {
        path_element = va_arg(ap, heim_object_t);
        if (path_element == NULL) {
            *parent = node;
            *key = path_element;
            return node;
        }

        node_type = heim_get_tid(node);
        switch (node_type) {
        case HEIM_TID_ARRAY:
        case HEIM_TID_DICT:
        case HEIM_TID_DB:
            break;
        default:
            if (node == ptr)
                heim_abort("heim_path_get() only operates on container types");
            return NULL;
        }

        if (node_type == HEIM_TID_DICT) {
            next_node = heim_dict_get_value(node, path_element);
        } else if (node_type == HEIM_TID_DB) {
            next_node = _heim_db_get_value(node, NULL, path_element, NULL);
        } else if (node_type == HEIM_TID_ARRAY) {
            int idx = -1;

            if (heim_get_tid(path_element) == HEIM_TID_NUMBER)
                idx = heim_number_get_int(path_element);
            if (idx < 0) {
                if (error)
                    *error = heim_error_create(EINVAL,
                                               "heim_path_get() path elements "
                                               "for array nodes must be "
                                               "numeric and positive");
                return NULL;
            }
            next_node = heim_array_get_value(node, idx);
        } else {
            if (error)
                *error = heim_error_create(EINVAL,
                                           "heim_path_get() node in path "
                                           "not a container type");
            return NULL;
        }
        node = next_node;
    }
    return NULL;
}
/**
 * Get a node in a heim_object tree by path
 *
 * @param ptr tree
 * @param error error (output)
 * @param ap NULL-terminated va_list of heim_object_ts that form a path
 *
 * @return object (not retained) if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_vget(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t p, k;

    return heim_path_vget2(ptr, &p, &k, error, ap);
}

/**
 * Get a node in a tree by path, with retained reference
 *
 * @param ptr tree
 * @param error error (output)
 * @param ap NULL-terminated va_list of heim_object_ts that form a path
 *
 * @return retained object if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_vcopy(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t p, k;

    return heim_retain(heim_path_vget2(ptr, &p, &k, error, ap));
}
/**
 * Get a node in a tree by path
 *
 * @param ptr tree
 * @param error error (output)
 * @param ... NULL-terminated list of heim_object_ts that form a path
 *
 * @return object (not retained) if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_get(heim_object_t ptr, heim_error_t *error, ...)
{
    heim_object_t o;
    heim_object_t p, k;
    va_list ap;

    if (ptr == NULL)
        return NULL;

    va_start(ap, error);
    o = heim_path_vget2(ptr, &p, &k, error, ap);
    va_end(ap);
    return o;
}

/**
 * Get a node in a tree by path, with retained reference
 *
 * @param ptr tree
 * @param error error (output)
 * @param ... NULL-terminated list of heim_object_ts that form a path
 *
 * @return retained object if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_copy(heim_object_t ptr, heim_error_t *error, ...)
{
    heim_object_t o;
    heim_object_t p, k;
    va_list ap;

    if (ptr == NULL)
        return NULL;

    va_start(ap, error);
    o = heim_retain(heim_path_vget2(ptr, &p, &k, error, ap));
    va_end(ap);
    return o;
}
/**
 * Create a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param size the size of the heim_dict_t nodes to be created
 * @param leaf leaf node to be added, if any
 * @param error error (output)
 * @param ap NULL-terminated va_list of path component objects
 *
 * Create a path of heim_dict_t interior nodes in a given heim_object_t
 * tree, as necessary, and set/replace a leaf, if given (if leaf is NULL
 * then the leaf is not deleted).
 *
 * @return 0 on success, else a system error
 *
 * @addtogroup heimbase
 */

int
heim_path_vcreate(heim_object_t ptr, size_t size, heim_object_t leaf,
                  heim_error_t *error, va_list ap)
{
    heim_object_t path_element = va_arg(ap, heim_object_t);
    heim_object_t next_path_element = NULL;
    heim_object_t node = ptr;
    heim_object_t next_node = NULL;
    heim_tid_t node_type;
    int ret = 0;

    if (ptr == NULL)
        heim_abort("heim_path_vcreate() does not create root nodes");

    while (path_element != NULL) {
        next_path_element = va_arg(ap, heim_object_t);
        node_type = heim_get_tid(node);

        if (node_type == HEIM_TID_DICT) {
            next_node = heim_dict_get_value(node, path_element);
        } else if (node_type == HEIM_TID_ARRAY) {
            int idx = -1;

            if (heim_get_tid(path_element) == HEIM_TID_NUMBER)
                idx = heim_number_get_int(path_element);
            if (idx < 0) {
                if (error)
                    *error = heim_error_create(EINVAL,
                                               "heim_path() path elements for "
                                               "array nodes must be numeric "
                                               "and positive");
                return EINVAL;
            }
            if (idx < heim_array_get_length(node))
                next_node = heim_array_get_value(node, idx);
            else
                next_node = NULL;
        } else if (node_type == HEIM_TID_DB && next_path_element != NULL) {
            if (error)
                *error = heim_error_create(EINVAL, "Interior node is a DB");
            return EINVAL;
        }

        if (next_path_element == NULL)
            break;

        /* Create missing interior node */
        if (next_node == NULL) {
            next_node = heim_dict_create(size); /* no arrays or DBs, just dicts */
            if (next_node == NULL) {
                ret = ENOMEM;
                goto err;
            }
            if (node_type == HEIM_TID_DICT) {
                ret = heim_dict_set_value(node, path_element, next_node);
            } else if (node_type == HEIM_TID_ARRAY &&
                       heim_number_get_int(path_element) <= heim_array_get_length(node)) {
                ret = heim_array_insert_value(node,
                                              heim_number_get_int(path_element),
                                              next_node);
            } else {
                ret = EINVAL;
                if (error)
                    *error = heim_error_create(ret, "Node in path not a "
                                               "container");
            }
            heim_release(next_node);
            if (ret)
                goto err;
        }

        path_element = next_path_element;
        node = next_node;
        next_node = NULL;
    }

    if (path_element == NULL)
        goto err;

    /* Add the leaf */
    if (leaf != NULL) {
        if (node_type == HEIM_TID_DICT)
            ret = heim_dict_set_value(node, path_element, leaf);
        else
            ret = heim_array_insert_value(node,
                                          heim_number_get_int(path_element),
                                          leaf);
    }
    return ret;

err:
    if (error && !*error) {
        if (ret == ENOMEM)
            *error = heim_error_create_enomem();
        else
            *error = heim_error_create(ret, "Could not set "
                                       "dict value");
    }
    return ret;
}
/**
 * Create a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param size the size of the heim_dict_t nodes to be created
 * @param leaf leaf node to be added, if any
 * @param error error (output)
 * @param ... NULL-terminated list of path component objects
 *
 * Create a path of heim_dict_t interior nodes in a given heim_object_t
 * tree, as necessary, and set/replace a leaf, if given (if leaf is NULL
 * then the leaf is not deleted).
 *
 * @return 0 on success, else a system error
 *
 * @addtogroup heimbase
 */

int
heim_path_create(heim_object_t ptr, size_t size, heim_object_t leaf,
                 heim_error_t *error, ...)
{
    va_list ap;
    int ret;

    va_start(ap, error);
    ret = heim_path_vcreate(ptr, size, leaf, error, ap);
    va_end(ap);
    return ret;
}
/**
 * Delete leaf node named by a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param error error (output)
 * @param ap NULL-terminated list of path component objects
 *
 * @addtogroup heimbase
 */

void
heim_path_vdelete(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t parent, key, child;

    child = heim_path_vget2(ptr, &parent, &key, error, ap);
    if (child != NULL) {
        if (heim_get_tid(parent) == HEIM_TID_DICT)
            heim_dict_delete_key(parent, key);
        else if (heim_get_tid(parent) == HEIM_TID_DB)
            heim_db_delete_key(parent, NULL, key, error);
        else if (heim_get_tid(parent) == HEIM_TID_ARRAY)
            heim_array_delete_value(parent, heim_number_get_int(key));
        heim_release(child);
    }
}
/**
 * Delete leaf node named by a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param error error (output)
 * @param ... NULL-terminated list of path component objects
 *
 * @addtogroup heimbase
 */

void
heim_path_delete(heim_object_t ptr, heim_error_t *error, ...)
{
    va_list ap;

    va_start(ap, error);
    heim_path_vdelete(ptr, error, ap);
    va_end(ap);
    return;
}
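/*
 * Sketch (editor's illustration, not part of the upstream file): building,
 * querying and pruning a small tree with the path API.  Keys and values are
 * invented; interior dict nodes are created on demand by heim_path_create()
 * and every path is terminated by a NULL sentinel.
 *
 *   heim_dict_t root = heim_dict_create(11);
 *   heim_string_t ka = heim_string_create("a");
 *   heim_string_t kb = heim_string_create("b");
 *   heim_number_t v  = heim_number_create(42);
 *   heim_error_t err = NULL;
 *
 *   heim_path_create(root, 11, v, &err, ka, kb, NULL);   // root["a"]["b"] = 42
 *
 *   heim_object_t got = heim_path_get(root, &err, ka, kb, NULL);  // not retained
 *
 *   heim_path_delete(root, &err, ka, kb, NULL);           // drops the leaf
 *
 *   heim_release(v);
 *   heim_release(kb);
 *   heim_release(ka);
 *   heim_release(root);
 */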