2 * Copyright (c) 2010 Kungliga Tekniska Högskolan
3 * (Royal Institute of Technology, Stockholm, Sweden).
6 * Portions Copyright (c) 2010 Apple Inc. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the Institute nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
39 static heim_base_atomic_integer_type tidglobal
= HEIM_TID_USER
;
43 heim_base_atomic_integer_type ref_cnt
;
44 HEIM_TAILQ_ENTRY(heim_base
) autorel
;
45 heim_auto_release_t autorelpool
;
46 uintptr_t isaextra
[3];
49 /* specialized version of base */
50 struct heim_base_mem
{
52 heim_base_atomic_integer_type ref_cnt
;
53 HEIM_TAILQ_ENTRY(heim_base
) autorel
;
54 heim_auto_release_t autorelpool
;
56 void (*dealloc
)(void *);
57 uintptr_t isaextra
[1];
/*
 * Convert between a user pointer and its hidden heim_base header, which
 * sits immediately before the user data.  Macro argument is parenthesized
 * so expressions like PTR2BASE(p + 1) expand correctly (macro hygiene fix).
 */
#define PTR2BASE(ptr) (((struct heim_base *)(ptr)) - 1)
#define BASE2PTR(ptr) ((void *)(((struct heim_base *)(ptr)) + 1))
/*
 * Fallback mutex used to emulate atomic operations on platforms with no
 * native atomics.  Visible text lost the #endif; restored.
 */
#ifdef HEIM_BASE_NEED_ATOMIC_MUTEX
HEIMDAL_MUTEX _heim_base_mutex = HEIMDAL_MUTEX_INITIALIZER;
#endif
68 * Auto release structure
71 struct heim_auto_release
{
72 HEIM_TAILQ_HEAD(, heim_base
) pool
;
73 HEIMDAL_MUTEX pool_mutex
;
74 struct heim_auto_release
*parent
;
79 * Retain object (i.e., take a reference)
81 * @param object to be released, NULL is ok
83 * @return the same object as passed in
87 heim_retain(void *ptr
)
89 struct heim_base
*p
= PTR2BASE(ptr
);
91 if (ptr
== NULL
|| heim_base_is_tagged(ptr
))
94 if (p
->ref_cnt
== heim_base_atomic_integer_max
)
97 if ((heim_base_atomic_inc(&p
->ref_cnt
) - 1) == 0)
98 heim_abort("resurection");
103 * Release object, free if reference count reaches zero
105 * @param object to be released
109 heim_release(void *ptr
)
111 heim_base_atomic_integer_type old
;
112 struct heim_base
*p
= PTR2BASE(ptr
);
114 if (ptr
== NULL
|| heim_base_is_tagged(ptr
))
117 if (p
->ref_cnt
== heim_base_atomic_integer_max
)
120 old
= heim_base_atomic_dec(&p
->ref_cnt
) + 1;
126 heim_auto_release_t ar
= p
->autorelpool
;
127 /* remove from autorel pool list */
129 p
->autorelpool
= NULL
;
130 HEIMDAL_MUTEX_lock(&ar
->pool_mutex
);
131 HEIM_TAILQ_REMOVE(&ar
->pool
, p
, autorel
);
132 HEIMDAL_MUTEX_unlock(&ar
->pool_mutex
);
135 p
->isa
->dealloc(ptr
);
138 heim_abort("over release");
142 * If used require wrapped in autorelease pool
146 heim_description(heim_object_t ptr
)
148 struct heim_base
*p
= PTR2BASE(ptr
);
149 if (p
->isa
->desc
== NULL
)
150 return heim_auto_release(heim_string_ref_create(p
->isa
->name
, NULL
));
151 return heim_auto_release(p
->isa
->desc(ptr
));
156 _heim_make_permanent(heim_object_t ptr
)
158 struct heim_base
*p
= PTR2BASE(ptr
);
159 p
->ref_cnt
= heim_base_atomic_integer_max
;
163 static heim_type_t tagged_isa
[9] = {
164 &_heim_number_object
,
178 _heim_get_isa(heim_object_t ptr
)
181 if (heim_base_is_tagged(ptr
)) {
182 if (heim_base_is_tagged_object(ptr
))
183 return tagged_isa
[heim_base_tagged_object_tid(ptr
)];
184 heim_abort("not a supported tagged type");
191 * Get type ID of object
193 * @param object object to get type id of
195 * @return type id of object
199 heim_get_tid(heim_object_t ptr
)
201 heim_type_t isa
= _heim_get_isa(ptr
);
206 * Get hash value of object
208 * @param object object to get hash value for
210 * @return a hash value
214 heim_get_hash(heim_object_t ptr
)
216 heim_type_t isa
= _heim_get_isa(ptr
);
218 return isa
->hash(ptr
);
219 return (unsigned long)ptr
;
223 * Compare two objects, returns 0 if equal, can use used for qsort()
226 * @param a first object to compare
227 * @param b first object to compare
229 * @return 0 if objects are equal
233 heim_cmp(heim_object_t a
, heim_object_t b
)
238 ta
= heim_get_tid(a
);
239 tb
= heim_get_tid(b
);
244 isa
= _heim_get_isa(a
);
247 return isa
->cmp(a
, b
);
249 return (uintptr_t)a
- (uintptr_t)b
;
253 * Private - allocates an memory object
257 memory_dealloc(void *ptr
)
259 struct heim_base_mem
*p
= (struct heim_base_mem
*)PTR2BASE(ptr
);
264 struct heim_type_data memory_object
= {
276 * Allocate memory for an object of anonymous type
278 * @param size size of object to be allocated
279 * @param name name of ad-hoc type
280 * @param dealloc destructor function
282 * Objects allocated with this interface do not serialize.
284 * @return allocated object
288 heim_alloc(size_t size
, const char *name
, heim_type_dealloc dealloc
)
290 /* XXX use posix_memalign */
292 struct heim_base_mem
*p
= calloc(1, size
+ sizeof(*p
));
295 p
->isa
= &memory_object
;
298 p
->dealloc
= dealloc
;
303 _heim_create_type(const char *name
,
305 heim_type_dealloc dealloc
,
309 heim_type_description desc
)
313 type
= calloc(1, sizeof(*type
));
317 type
->tid
= heim_base_atomic_inc(&tidglobal
);
320 type
->dealloc
= dealloc
;
330 _heim_alloc_object(heim_type_t type
, size_t size
)
332 /* XXX should use posix_memalign */
333 struct heim_base
*p
= calloc(1, size
+ sizeof(*p
));
343 _heim_get_isaextra(heim_object_t ptr
, size_t idx
)
345 struct heim_base
*p
= (struct heim_base
*)PTR2BASE(ptr
);
347 heim_assert(ptr
!= NULL
, "internal error");
348 if (p
->isa
== &memory_object
)
350 heim_assert(idx
< 3, "invalid private heim_base extra data index");
351 return &p
->isaextra
[idx
];
355 _heim_type_get_tid(heim_type_t type
)
360 #if !defined(WIN32) && !defined(HAVE_DISPATCH_DISPATCH_H) && defined(ENABLE_PTHREAD_SUPPORT)
361 static pthread_once_t once_arg_key_once
= PTHREAD_ONCE_INIT
;
362 static pthread_key_t once_arg_key
;
365 once_arg_key_once_init(void)
367 errno
= pthread_key_create(&once_arg_key
, NULL
);
370 "Error: pthread_key_create() failed, cannot continue: %s\n",
376 struct once_callback
{
382 once_callback_caller(void)
384 struct once_callback
*once_callback
= pthread_getspecific(once_arg_key
);
386 if (once_callback
== NULL
) {
387 fprintf(stderr
, "Error: pthread_once() calls callback on "
388 "different thread?! Cannot continue.\n");
391 once_callback
->fn(once_callback
->data
);
396 * Call func once and only once
398 * @param once pointer to a heim_base_once_t
399 * @param ctx context passed to func
400 * @param func function to be called
404 heim_base_once_f(heim_base_once_t
*once
, void *ctx
, void (*func
)(void *))
408 * With a libroken wrapper for some CAS function and a libroken yield()
409 * wrapper we could make this the default implementation when we have
410 * neither Grand Central nor POSX threads.
412 * We could also adapt the double-checked lock pattern with CAS
413 * providing the necessary memory barriers in the absence of
414 * portable explicit memory barrier APIs.
417 * We use CAS operations in large part to provide implied memory
420 * State 0 means that func() has never executed.
421 * State 1 means that func() is executing.
422 * State 2 means that func() has completed execution.
424 if (InterlockedCompareExchange(once
, 1L, 0L) == 0L) {
427 (void)InterlockedExchange(once
, 2L);
431 * The InterlockedCompareExchange is being used to fetch
432 * the current state under a full memory barrier. As long
433 * as the current state is 1 continue to spin.
435 while (InterlockedCompareExchange(once
, 2L, 0L) == 1L)
438 #elif defined(HAVE_DISPATCH_DISPATCH_H)
439 dispatch_once_f(once
, ctx
, func
);
440 #elif defined(ENABLE_PTHREAD_SUPPORT)
441 struct once_callback once_callback
;
443 once_callback
.fn
= func
;
444 once_callback
.data
= ctx
;
446 errno
= pthread_once(&once_arg_key_once
, once_arg_key_once_init
);
448 fprintf(stderr
, "Error: pthread_once() failed, cannot continue: %s\n",
452 errno
= pthread_setspecific(once_arg_key
, &once_callback
);
455 "Error: pthread_setspecific() failed, cannot continue: %s\n",
459 errno
= pthread_once(once
, once_callback_caller
);
461 fprintf(stderr
, "Error: pthread_once() failed, cannot continue: %s\n",
466 static HEIMDAL_MUTEX mutex
= HEIMDAL_MUTEX_INITIALIZER
;
467 HEIMDAL_MUTEX_lock(&mutex
);
470 HEIMDAL_MUTEX_unlock(&mutex
);
472 HEIMDAL_MUTEX_lock(&mutex
);
474 HEIMDAL_MUTEX_unlock(&mutex
);
475 } else if (*once
== 2) {
476 HEIMDAL_MUTEX_unlock(&mutex
);
478 HEIMDAL_MUTEX_unlock(&mutex
);
480 struct timeval tv
= { 0, 1000 };
481 select(0, NULL
, NULL
, NULL
, &tv
);
482 HEIMDAL_MUTEX_lock(&mutex
);
485 HEIMDAL_MUTEX_unlock(&mutex
);
487 HEIMDAL_MUTEX_unlock(&mutex
);
/**
 * Abort and log the failure (using syslog)
 *
 * @param fmt printf-style format for the message
 */
void
heim_abort(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    heim_abortv(fmt, ap);
    va_end(ap);
}
/**
 * Abort and log the failure (using syslog)
 *
 * @param fmt printf-style format for the message
 * @param ap format arguments
 */
void
heim_abortv(const char *fmt, va_list ap)
{
    /* static so the formatted message survives on abort() paths where the
     * stack may be unusable */
    static char str[1024];

    vsnprintf(str, sizeof(str), fmt, ap);
    syslog(LOG_ERR, "heim_abort: %s", str);
    abort();
}
523 static int ar_created
= 0;
524 static HEIMDAL_thread_key ar_key
;
527 struct heim_auto_release
*head
;
528 struct heim_auto_release
*current
;
529 HEIMDAL_MUTEX tls_mutex
;
533 ar_tls_delete(void *ptr
)
535 struct ar_tls
*tls
= ptr
;
536 heim_auto_release_t next
= NULL
;
540 for (; tls
->current
!= NULL
; tls
->current
= next
) {
541 next
= tls
->current
->parent
;
542 heim_release(tls
->current
);
548 init_ar_tls(void *ptr
)
551 HEIMDAL_key_create(&ar_key
, ar_tls_delete
, ret
);
556 static struct ar_tls
*
559 static heim_base_once_t once
= HEIM_BASE_ONCE_INIT
;
563 heim_base_once_f(&once
, NULL
, init_ar_tls
);
567 arp
= HEIMDAL_getspecific(ar_key
);
570 arp
= calloc(1, sizeof(*arp
));
573 HEIMDAL_setspecific(ar_key
, arp
, ret
);
584 autorel_dealloc(void *ptr
)
586 heim_auto_release_t ar
= ptr
;
591 heim_abort("autorelease pool released on thread w/o autorelease inited");
593 heim_auto_release_drain(ar
);
595 if (!HEIM_TAILQ_EMPTY(&ar
->pool
))
596 heim_abort("pool not empty after draining");
598 HEIMDAL_MUTEX_lock(&tls
->tls_mutex
);
599 if (tls
->current
!= ptr
)
600 heim_abort("autorelease not releaseing top pool");
602 tls
->current
= ar
->parent
;
603 HEIMDAL_MUTEX_unlock(&tls
->tls_mutex
);
/* Pools compare by identity only. */
static int
autorel_cmp(void *a, void *b)
{
    return (a == b);
}

/* Pools hash by address. */
static unsigned long
autorel_hash(void *ptr)
{
    return (unsigned long)ptr;
}
619 static struct heim_type_data _heim_autorel_object
= {
620 HEIM_TID_AUTORELEASE
,
631 * Create thread-specific object auto-release pool
633 * Objects placed on the per-thread auto-release pool (with
634 * heim_auto_release()) can be released in one fell swoop by calling
635 * heim_auto_release_drain().
639 heim_auto_release_create(void)
641 struct ar_tls
*tls
= autorel_tls();
642 heim_auto_release_t ar
;
645 heim_abort("Failed to create/get autorelease head");
647 ar
= _heim_alloc_object(&_heim_autorel_object
, sizeof(struct heim_auto_release
));
649 HEIMDAL_MUTEX_lock(&tls
->tls_mutex
);
650 if (tls
->head
== NULL
)
652 ar
->parent
= tls
->current
;
654 HEIMDAL_MUTEX_unlock(&tls
->tls_mutex
);
661 * Place the current object on the thread's auto-release pool
667 heim_auto_release(heim_object_t ptr
)
669 struct heim_base
*p
= PTR2BASE(ptr
);
670 struct ar_tls
*tls
= autorel_tls();
671 heim_auto_release_t ar
;
673 if (ptr
== NULL
|| heim_base_is_tagged(ptr
))
676 /* drop from old pool */
677 if ((ar
= p
->autorelpool
) != NULL
) {
678 HEIMDAL_MUTEX_lock(&ar
->pool_mutex
);
679 HEIM_TAILQ_REMOVE(&ar
->pool
, p
, autorel
);
680 p
->autorelpool
= NULL
;
681 HEIMDAL_MUTEX_unlock(&ar
->pool_mutex
);
684 if (tls
== NULL
|| (ar
= tls
->current
) == NULL
)
685 heim_abort("no auto relase pool in place, would leak");
687 HEIMDAL_MUTEX_lock(&ar
->pool_mutex
);
688 HEIM_TAILQ_INSERT_HEAD(&ar
->pool
, p
, autorel
);
690 HEIMDAL_MUTEX_unlock(&ar
->pool_mutex
);
696 * Release all objects on the given auto-release pool
700 heim_auto_release_drain(heim_auto_release_t autorel
)
704 /* release all elements on the tail queue */
706 HEIMDAL_MUTEX_lock(&autorel
->pool_mutex
);
707 while(!HEIM_TAILQ_EMPTY(&autorel
->pool
)) {
708 obj
= HEIM_TAILQ_FIRST(&autorel
->pool
);
709 HEIMDAL_MUTEX_unlock(&autorel
->pool_mutex
);
710 heim_release(BASE2PTR(obj
));
711 HEIMDAL_MUTEX_lock(&autorel
->pool_mutex
);
713 HEIMDAL_MUTEX_unlock(&autorel
->pool_mutex
);
717 * Helper for heim_path_vget() and heim_path_delete(). On success
718 * outputs the node named by the path and the parent node and key
719 * (useful for heim_path_delete()).
723 heim_path_vget2(heim_object_t ptr
, heim_object_t
*parent
, heim_object_t
*key
,
724 heim_error_t
*error
, va_list ap
)
726 heim_object_t path_element
;
727 heim_object_t node
, next_node
;
728 heim_tid_t node_type
;
735 for (node
= ptr
; node
!= NULL
; ) {
736 path_element
= va_arg(ap
, heim_object_t
);
737 if (path_element
== NULL
) {
743 node_type
= heim_get_tid(node
);
751 heim_abort("heim_path_get() only operates on container types");
755 if (node_type
== HEIM_TID_DICT
) {
756 next_node
= heim_dict_get_value(node
, path_element
);
757 } else if (node_type
== HEIM_TID_DB
) {
758 next_node
= _heim_db_get_value(node
, NULL
, path_element
, NULL
);
759 } else if (node_type
== HEIM_TID_ARRAY
) {
762 if (heim_get_tid(path_element
) == HEIM_TID_NUMBER
)
763 idx
= heim_number_get_int(path_element
);
766 *error
= heim_error_create(EINVAL
,
767 "heim_path_get() path elements "
768 "for array nodes must be "
769 "numeric and positive");
772 next_node
= heim_array_get_value(node
, idx
);
775 *error
= heim_error_create(EINVAL
,
776 "heim_path_get() node in path "
777 "not a container type");
786 * Get a node in a heim_object tree by path
789 * @param error error (output)
790 * @param ap NULL-terminated va_list of heim_object_ts that form a path
792 * @return object (not retained) if found
794 * @addtogroup heimbase
798 heim_path_vget(heim_object_t ptr
, heim_error_t
*error
, va_list ap
)
802 return heim_path_vget2(ptr
, &p
, &k
, error
, ap
);
806 * Get a node in a tree by path, with retained reference
809 * @param error error (output)
810 * @param ap NULL-terminated va_list of heim_object_ts that form a path
812 * @return retained object if found
814 * @addtogroup heimbase
818 heim_path_vcopy(heim_object_t ptr
, heim_error_t
*error
, va_list ap
)
822 return heim_retain(heim_path_vget2(ptr
, &p
, &k
, error
, ap
));
826 * Get a node in a tree by path
829 * @param error error (output)
830 * @param ... NULL-terminated va_list of heim_object_ts that form a path
832 * @return object (not retained) if found
834 * @addtogroup heimbase
838 heim_path_get(heim_object_t ptr
, heim_error_t
*error
, ...)
848 o
= heim_path_vget2(ptr
, &p
, &k
, error
, ap
);
854 * Get a node in a tree by path, with retained reference
857 * @param error error (output)
858 * @param ... NULL-terminated va_list of heim_object_ts that form a path
860 * @return retained object if found
862 * @addtogroup heimbase
866 heim_path_copy(heim_object_t ptr
, heim_error_t
*error
, ...)
876 o
= heim_retain(heim_path_vget2(ptr
, &p
, &k
, error
, ap
));
882 * Create a path in a heim_object_t tree
884 * @param ptr the tree
885 * @param size the size of the heim_dict_t nodes to be created
886 * @param leaf leaf node to be added, if any
887 * @param error error (output)
888 * @param ap NULL-terminated of path component objects
890 * Create a path of heim_dict_t interior nodes in a given heim_object_t
891 * tree, as necessary, and set/replace a leaf, if given (if leaf is NULL
892 * then the leaf is not deleted).
894 * @return 0 on success, else a system error
896 * @addtogroup heimbase
900 heim_path_vcreate(heim_object_t ptr
, size_t size
, heim_object_t leaf
,
901 heim_error_t
*error
, va_list ap
)
903 heim_object_t path_element
= va_arg(ap
, heim_object_t
);
904 heim_object_t next_path_element
= NULL
;
905 heim_object_t node
= ptr
;
906 heim_object_t next_node
= NULL
;
907 heim_tid_t node_type
;
911 heim_abort("heim_path_vcreate() does not create root nodes");
913 while (path_element
!= NULL
) {
914 next_path_element
= va_arg(ap
, heim_object_t
);
915 node_type
= heim_get_tid(node
);
917 if (node_type
== HEIM_TID_DICT
) {
918 next_node
= heim_dict_get_value(node
, path_element
);
919 } else if (node_type
== HEIM_TID_ARRAY
) {
922 if (heim_get_tid(path_element
) == HEIM_TID_NUMBER
)
923 idx
= heim_number_get_int(path_element
);
926 *error
= heim_error_create(EINVAL
,
927 "heim_path() path elements for "
928 "array nodes must be numeric "
932 if (idx
< heim_array_get_length(node
))
933 next_node
= heim_array_get_value(node
, idx
);
936 } else if (node_type
== HEIM_TID_DB
&& next_path_element
!= NULL
) {
938 *error
= heim_error_create(EINVAL
, "Interior node is a DB");
942 if (next_path_element
== NULL
)
945 /* Create missing interior node */
946 if (next_node
== NULL
) {
947 next_node
= heim_dict_create(size
); /* no arrays or DBs, just dicts */
948 if (next_node
== NULL
) {
953 if (node_type
== HEIM_TID_DICT
) {
954 ret
= heim_dict_set_value(node
, path_element
, next_node
);
955 } else if (node_type
== HEIM_TID_ARRAY
&&
956 heim_number_get_int(path_element
) <= heim_array_get_length(node
)) {
957 ret
= heim_array_insert_value(node
,
958 heim_number_get_int(path_element
),
963 *error
= heim_error_create(ret
, "Node in path not a "
966 heim_release(next_node
);
971 path_element
= next_path_element
;
976 if (path_element
== NULL
)
981 if (node_type
== HEIM_TID_DICT
)
982 ret
= heim_dict_set_value(node
, path_element
, leaf
);
984 ret
= heim_array_insert_value(node
,
985 heim_number_get_int(path_element
),
991 if (error
&& !*error
) {
993 *error
= heim_error_create_enomem();
995 *error
= heim_error_create(ret
, "Could not set "
1002 * Create a path in a heim_object_t tree
1004 * @param ptr the tree
1005 * @param size the size of the heim_dict_t nodes to be created
1006 * @param leaf leaf node to be added, if any
1007 * @param error error (output)
1008 * @param ... NULL-terminated list of path component objects
1010 * Create a path of heim_dict_t interior nodes in a given heim_object_t
1011 * tree, as necessary, and set/replace a leaf, if given (if leaf is NULL
1012 * then the leaf is not deleted).
1014 * @return 0 on success, else a system error
1016 * @addtogroup heimbase
1020 heim_path_create(heim_object_t ptr
, size_t size
, heim_object_t leaf
,
1021 heim_error_t
*error
, ...)
1026 va_start(ap
, error
);
1027 ret
= heim_path_vcreate(ptr
, size
, leaf
, error
, ap
);
1033 * Delete leaf node named by a path in a heim_object_t tree
1035 * @param ptr the tree
1036 * @param error error (output)
1037 * @param ap NULL-terminated list of path component objects
1039 * @addtogroup heimbase
1043 heim_path_vdelete(heim_object_t ptr
, heim_error_t
*error
, va_list ap
)
1045 heim_object_t parent
, key
, child
;
1047 child
= heim_path_vget2(ptr
, &parent
, &key
, error
, ap
);
1048 if (child
!= NULL
) {
1049 if (heim_get_tid(parent
) == HEIM_TID_DICT
)
1050 heim_dict_delete_key(parent
, key
);
1051 else if (heim_get_tid(parent
) == HEIM_TID_DB
)
1052 heim_db_delete_key(parent
, NULL
, key
, error
);
1053 else if (heim_get_tid(parent
) == HEIM_TID_ARRAY
)
1054 heim_array_delete_value(parent
, heim_number_get_int(key
));
1055 heim_release(child
);
1060 * Delete leaf node named by a path in a heim_object_t tree
1062 * @param ptr the tree
1063 * @param error error (output)
1064 * @param ap NULL-terminated list of path component objects
1066 * @addtogroup heimbase
1070 heim_path_delete(heim_object_t ptr
, heim_error_t
*error
, ...)
1074 va_start(ap
, error
);
1075 heim_path_vdelete(ptr
, error
, ap
);