2 /* singly linked-list */
5 #include "oscam-garbage.h"
6 #include "oscam-lock.h"
7 #include "oscam-string.h"
14 mutex lock is needed when...
15 1. l->initial + l->last is modified/accessed
16 2. LL_NODE nxt modified/accessed
22 static int8_t chk_debuglog(LLIST
*l
)
24 return (l
&& l
->lock
.name
!= LOG_LIST
);
/* Tears down list l: takes and immediately releases the write lock so any
 * in-flight reader/writer finishes, then destroys the lock itself.
 * NOTE(review): several interior lines of this function are missing from
 * this extract (NULL/flag guards and the freeing of l are not visible). */
28 static void _destroy(LLIST
*l
)
33 cs_writelock(__func__
, &l
->lock
); // just making sure no one is still using it
34 cs_writeunlock(__func__
, &l
->lock
);
36 cs_lock_destroy(__func__
, &l
->lock
);
/* Allocates a new LLIST via cs_malloc and creates its lock under the given
 * name. The trailing 5000 is presumably a lock timeout in ms — TODO confirm
 * against cs_lock_create.
 * NOTE(review): interior lines are missing from this extract (declaration
 * of l, the allocation-failure return, and the final return of l). */
41 LLIST
*ll_create(const char *name
)
44 if(!cs_malloc(&l
, sizeof(LLIST
)))
46 cs_lock_create(__func__
, &l
->lock
, name
, 5000);
/* Destroys the list referenced by *pl; no-op when the list is missing or
 * already flagged (l->flag) for destruction.
 * NOTE(review): the body lines that obtain l from *pl and perform the
 * actual destruction are missing from this extract. */
50 void ll_destroy(LLIST
**pl
)
53 if(!l
|| l
->flag
) { return; }
/* Destroys the list referenced by *pl and, per its name, presumably its
 * element data as well — TODO confirm; the entire body is missing from
 * this extract. */
60 void ll_destroy_data(LLIST
**pl
)
/* Destroys *pl, freeing node payloads: under the write lock it walks the
 * node chain starting at l->initial, then (like _destroy) bounces the write
 * lock once more to make sure no one is still using the list and destroys
 * the lock. No-op when the list is missing or flagged.
 * NOTE(review): the loop body that actually frees nodes/payloads and the
 * final freeing of l are missing from this extract. */
71 void ll_destroy_free_data(LLIST
**pl
)
74 if(!l
||l
->flag
) { return; }
77 //*********************************
78 cs_writelock(__func__
, &l
->lock
);
80 LL_NODE
*n
=l
->initial
, *nxt
;
92 cs_writeunlock(__func__
, &l
->lock
);
93 //**********************************
97 cs_writelock(__func__
, &l
->lock
); // just making sure no one is still using it
98 cs_writeunlock(__func__
, &l
->lock
);
100 cs_lock_destroy(__func__
, &l
->lock
);
/* Advances iterator it and returns the next element's obj, or presumably
 * NULL at the end (end-of-list handling not visible here). If the list's
 * version counter no longer matches it->ll_version the list was modified,
 * so the iterator re-synchronizes: it rescans from l->initial (the for loop
 * over ptr) to re-find its position, or resets and restarts from the head,
 * then re-captures the version.
 * NOTE(review): callers ll_iter_next()/ll_li_next() DO hold the list lock
 * when calling this, so the legacy comment below may be stale — confirm.
 * NOTE(review): many interior lines of this function are missing from this
 * extract; the resync logic shown here is incomplete. */
106 /* Internal iteration function. Make sure that you don't have a lock and that it and it->l are set. */
107 static void *ll_iter_next_nolock(LL_ITER
*it
)
109 if(it
->l
->version
!= it
->ll_version
)
112 if(chk_debuglog(it
->l
))
113 { cs_log_dbg(D_TRACE
, "list changed, searching new position"); }
117 //cs_readlock(__func__, &it->l->lock);
118 if(!it
->cur
&& !it
->prv
)
120 it
->cur
= it
->l
->initial
;
124 for(ptr
= it
->l
->initial
; ptr
; ptr
= ptr
->nxt
)
135 ll_iter_reset(it
); // restart iteration
136 it
->cur
= it
->l
->initial
;
139 it
->ll_version
= it
->l
->version
;
140 //cs_readunlock(__func__, &it->l->lock);
143 { return it
->cur
->obj
; }
151 it
->cur
= it
->cur
->nxt
;
153 else if(it
->l
->initial
&& !it
->prv
)
154 { it
->cur
= it
->l
->initial
; }
157 { return it
->cur
->obj
; }
/* Removes all nodes from l under the write lock. When clear_data is
 * non-zero each node's payload is handed to add_garbage() (deferred free).
 * No-op when the list is missing or flagged.
 * NOTE(review): the node-walking/freeing loop and the list-reset lines are
 * missing from this extract. */
162 static void ll_clear_int(LLIST
*l
, int32_t clear_data
)
164 if(!l
|| l
->flag
) { return; }
166 cs_writelock(__func__
, &l
->lock
);
168 LL_NODE
*n
= l
->initial
, *nxt
;
173 { add_garbage(n
->obj
); }
181 cs_writeunlock(__func__
, &l
->lock
);
/* Clears all nodes from l without freeing payloads — presumably a thin
 * wrapper over ll_clear_int(l, 0); the body is missing from this extract. */
184 void ll_clear(LLIST
*l
)
/* Clears all nodes from l and releases payloads — presumably a thin
 * wrapper over ll_clear_int(l, 1); the body is missing from this extract. */
190 void ll_clear_data(LLIST
*l
)
/* Allocates a new LL_NODE for obj and links it at the tail: the old last
 * node's nxt is pointed at it, or it becomes l->initial when the list was
 * empty. Caller must already hold the list's write lock (see the comment
 * below). No-op when l/obj is NULL or the list is flagged.
 * NOTE(review): the node initialization, last-pointer/count updates and
 * return statements are missing from this extract. */
195 /* Appends to the list. Do not call this from outside without having a lock! */
196 static LL_NODE
*ll_append_nolock(LLIST
*l
, void *obj
)
198 if(l
&& obj
&& !l
->flag
)
201 if(!cs_malloc(&new, sizeof(LL_NODE
)))
206 { l
->last
->nxt
= new; }
208 { l
->initial
= new; }
/* Thread-safe append: takes the write lock, delegates to ll_append_nolock,
 * releases the lock. Presumably returns the new node n (the return lines
 * are missing from this extract). No-op when l/obj is NULL or flagged. */
218 LL_NODE
*ll_append(LLIST
*l
, void *obj
)
220 if(l
&& obj
&& !l
->flag
)
222 cs_writelock(__func__
, &l
->lock
);
224 LL_NODE
*n
= ll_append_nolock(l
, obj
);
225 cs_writeunlock(__func__
, &l
->lock
);
/* Inserts obj at the head: allocates a node, and under the write lock
 * points its nxt at the old l->initial; when the list was empty, last is
 * set to the (new) initial node. No-op when l/obj is NULL or flagged.
 * NOTE(review): node setup, the assignment of l->initial to the new node,
 * the count update and return statements are missing from this extract. */
231 LL_NODE
*ll_prepend(LLIST
*l
, void *obj
)
233 if(l
&& obj
&& !l
->flag
)
236 if(!cs_malloc(&new, sizeof(LL_NODE
)))
240 cs_writelock(__func__
, &l
->lock
);
242 new->nxt
= l
->initial
;
246 { l
->last
= l
->initial
; }
248 cs_writeunlock(__func__
, &l
->lock
);
/* Builds an iterator for l: zero-initializes the LL_ITER, presumably
 * stores l in it.l (that line is missing from this extract), and captures
 * the list's current version so later calls can detect modifications. */
256 LL_ITER
ll_iter_create(LLIST
*l
)
259 memset(&it
, 0, sizeof(it
));
262 { it
.ll_version
= it
.l
->version
; }
/* Thread-safe iteration step: under the read lock, delegates to
 * ll_iter_next_nolock and presumably returns its result res (the return
 * lines are missing from this extract). No-op when the iterator/list is
 * missing or the list is flagged. */
267 void *ll_iter_next(LL_ITER
*it
)
269 if(it
&& it
->l
&& !it
->l
->flag
)
271 cs_readlock(__func__
, &it
->l
->lock
);
272 void *res
= ll_iter_next_nolock(it
);
273 cs_readunlock(__func__
, &it
->l
->lock
);
/* Unlinks the node the iterator currently points at (del = it->cur),
 * assuming the caller holds the write lock. If the list version changed
 * (or prv is unset) the cached it->prv may be stale, so the predecessor is
 * re-found by walking from l->initial. The predecessor's nxt (or
 * l->initial for a head removal) is bypassed to del->nxt; l->last is
 * cleared when the list empties or repointed to prv when the tail was
 * removed; it->cur is rewound by walking from the head back to prv.
 * Finally the list version is bumped and mirrored into the iterator.
 * NOTE(review): many interior lines (the prv search loop body, count
 * update, freeing of del, and return value) are missing from this
 * extract. */
279 void *ll_iter_remove_nolock(LL_ITER
*it
)
284 LL_NODE
*del
= it
->cur
;
288 LL_NODE
*prv
= it
->prv
;
289 if(it
->ll_version
!= it
->l
->version
|| !prv
) // List has been modified so it->prv might be wrong!
291 LL_NODE
*n
= it
->l
->initial
;
303 { prv
->nxt
= del
->nxt
; }
305 { it
->l
->initial
= del
->nxt
; }
307 { it
->l
->last
= NULL
; }
308 else if(del
== it
->l
->last
)
309 { it
->l
->last
= prv
; }
311 it
->cur
= it
->l
->initial
;
315 while(it
->cur
&& it
->cur
!= prv
)
318 it
->cur
= it
->cur
->nxt
;
324 it
->ll_version
= ++it
->l
->version
;
/* Atomically fetches the next element and removes it: under the WRITE lock
 * (removal modifies the list) it advances via ll_iter_next_nolock, then
 * unlinks via ll_iter_remove_nolock, presumably returning res (return
 * lines missing from this extract). No-op on missing/flagged list. */
332 void *ll_iter_next_remove(LL_ITER
*it
)
334 if(it
&& it
->l
&& !it
->l
->flag
)
336 cs_writelock(__func__
, &it
->l
->lock
);
337 void *res
= ll_iter_next_nolock(it
);
338 ll_iter_remove_nolock(it
);
339 cs_writeunlock(__func__
, &it
->l
->lock
);
/* Advances the iterator offset steps by repeated ll_iter_next_nolock
 * calls, keeping the last result in res.
 * NOTE(review): no lock acquisition is visible in this extract, unlike the
 * other public wrappers — confirm against the full source whether locking
 * lines were lost. Loop-exit handling and the return are also missing. */
345 void *ll_iter_move(LL_ITER
*it
, int32_t offset
)
347 if(it
&& it
->l
&& !it
->l
->flag
)
351 for(i
= 0; i
< offset
; i
++)
353 res
= ll_iter_next_nolock(it
);
/* Looks ahead offset nodes from the current position WITHOUT moving the
 * iterator: under the read lock it walks a local cursor n = it->cur
 * forward offset times (loop body missing from this extract) and
 * presumably returns that node's obj. The (LL_ITER *) casts strip the
 * const qualifier so the lock helpers can be called. */
362 void *ll_iter_peek(const LL_ITER
*it
, int32_t offset
)
364 if(it
&& it
->l
&& !it
->l
->flag
)
366 cs_readlock(__func__
, &((LL_ITER
*)it
)->l
->lock
);
368 LL_NODE
*n
= it
->cur
;
371 for(i
= 0; i
< offset
; i
++)
378 cs_readunlock(__func__
, &((LL_ITER
*)it
)->l
->lock
);
/* Rewinds the iterator to the start of its list — presumably clears
 * it->cur/it->prv (the body is missing from this extract); used by
 * ll_iter_next_nolock to restart iteration after a list change. */
387 void ll_iter_reset(LL_ITER
*it
)
/* Inserts obj immediately AFTER the iterator's current node, under the
 * write lock. When there is no current node or it is the tail, the element
 * is simply appended via ll_append_nolock. Otherwise a new node is
 * allocated and spliced in (n->nxt = it->cur->nxt; the companion
 * it->cur->nxt = n line is missing from this extract), the list version is
 * bumped and mirrored into the iterator. On allocation failure the lock is
 * released (and presumably the function returns). */
396 void ll_iter_insert(LL_ITER
*it
, void *obj
)
398 if(it
&& obj
&& !it
->l
->flag
)
400 cs_writelock(__func__
, &it
->l
->lock
);
402 if(!it
->cur
|| !it
->cur
->nxt
)
403 { ll_append_nolock(it
->l
, obj
); }
407 if(!cs_malloc(&n
, sizeof(LL_NODE
)))
409 cs_writeunlock(__func__
, &it
->l
->lock
);
414 n
->nxt
= it
->cur
->nxt
;
418 it
->ll_version
= ++it
->l
->version
;
420 cs_writeunlock(__func__
, &it
->l
->lock
);
/* Thread-safe removal of the current element: under the write lock it
 * delegates to ll_iter_remove_nolock and stores the detached payload in
 * obj, which is presumably returned (declaration of obj, the del guard and
 * return lines are missing from this extract). */
424 /* Removes the element to which the iterator currently points. */
425 void *ll_iter_remove(LL_ITER
*it
)
428 if(it
&& it
->l
&& !it
->l
->flag
)
430 LL_NODE
*del
= it
->cur
;
433 cs_writelock(__func__
, &it
->l
->lock
);
434 obj
= ll_iter_remove_nolock(it
);
435 cs_writeunlock(__func__
, &it
->l
->lock
);
/* Moves the iterator's current node (move = it->cur) to the head of the
 * list, under the write lock. Early-out when it is already the head. As in
 * ll_iter_remove_nolock, a stale it->prv is re-found by scanning from
 * l->initial when the version changed (on a failed scan the lock is
 * released and the function presumably bails out). The node is unlinked
 * (prv->nxt or l->initial bypassed to move->nxt), l->last is repointed to
 * prv when the tail moved, then the node is relinked at the head
 * (move->nxt = old initial; initial = move). The version is bumped and
 * mirrored into the iterator.
 * NOTE(review): the scan-loop body, iterator fixups and return values are
 * missing from this extract. */
442 /* Moves the element which is currently pointed to by the iterator to the head of the list.*/
443 int32_t ll_iter_move_first(LL_ITER
*it
)
446 if(it
&& it
->l
&& !it
->l
->flag
)
448 LL_NODE
*move
= it
->cur
;
451 if(move
== it
->l
->initial
) //Can't move self to first
454 LL_NODE
*prv
= it
->prv
;
455 cs_writelock(__func__
, &it
->l
->lock
);
456 if(it
->ll_version
!= it
->l
->version
|| !prv
) // List has been modified so it->prv might be wrong!
458 LL_NODE
*n
= it
->l
->initial
;
460 while(n
&& n
!= move
)
467 cs_writeunlock(__func__
, &it
->l
->lock
);
473 { prv
->nxt
= move
->nxt
; }
475 { it
->l
->initial
= move
->nxt
; }
477 if(prv
&& it
->l
->last
== move
)
478 { it
->l
->last
= prv
; }
479 move
->nxt
= it
->l
->initial
;
480 it
->l
->initial
= move
;
482 it
->ll_version
= ++it
->l
->version
;
484 cs_writeunlock(__func__
, &it
->l
->lock
);
/* Removes the current element via ll_iter_remove and presumably frees the
 * returned payload obj (the freeing line is missing from this extract). */
491 void ll_iter_remove_data(LL_ITER
*it
)
493 void *obj
= ll_iter_remove(it
);
497 void *ll_has_elements(const LLIST
*l
)
499 if(!l
|| !l
->initial
|| l
->flag
)
501 return l
->initial
->obj
;
/* Presumably returns the payload of the tail node l->last, or NULL when
 * the list is missing, has no tail, or is flagged — the guarded return and
 * the final return statement are missing from this extract. */
504 void *ll_last_element(const LLIST
*l
)
506 if(!l
|| !l
->last
|| l
->flag
)
/* Tests membership by POINTER identity: iterates the list and compares
 * each element's address with obj; the final return (data == obj) yields
 * 1 when the scan stopped at a match. Guards against NULL/flagged input.
 * NOTE(review): the guard's return, the declaration of data and the loop
 * body (presumably a break on match) are missing from this extract. */
511 int32_t ll_contains(const LLIST
*l
, const void *obj
)
513 if(!l
|| !obj
|| l
->flag
)
515 LL_ITER it
= ll_iter_create((LLIST
*) l
);
517 while((data
= ll_iter_next(&it
)))
522 return (data
== obj
);
/* Tests membership by CONTENT: iterates the list and memcmp()s the first
 * `size` bytes of each element against obj, presumably returning the
 * matching element (or NULL) — those return lines, the guard's return and
 * the declaration of data are missing from this extract. */
525 const void *ll_contains_data(const LLIST
*l
, const void *obj
, uint32_t size
)
527 if(!l
|| !obj
|| l
->flag
)
529 LL_ITER it
= ll_iter_create((LLIST
*) l
);
531 while((data
= ll_iter_next(&it
)))
533 if(!memcmp(data
, obj
, size
))
/* Iterates l and removes element(s) matching obj; returns an int32_t —
 * presumably a count or found-flag, TODO confirm. The guards, the loop
 * body (match test + removal) and the return are missing from this
 * extract. */
539 int32_t ll_remove(LLIST
*l
, const void *obj
)
542 LL_ITER it
= ll_iter_create(l
);
544 while((data
= ll_iter_next(&it
)))
/* Iterates l and, for matching element(s), removes them AND frees the
 * payload via ll_iter_remove_data. The match condition guarding that call
 * and the declaration of data are missing from this extract. */
555 void ll_remove_data(LLIST
*l
, void *obj
)
557 LL_ITER it
= ll_iter_create(l
);
559 while((data
= ll_iter_next(&it
)))
562 { ll_iter_remove_data(&it
); }
/* Set-difference removal: for each element of l (outer iterator it1) the
 * inner loop scans elements_to_remove (it2); on a match the outer element
 * is removed via ll_iter_remove(&it1). Returns int32_t — presumably the
 * number of removals, TODO confirm.
 * NOTE(review): the comparison between data1 and data2, the inner-loop
 * reset of it2, counters and the return are missing from this extract. */
566 // removes all elements from l where elements are in elements_to_remove
567 int32_t ll_remove_all(LLIST
*l
, const LLIST
*elements_to_remove
)
570 LL_ITER it1
= ll_iter_create(l
);
571 LL_ITER it2
= ll_iter_create((LLIST
*) elements_to_remove
);
573 const void *data1
, *data2
;
574 while((data1
= ll_iter_next(&it1
)))
577 while((data2
= ll_iter_next(&it2
)))
581 ll_iter_remove(&it1
);
/* Snapshots the list into a freshly cs_malloc'd array of l->count pointers
 * (filled under the read lock by walking the nodes), then qsort()s the
 * array with the caller-supplied comparator — the list itself is never
 * reordered (see the comment below). On allocation failure the lock is
 * released (and presumably NULL returned). `size` presumably receives
 * l->count — that assignment is missing from this extract, as are the
 * guard's return, the array-fill statement and the final return of p. The
 * (LLIST *) casts strip const for the lock helpers. */
591 /* Returns an array with all elements sorted, the amount of elements is stored in size. We do not sort the original linked list
592 as this might harm running iterations. Furthermore, we need the array anyway for qsort() to work. Remember to free() the result. */
593 void **ll_sort(const LLIST
*l
, void *compare
, int32_t *size
)
595 if(!l
|| !l
->initial
|| !compare
)
603 cs_readlock(__func__
, &((LLIST
*)l
)->lock
);
606 if(!cs_malloc(&p
, l
->count
* sizeof(p
[0])))
608 cs_readunlock(__func__
, &((LLIST
*)l
)->lock
);
611 for(n
= l
->initial
; n
; n
= n
->nxt
)
615 cs_readunlock(__func__
, &((LLIST
*)l
)->lock
);
617 // if (chk_debugLog(it->l))
618 //cs_log_dbg(D_TRACE, "sort: count %d size %d", l->count, sizeof(p[0]));
620 qsort(p
, l
->count
, sizeof(p
[0]), compare
);
/* Appends every element of src to dest (payload pointers are shared, not
 * copied — contrast ll_clone). The declaration of data and any guards are
 * missing from this extract. */
625 void ll_putall(LLIST
*dest
, LLIST
*src
)
627 LL_ITER it
= ll_iter_create(src
);
629 while((data
= ll_iter_next(&it
)))
631 ll_append(dest
, data
);
/* Creates a "locked iterator": allocates an LL_LOCKITER, records the
 * requested lock mode, takes the list's write lock (writelock non-zero) or
 * read lock (otherwise) — the lock is held until ll_li_destroy — and
 * initializes the embedded iterator. Returns NULL for a missing/flagged
 * list or failed allocation.
 * NOTE(review): the assignment li->l = l, the writelock conditional header
 * and the final return of li are missing from this extract. */
636 LL_LOCKITER
*ll_li_create(LLIST
*l
, int32_t writelock
)
638 if(!l
|| l
->flag
) { return NULL
; }
641 if(!cs_malloc(&li
, sizeof(LL_LOCKITER
)))
645 li
->writelock
= writelock
;
647 { cs_writelock(__func__
, &l
->lock
); }
649 { cs_readlock(__func__
, &l
->lock
); }
650 li
->it
= ll_iter_create(l
);
/* Releases the lock held since ll_li_create — write-unlock or read-unlock
 * depending on the recorded mode — and presumably frees li (the guards,
 * the li->writelock conditional header and the free are missing from this
 * extract). */
654 void ll_li_destroy(LL_LOCKITER
*li
)
659 { cs_writeunlock(__func__
, &li
->l
->lock
); }
661 { cs_readunlock(__func__
, &li
->l
->lock
); }
/* Advances the lock-holding iterator: calls ll_iter_next_nolock directly,
 * which is safe here because ll_li_create already holds the list lock.
 * NOTE(review): the surrounding guards and NULL-return path are missing
 * from this extract. */
667 void *ll_li_next(LL_LOCKITER
*li
)
671 return ll_iter_next_nolock(&li
->it
);
/* Deep-copies list l: creates a new list reusing the source's lock name,
 * then — while holding the source's read lock via a lock-iterator — copies
 * copysize bytes of each payload into a fresh cs_malloc'd buffer and
 * appends it to the clone (nolock append: the clone is private here).
 * NOTE(review): assumes every payload is at least copysize bytes — confirm
 * with callers. The declarations of data/new_data, allocation-failure
 * handling, ll_li_destroy call and the return of cloned are missing from
 * this extract. */
676 LLIST
*ll_clone(LLIST
*l
, uint32_t copysize
)
678 if(!l
|| l
->flag
) { return NULL
; }
680 LLIST
*cloned
= ll_create(l
->lock
.name
);
681 LL_LOCKITER
*li
= ll_li_create(l
, 0);
683 while((data
= ll_li_next(li
)))
686 if(!cs_malloc(&new_data
, copysize
))
688 memcpy(new_data
, data
, copysize
);
689 ll_append_nolock(cloned
, new_data
);
/* Pops the head element: fetches the first element through a fresh
 * iterator, unlinks it when present, and presumably returns its payload
 * (the guards and the return of data are missing from this extract). */
695 void *ll_remove_first(LLIST
*l
)
699 LL_ITER it
= ll_iter_create(l
);
700 void *data
= ll_iter_next(&it
);
701 if(data
) { ll_iter_remove(&it
); }
707 void ll_remove_first_data(LLIST
*l
)
709 void *data
= ll_remove_first(l
);
710 if(data
) { NULLFREE(data
); }