1 /* singly linked-list */
4 #include "oscam-garbage.h"
5 #include "oscam-lock.h"
6 #include "oscam-string.h"
13 mutex lock is needed when...
14 1. l->initial + l->last is modified/accessed
15 2. LL_NODE nxt modified/accessed
19 static int8_t chk_debuglog(LLIST
*l
)
21 return (l
&& l
->lock
.name
!= LOG_LIST
);
25 static void _destroy(LLIST
*l
)
30 cs_writelock(__func__
, &l
->lock
); // just getting sure noone is using it
31 cs_writeunlock(__func__
, &l
->lock
);
33 cs_lock_destroy(__func__
, &l
->lock
);
38 LLIST
*ll_create(const char *name
)
41 if(!cs_malloc(&l
, sizeof(LLIST
)))
43 cs_lock_create(__func__
, &l
->lock
, name
, 5000);
47 void ll_destroy(LLIST
**pl
)
50 if(!l
|| l
->flag
) { return; }
57 void ll_destroy_data(LLIST
**pl
)
67 void ll_destroy_free_data(LLIST
**pl
)
70 if(!l
||l
->flag
) { return; }
73 //*********************************
74 cs_writelock(__func__
, &l
->lock
);
76 LL_NODE
*n
=l
->initial
, *nxt
;
88 cs_writeunlock(__func__
, &l
->lock
);
89 //**********************************
93 cs_writelock(__func__
, &l
->lock
); //just getting sure noone is using it
94 cs_writeunlock(__func__
, &l
->lock
);
96 cs_lock_destroy(__func__
, &l
->lock
);
102 /* Internal iteration function. Make sure that you don't have a lock and that it and it->l are set. */
103 static void *ll_iter_next_nolock(LL_ITER
*it
)
105 if(it
->l
->version
!= it
->ll_version
)
108 if(chk_debuglog(it
->l
))
109 { cs_log_dbg(D_TRACE
, "list changed, searching new position"); }
113 //cs_readlock(__func__, &it->l->lock);
114 if(!it
->cur
&& !it
->prv
)
116 it
->cur
= it
->l
->initial
;
120 for(ptr
= it
->l
->initial
; ptr
; ptr
= ptr
->nxt
)
131 ll_iter_reset(it
); // restart iteration
132 it
->cur
= it
->l
->initial
;
135 it
->ll_version
= it
->l
->version
;
136 //cs_readunlock(__func__, &it->l->lock);
139 { return it
->cur
->obj
; }
147 it
->cur
= it
->cur
->nxt
;
149 else if(it
->l
->initial
&& !it
->prv
)
150 { it
->cur
= it
->l
->initial
; }
153 { return it
->cur
->obj
; }
158 static void ll_clear_int(LLIST
*l
, int32_t clear_data
)
160 if(!l
|| l
->flag
) { return; }
162 cs_writelock(__func__
, &l
->lock
);
164 LL_NODE
*n
= l
->initial
, *nxt
;
169 { add_garbage(n
->obj
); }
177 cs_writeunlock(__func__
, &l
->lock
);
180 void ll_clear(LLIST
*l
)
186 void ll_clear_data(LLIST
*l
)
191 /* Appends to the list. Do not call this from outside without having a lock! */
192 static LL_NODE
*ll_append_nolock(LLIST
*l
, void *obj
)
194 if(l
&& obj
&& !l
->flag
)
197 if(!cs_malloc(&new, sizeof(LL_NODE
)))
202 { l
->last
->nxt
= new; }
204 { l
->initial
= new; }
214 LL_NODE
*ll_append(LLIST
*l
, void *obj
)
216 if(l
&& obj
&& !l
->flag
)
218 cs_writelock(__func__
, &l
->lock
);
220 LL_NODE
*n
= ll_append_nolock(l
, obj
);
221 cs_writeunlock(__func__
, &l
->lock
);
227 LL_NODE
*ll_prepend(LLIST
*l
, void *obj
)
229 if(l
&& obj
&& !l
->flag
)
232 if(!cs_malloc(&new, sizeof(LL_NODE
)))
236 cs_writelock(__func__
, &l
->lock
);
238 new->nxt
= l
->initial
;
242 { l
->last
= l
->initial
; }
244 cs_writeunlock(__func__
, &l
->lock
);
252 LL_ITER
ll_iter_create(LLIST
*l
)
255 memset(&it
, 0, sizeof(it
));
258 { it
.ll_version
= it
.l
->version
; }
262 void *ll_iter_next(LL_ITER
*it
)
264 if(it
&& it
->l
&& !it
->l
->flag
)
266 cs_readlock(__func__
, &it
->l
->lock
);
267 void *res
= ll_iter_next_nolock(it
);
268 cs_readunlock(__func__
, &it
->l
->lock
);
274 void *ll_iter_remove_nolock(LL_ITER
*it
)
279 LL_NODE
*del
= it
->cur
;
283 LL_NODE
*prv
= it
->prv
;
284 if(it
->ll_version
!= it
->l
->version
|| !prv
) // List has been modified so it->prv might be wrong!
286 LL_NODE
*n
= it
->l
->initial
;
298 { prv
->nxt
= del
->nxt
; }
300 { it
->l
->initial
= del
->nxt
; }
302 { it
->l
->last
= NULL
; }
303 else if(del
== it
->l
->last
)
304 { it
->l
->last
= prv
; }
306 it
->cur
= it
->l
->initial
;
310 while(it
->cur
&& it
->cur
!= prv
)
313 it
->cur
= it
->cur
->nxt
;
319 it
->ll_version
= ++it
->l
->version
;
327 void *ll_iter_next_remove(LL_ITER
*it
)
329 if(it
&& it
->l
&& !it
->l
->flag
)
331 cs_writelock(__func__
, &it
->l
->lock
);
332 void *res
= ll_iter_next_nolock(it
);
333 ll_iter_remove_nolock(it
);
334 cs_writeunlock(__func__
, &it
->l
->lock
);
340 void *ll_iter_move(LL_ITER
*it
, int32_t offset
)
342 if(it
&& it
->l
&& !it
->l
->flag
)
346 for(i
= 0; i
< offset
; i
++)
348 res
= ll_iter_next_nolock(it
);
357 void *ll_iter_peek(const LL_ITER
*it
, int32_t offset
)
359 if(it
&& it
->l
&& !it
->l
->flag
)
361 cs_readlock(__func__
, &((LL_ITER
*)it
)->l
->lock
);
363 LL_NODE
*n
= it
->cur
;
366 for(i
= 0; i
< offset
; i
++)
373 cs_readunlock(__func__
, &((LL_ITER
*)it
)->l
->lock
);
382 void ll_iter_reset(LL_ITER
*it
)
391 void ll_iter_insert(LL_ITER
*it
, void *obj
)
393 if(it
&& obj
&& !it
->l
->flag
)
395 cs_writelock(__func__
, &it
->l
->lock
);
397 if(!it
->cur
|| !it
->cur
->nxt
)
398 { ll_append_nolock(it
->l
, obj
); }
402 if(!cs_malloc(&n
, sizeof(LL_NODE
)))
404 cs_writeunlock(__func__
, &it
->l
->lock
);
409 n
->nxt
= it
->cur
->nxt
;
413 it
->ll_version
= ++it
->l
->version
;
415 cs_writeunlock(__func__
, &it
->l
->lock
);
419 /* Removes the element to which the iterator currently points. */
420 void *ll_iter_remove(LL_ITER
*it
)
423 if(it
&& it
->l
&& !it
->l
->flag
)
425 LL_NODE
*del
= it
->cur
;
428 cs_writelock(__func__
, &it
->l
->lock
);
429 obj
= ll_iter_remove_nolock(it
);
430 cs_writeunlock(__func__
, &it
->l
->lock
);
437 /* Moves the element which is currently pointed to by the iterator to the head of the list.*/
438 int32_t ll_iter_move_first(LL_ITER
*it
)
441 if(it
&& it
->l
&& !it
->l
->flag
)
443 LL_NODE
*move
= it
->cur
;
446 if(move
== it
->l
->initial
) //Can't move self to first
449 LL_NODE
*prv
= it
->prv
;
450 cs_writelock(__func__
, &it
->l
->lock
);
451 if(it
->ll_version
!= it
->l
->version
|| !prv
) // List has been modified so it->prv might be wrong!
453 LL_NODE
*n
= it
->l
->initial
;
455 while(n
&& n
!= move
)
462 cs_writeunlock(__func__
, &it
->l
->lock
);
468 { prv
->nxt
= move
->nxt
; }
470 { it
->l
->initial
= move
->nxt
; }
472 if(prv
&& it
->l
->last
== move
)
473 { it
->l
->last
= prv
; }
474 move
->nxt
= it
->l
->initial
;
475 it
->l
->initial
= move
;
477 it
->ll_version
= ++it
->l
->version
;
479 cs_writeunlock(__func__
, &it
->l
->lock
);
486 void ll_iter_remove_data(LL_ITER
*it
)
488 void *obj
= ll_iter_remove(it
);
492 void *ll_has_elements(const LLIST
*l
)
494 if(!l
|| !l
->initial
|| l
->flag
)
496 return l
->initial
->obj
;
499 void *ll_last_element(const LLIST
*l
)
501 if(!l
|| !l
->last
|| l
->flag
)
506 int32_t ll_contains(const LLIST
*l
, const void *obj
)
508 if(!l
|| !obj
|| l
->flag
)
510 LL_ITER it
= ll_iter_create((LLIST
*) l
);
512 while((data
= ll_iter_next(&it
)))
517 return (data
== obj
);
520 const void *ll_contains_data(const LLIST
*l
, const void *obj
, uint32_t size
)
522 if(!l
|| !obj
|| l
->flag
)
524 LL_ITER it
= ll_iter_create((LLIST
*) l
);
526 while((data
= ll_iter_next(&it
)))
528 if(!memcmp(data
, obj
, size
))
534 int32_t ll_remove(LLIST
*l
, const void *obj
)
537 LL_ITER it
= ll_iter_create(l
);
539 while((data
= ll_iter_next(&it
)))
550 void ll_remove_data(LLIST
*l
, void *obj
)
552 LL_ITER it
= ll_iter_create(l
);
554 while((data
= ll_iter_next(&it
)))
557 { ll_iter_remove_data(&it
); }
561 // removes all elements from l where elements are in elements_to_remove
562 int32_t ll_remove_all(LLIST
*l
, const LLIST
*elements_to_remove
)
565 LL_ITER it1
= ll_iter_create(l
);
566 LL_ITER it2
= ll_iter_create((LLIST
*) elements_to_remove
);
568 const void *data1
, *data2
;
569 while((data1
= ll_iter_next(&it1
)))
572 while((data2
= ll_iter_next(&it2
)))
576 ll_iter_remove(&it1
);
586 /* Returns an array with all elements sorted, the amount of elements is stored in size. We do not sort the original linked list
587 as this might harm running iterations. Furthermore, we need the array anyway for qsort() to work. Remember to free() the result. */
588 void **ll_sort(const LLIST
*l
, void *compare
, int32_t *size
)
590 if(!l
|| !l
->initial
|| !compare
)
598 cs_readlock(__func__
, &((LLIST
*)l
)->lock
);
601 if(!cs_malloc(&p
, l
->count
* sizeof(p
[0])))
603 cs_readunlock(__func__
, &((LLIST
*)l
)->lock
);
606 for(n
= l
->initial
; n
; n
= n
->nxt
)
610 cs_readunlock(__func__
, &((LLIST
*)l
)->lock
);
612 //if (chk_debugLog(it->l))
613 //cs_log_dbg(D_TRACE, "sort: count %d size %d", l->count, sizeof(p[0]));
615 qsort(p
, l
->count
, sizeof(p
[0]), compare
);
620 void ll_putall(LLIST
*dest
, LLIST
*src
)
622 LL_ITER it
= ll_iter_create(src
);
624 while((data
= ll_iter_next(&it
)))
626 ll_append(dest
, data
);
631 LL_LOCKITER
*ll_li_create(LLIST
*l
, int32_t writelock
)
633 if(!l
|| l
->flag
) { return NULL
; }
636 if(!cs_malloc(&li
, sizeof(LL_LOCKITER
)))
640 li
->writelock
= writelock
;
642 { cs_writelock(__func__
, &l
->lock
); }
644 { cs_readlock(__func__
, &l
->lock
); }
645 li
->it
= ll_iter_create(l
);
649 void ll_li_destroy(LL_LOCKITER
*li
)
654 { cs_writeunlock(__func__
, &li
->l
->lock
); }
656 { cs_readunlock(__func__
, &li
->l
->lock
); }
662 void *ll_li_next(LL_LOCKITER
*li
)
666 return ll_iter_next_nolock(&li
->it
);
671 LLIST
*ll_clone(LLIST
*l
, uint32_t copysize
)
673 if(!l
|| l
->flag
) { return NULL
; }
675 LLIST
*cloned
= ll_create(l
->lock
.name
);
676 LL_LOCKITER
*li
= ll_li_create(l
, 0);
678 while((data
= ll_li_next(li
)))
681 if(!cs_malloc(&new_data
, copysize
))
683 memcpy(new_data
, data
, copysize
);
684 ll_append_nolock(cloned
, new_data
);
690 void *ll_remove_first(LLIST
*l
)
694 LL_ITER it
= ll_iter_create(l
);
695 void *data
= ll_iter_next(&it
);
696 if(data
) { ll_iter_remove(&it
); }
702 void ll_remove_first_data(LLIST
*l
)
704 void *data
= ll_remove_first(l
);
705 if(data
) { NULLFREE(data
); }