/*
  This file is part of the NoBug debugging library.

  Copyright (C)
    2007, 2008, 2009, 2010,             Christian Thaeter <ct@pipapo.org>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, contact Christian Thaeter <ct@pipapo.org>.
*/
#include <stdlib.h>

#define NOBUG_LIBNOBUG_C
#include "nobug.h"
#include "llist.h"
#include "mpool.h"

//deadlock There are two kinds of nodes: `resource_record`s hold registered resources and `resource_user`s
//deadlock attach to (enter) a resource.
//deadlock
//deadlock Each thread keeps a stack (list) of the `resource_user`s it created; entering a resource pushes onto
//deadlock this stack, leaving a resource removes it from the stack again.
//deadlock
//deadlock All `resource_records` in use are linked in a global precedence list; items of equal precedence are
//deadlock spanned by a skip pointer. Whenever a resource is entered, the deadlock checker asserts that this does
//deadlock not break existing precedences. By doing so the precedence list gets continuously refined as the system
//deadlock learns about new lock patterns.
//deadlock
//deadlock As a consequence of this algorithm the deadlock checker does not only find real deadlocks, it already
//deadlock reports potential deadlocks as violations of the locking order, which is a lot simpler than finding
//deadlock actual deadlocks.
//deadlock
//deadlock This also means that the deadlock tracker currently only works with hierarchical locking policies;
//deadlock other approaches to prevent deadlocks are not yet supported and will be added on demand.
//deadlock
//deadlock
//deadlock
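
/*
  Usage sketch (editor's illustration, not part of the original source): the functions in this
  file are normally driven by the RESOURCE_* macros in nobug.h, but the bare calling sequence
  they implement looks roughly like the following. 'ctx' stands for a struct nobug_context
  describing the call site and 'account_mutex' for the real lock being tracked; both are
  assumed placeholders here.

      struct nobug_resource_record* rec =
        nobug_resource_announce ("mutex", "account_lock", &account_mutex, ctx);
      nobug_resource_announce_complete ();

      struct nobug_resource_user* usr =
        nobug_resource_enter (rec, "locker", NOBUG_RESOURCE_WAITING, ctx);  // declare the intent to block
      pthread_mutex_lock (&account_mutex);                                  // actually block
      nobug_resource_state (usr, NOBUG_RESOURCE_EXCLUSIVE);                 // record that we got it

      pthread_mutex_unlock (&account_mutex);
      nobug_resource_leave_pre ();
      nobug_resource_leave (usr);                                           // pop it from the thread stack
*/
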
/*
  How much memory to reserve for a mpool chunk, roughly 8k on 32 bit and 16k on 64 bit machines by default
*/
#ifndef NOBUG_RESOURCE_MPOOL_CHUNKSIZE
#define NOBUG_RESOURCE_MPOOL_CHUNKSIZE (4096<<(sizeof(void*)/4)) /* roughly 8k chunks on 32 bit, 16k chunks on 64 bit machines */
#endif
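
/*
  Worked out (editor's note): with sizeof(void*) being 4 on 32 bit and 8 on 64 bit machines,
  the default expands to

      32 bit:  4096 << (4/4)  =  4096 << 1  =  8192 bytes  (8k)
      64 bit:  4096 << (8/4)  =  4096 << 2  = 16384 bytes (16k)
*/
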
#if NOBUG_USE_PTHREAD
pthread_mutex_t nobug_resource_mutex;
#endif

#define nobug_resourcestates                    \
  resource_state(invalid),                      \
  resource_state(waiting),                      \
  resource_state(trying),                       \
  resource_state(exclusive),                    \
  resource_state(recursive),                    \
  resource_state(shared),

#define resource_state(name) #name
const char* nobug_resource_states[] =
  {
    nobug_resourcestates
    NULL
  };
#undef resource_state
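
/*
  For illustration (editor's note), after preprocessing the table above is equivalent to

      const char* nobug_resource_states[] =
        {
          "invalid", "waiting", "trying", "exclusive", "recursive", "shared",
          NULL
        };

  so the array can be indexed directly with an enum nobug_resource_state value.
*/
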
const char* nobug_resource_error = NULL;

static llist nobug_resource_registry;
static nobug_mpool nobug_resource_record_pool;
static nobug_mpool nobug_resource_user_pool;
#if NOBUG_USE_PTHREAD
static nobug_mpool nobug_resource_node_pool;
#endif

static void nobug_resource_record_dtor (void*);
static void nobug_resource_user_dtor (void*);
#if NOBUG_USE_PTHREAD
static void nobug_resource_node_dtor (void*);
#endif


void
nobug_resource_init (void)
{
#if NOBUG_USE_PTHREAD
  static pthread_mutexattr_t attr;
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init (&nobug_resource_mutex, &attr);
#endif

  llist_init (&nobug_resource_registry);

  nobug_mpool_init (&nobug_resource_record_pool,
                    sizeof(struct nobug_resource_record),
                    NOBUG_RESOURCE_MPOOL_CHUNKSIZE/sizeof(struct nobug_resource_record),
                    nobug_resource_record_dtor);

  nobug_mpool_init (&nobug_resource_user_pool,
                    sizeof(struct nobug_resource_user),
                    NOBUG_RESOURCE_MPOOL_CHUNKSIZE/sizeof(struct nobug_resource_user),
                    nobug_resource_user_dtor);

#if NOBUG_USE_PTHREAD
  nobug_mpool_init (&nobug_resource_node_pool,
                    sizeof(struct nobug_resource_node),
                    NOBUG_RESOURCE_MPOOL_CHUNKSIZE/sizeof(struct nobug_resource_node),
                    nobug_resource_node_dtor);
#endif
}


void
nobug_resource_destroy (void)
{
#if NOBUG_USE_PTHREAD
  nobug_mpool_destroy (&nobug_resource_node_pool);
#endif
  nobug_mpool_destroy (&nobug_resource_user_pool);
  nobug_mpool_destroy (&nobug_resource_record_pool);
}


unsigned
nobug_resource_record_available (void)
{
  return nobug_mpool_available (&nobug_resource_record_pool);
}


unsigned
nobug_resource_user_available (void)
{
  return nobug_mpool_available (&nobug_resource_user_pool);
}


#if NOBUG_USE_PTHREAD
unsigned
nobug_resource_node_available (void)
{
  return nobug_mpool_available (&nobug_resource_node_pool);
}


static void
nobug_resource_node_free (struct nobug_resource_node* self)
{
  LLIST_WHILE_HEAD (&self->childs, c)
    nobug_resource_node_free (LLIST_TO_STRUCTP(c, struct nobug_resource_node, cldnode));

  llist_unlink_fast_ (&self->cldnode);
  llist_unlink_fast_ (&self->node);
  nobug_mpool_free (&nobug_resource_node_pool, self);
}


static void
nobug_resource_node_dtor (void* p)
{
  struct nobug_resource_node* n = p;
  llist_unlink_fast_ (&n->node);
  /* must unlink childs, because we don't destroy the tree bottom up */
  llist_unlink_fast_ (&n->childs);
  llist_unlink_fast_ (&n->cldnode);
}
#endif


static void
nobug_resource_record_dtor (void* p)
{
  struct nobug_resource_record* self = p;
  llist_unlink_fast_ (&self->hdr.node);

#if NOBUG_USE_PTHREAD
  /* destroy all nodes recursively */
  LLIST_WHILE_HEAD (&self->nodes, n)
    nobug_resource_node_free ((struct nobug_resource_node*)n);
#endif
}


static void
nobug_resource_user_dtor (void* p)
{
  struct nobug_resource_user* u = p;
  llist_unlink_fast_ (&u->hdr.node);
#if NOBUG_USE_PTHREAD
  llist_unlink_fast_ (&u->res_stack);
#endif
}


static int
compare_resource_records (const_LList av, const_LList bv, void* unused)
{
  (void) unused;
  const struct nobug_resource_record* a = (const struct nobug_resource_record*)av;
  const struct nobug_resource_record* b = (const struct nobug_resource_record*)bv;

  return a->object_id > b->object_id ? 1 : a->object_id < b->object_id ? -1 : 0;
}


struct nobug_resource_record*
nobug_resource_announce (const char* type, const char* name, const void* object_id, const struct nobug_context extra)
{
#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);
#endif

  struct nobug_resource_record* node = nobug_mpool_alloc (&nobug_resource_record_pool);
  if (!node)
    {
      nobug_resource_error = "internal allocation error";
      return NULL;
    }

  node->hdr.name = name;
  node->object_id = object_id;
  node->type = type;

  /* TODO better lookup method than list search (psplay?) */
  if (llist_ufind (&nobug_resource_registry, &node->hdr.node, compare_resource_records, NULL))
    {
      nobug_resource_error = "already registered";
      return NULL;
    }

  llist_init (&node->users);
  node->hdr.extra = extra;
#if NOBUG_USE_PTHREAD
  llist_init (&node->nodes);
#endif

  llist_insert_head (&nobug_resource_registry, llist_init (&node->hdr.node));

  return node;
}


void
nobug_resource_announce_complete (void)
{
#if NOBUG_USE_PTHREAD
  pthread_mutex_unlock (&nobug_resource_mutex);
#endif
}
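
/*
  Note with a sketch (editor's addition): nobug_resource_announce() returns with
  nobug_resource_mutex still held and nobug_resource_announce_complete() releases it,
  presumably so that the caller can log the announcement while the registry is still
  consistent. A caller therefore always pairs the two:

      struct nobug_resource_record* rec =
        nobug_resource_announce ("file", "logfile", &fd, ctx);   // locks the registry
      if (rec)
        ;                                                        // log/record the announcement here
      nobug_resource_announce_complete ();                       // unlocks the registry

  'fd' and 'ctx' are assumed placeholders.
*/
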
int
nobug_resource_forget (struct nobug_resource_record* self)
{
#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);
#endif
  if (!llist_find (&nobug_resource_registry, &self->hdr.node, compare_resource_records, NULL))
    {
      nobug_resource_error = "not registered";
      return 0;
    }

  if (!llist_is_empty (&self->users))
    {
      nobug_resource_error = "still in use";
      return 0;
    }

  nobug_resource_record_dtor (self);

  nobug_mpool_free (&nobug_resource_record_pool, self);

#if NOBUG_USE_PTHREAD
  pthread_mutex_unlock (&nobug_resource_mutex);
#endif

  return 1;
}


#if NOBUG_USE_PTHREAD
static int
nobug_resource_node_resource_cmpfn (const_LList a, const_LList b, void* extra)
{
  (void) extra;
  return ((struct nobug_resource_node*)a)->resource ==
    ((struct nobug_resource_node*)b)->resource?0:-1;
}


struct nobug_resource_node*
nobug_resource_node_new (struct nobug_resource_record* resource,
                         struct nobug_resource_node* parent)
{
  struct nobug_resource_node* self = nobug_mpool_alloc (&nobug_resource_node_pool);
  if (self)
    {
      llist_insert_head (&resource->nodes, llist_init (&self->node));
      self->resource = resource;

      self->parent = parent;

      llist_init (&self->childs);
      llist_init (&self->cldnode);
      if (parent)
        llist_insert_head (&parent->childs, &self->cldnode);
    }
  return self;
}
#endif
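
/*
  Example (editor's sketch): a locking path A -> B -> C discovered at runtime is recorded as a
  chain of nodes, each linked into its resource's 'nodes' list and pointing back to its parent.
  nobug_resource_enter() creates these nodes on demand; built by hand it would look like

      struct nobug_resource_node* na = nobug_resource_node_new (rec_a, NULL);
      struct nobug_resource_node* nb = nobug_resource_node_new (rec_b, na);
      struct nobug_resource_node* nc = nobug_resource_node_new (rec_c, nb);

  where rec_a, rec_b and rec_c stand for already announced resource records (placeholders).
*/
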
//dlalgo HEAD~ The Resource Tracking Algorithm; deadlock_detection; how resources are tracked
//dlalgo
//dlalgo Each resource registers a global 'resource_record'.
//dlalgo
//dlalgo Every new locking path discovered is stored as 'resource_node' structures which refer to the associated
//dlalgo 'resource_record'.
//dlalgo
//dlalgo Threads keep a trail of 'resource_user' structures for each resource entered. These 'resource_user' structs
//dlalgo refer to the 'resource_nodes' and thus indirectly to the associated 'resource_record'.
//dlalgo
//dlalgo The deadlock checker uses this information to test whether the acquisition of a new resource would yield a
//dlalgo potential deadlock.
//dlalgo
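/*
  Illustration (editor's addition): the kind of ordering violation this machinery is designed
  to flag. Suppose one thread has entered A, then B, then C, teaching the checker the chain
  A -> B -> C. If another thread that only holds C then enters B in WAITING state, the checks
  in nobug_resource_enter() below find B on C's parent trail, find no common ancestor held by
  the waiting thread, and fail with nobug_resource_error set to "possible deadlock detected",
  even though no deadlock has actually happened yet.

      Thread 1                                     Thread 2
      --------                                     --------
      enter (A), enter (B), enter (C)
      leave all three
                                                   enter (C)
                                                   enter (B)   <-- refused, locking order violated
*/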
struct nobug_resource_user*
nobug_resource_enter (struct nobug_resource_record* resource,
                      const char* identifier,
                      enum nobug_resource_state state,
                      const struct nobug_context extra)
{
  if (!resource)
    {
      nobug_resource_error = "no resource";
      return NULL;
    }

#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);

  struct nobug_tls_data* tls = nobug_thread_get ();

  //dlalgo HEAD^ Entering Resources; nobug_resource_enter; deadlock check on enter
  //dlalgo
  //dlalgo In multithreaded programs, whenever a thread wants to wait for a 'resource_record'
  //dlalgo the deadlock checker jumps in.
  //dlalgo
  //dlalgo The deadlock checking algorithm is anticipatory: it will find and abort on conditions which may lead
  //dlalgo to a potential deadlock by violating the locking order learned earlier.
  //dlalgo
  //dlalgo Each thread holds a stack (list) of the 'resource_user' structures it created. Leaving
  //dlalgo a resource will remove it from this stack.
  //dlalgo
  //dlalgo Each 'resource_record' stores a trail of which other 'resource_records' have already been entered. These
  //dlalgo relations are implemented with the 'resource_node' helper structure.
  //dlalgo
  //dlalgo ////
  //dlalgo TODO: insert diagram here
  //dlalgo 2-3
  //dlalgo 1
  //dlalgo 3-4-2
  //dlalgo
  //dlalgo 1-3-2-4
  //dlalgo
  //dlalgo 3-4-2
  //dlalgo
  //dlalgo 1-4-2
  //dlalgo
  //dlalgo ////
  //dlalgo
  //dlalgo First we find out if there is already a node from the resource to be acquired back to
  //dlalgo the topmost node of the current thread's user stack.
  //dlalgo
  //dlalgo [source,c]
  //dlalgo ---------------------------------------------------------------------
  struct nobug_resource_user* user = NULL;                      //dlalgo VERBATIM
  struct nobug_resource_node* node = NULL;                      //dlalgo VERBATIM
                                                                //dlalgo VERBATIM
  if (!llist_is_empty (&tls->res_stack))                        //dlalgo VERBATIM
    {                                                           //dlalgo VERBATIM
      user = LLIST_TO_STRUCTP (llist_tail (&tls->res_stack),    //dlalgo VERBATIM
                               struct nobug_resource_user,      //dlalgo VERBATIM
                               res_stack);                      //dlalgo VERBATIM
                                                                //dlalgo VERBATIM
      struct nobug_resource_node templ =                        //dlalgo VERBATIM
        {                                                       //dlalgo VERBATIM
          {NULL, NULL},                                         //dlalgo ...
          user->current->resource,                              //dlalgo VERBATIM
          NULL,                                                 //dlalgo ...
          {NULL, NULL},
          {NULL, NULL}
        };                                                      //dlalgo VERBATIM
                                                                //dlalgo VERBATIM
      node = (struct nobug_resource_node*)                      //dlalgo VERBATIM
        llist_ufind (&resource->nodes,                          //dlalgo VERBATIM
                     &templ.node,                               //dlalgo VERBATIM
                     nobug_resource_node_resource_cmpfn,        //dlalgo VERBATIM
                     NULL);                                     //dlalgo VERBATIM
    }                                                           //dlalgo VERBATIM
  //dlalgo ...
  //dlalgo ---------------------------------------------------------------------
  //dlalgo
#endif

  //dlalgo Deadlock checking is only done when the resource is entered in `WAITING` state, and it is only
  //dlalgo available in multithreaded programs.
  //dlalgo
  //dlalgo [source,c]
  //dlalgo ---------------------------------------------------------------------
  if (state == NOBUG_RESOURCE_WAITING)                          //dlalgo VERBATIM
    {                                                           //dlalgo VERBATIM
#if NOBUG_USE_PTHREAD                                           //dlalgo VERBATIM
      //dlalgo ...
      //dlalgo ---------------------------------------------------------------------
      //dlalgo

      //dlalgo If a node was found above, then this locking path is already validated and no deadlock can happen;
      //dlalgo otherwise, if this thread's stack already holds a resource ('user' is set), we have to go on with checking.
      //dlalgo
      //dlalgo [source,c]
      //dlalgo ---------------------------------------------------------------------
      if (!node && user)                                        //dlalgo VERBATIM
        {                                                       //dlalgo VERBATIM
          //dlalgo ...
          //dlalgo ---------------------------------------------------------------------
          //dlalgo
          //dlalgo It is then checked that the resource to be entered is not on any parent trail of the current topmost resource;
          //dlalgo if it is, then this could be a deadlock which needs to be investigated further.
          //dlalgo
          //dlalgo [source,c]
          //dlalgo ---------------------------------------------------------------------
          LLIST_FOREACH (&user->current->resource->nodes, n)    //dlalgo VERBATIM
            {                                                   //dlalgo VERBATIM
              for (struct nobug_resource_node* itr =            //dlalgo VERBATIM
                     ((struct nobug_resource_node*)n)->parent;  //dlalgo VERBATIM
                   itr;                                         //dlalgo VERBATIM
                   itr = itr->parent)                           //dlalgo VERBATIM
                {                                               //dlalgo VERBATIM
                  if (itr->resource == resource)                //dlalgo VERBATIM
                    {                                           //dlalgo VERBATIM
                      //dlalgo ...
                      //dlalgo ---------------------------------------------------------------------
                      //dlalgo
                      //dlalgo If the resource was on the trail, we search for a common ancestor between the resource
                      //dlalgo on the trail and the thread's current chain;
                      //dlalgo if there is one, then this ancestor protects against deadlocks and we can continue.
                      //dlalgo
                      //dlalgo [source,c]
                      //dlalgo ---------------------------------------------------------------------
                      for (struct nobug_resource_node* itr2 = itr->parent;        //dlalgo VERBATIM
                           itr2;                                                  //dlalgo VERBATIM
                           itr2 = itr2->parent)                                   //dlalgo VERBATIM
                        {                                                         //dlalgo VERBATIM
                          LLIST_FOREACH_REV (&tls->res_stack, p)                  //dlalgo VERBATIM
                            {                                                     //dlalgo VERBATIM
                              struct nobug_resource_user* user =                  //dlalgo VERBATIM
                                LLIST_TO_STRUCTP (p,                              //dlalgo VERBATIM
                                                  struct nobug_resource_user,     //dlalgo VERBATIM
                                                  res_stack);                     //dlalgo VERBATIM
                              if (user->current->resource == itr2->resource)      //dlalgo VERBATIM
                                goto done;                                        //dlalgo VERBATIM
                            }                                                     //dlalgo VERBATIM
                          //dlalgo ---------------------------------------------------------------------
                          //dlalgo
                          //dlalgo If no ancestor was found, we finally abort with a potential deadlock condition.
                          //dlalgo
                          //dlalgo [source,c]
                          //dlalgo ---------------------------------------------------------------------
                          nobug_resource_error = "possible deadlock detected";    //dlalgo VERBATIM
                          return NULL;                                            //dlalgo VERBATIM
                          //dlalgo ...
                          //dlalgo ---------------------------------------------------------------------
                          //dlalgo
                        }
                    }
                }
            }
        }
    done:;
#endif
    }
  else if (state == NOBUG_RESOURCE_TRYING)
    {
      /* nothing */
    }
  else if (state == NOBUG_RESOURCE_EXCLUSIVE)
    {
      /* check that everyone is waiting */
      LLIST_FOREACH (&resource->users, n)
        if (((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_WAITING &&
            ((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_TRYING)
          {
            nobug_resource_error = "invalid enter state (resource already claimed)";
            break;
          }
    }
#if NOBUG_USE_PTHREAD
  else if (state == NOBUG_RESOURCE_RECURSIVE)
    {
      /* check that everyone *else* is waiting */
      LLIST_FOREACH (&resource->users, n)
        {
          struct nobug_resource_user* user = (struct nobug_resource_user*)n;
          if (user->state != NOBUG_RESOURCE_WAITING &&
              user->state != NOBUG_RESOURCE_TRYING &&
              user->thread != tls)
            {
              nobug_resource_error = "invalid enter state (resource already claimed non recursive by another thread)";
              break;
            }
          else if (!(user->state == NOBUG_RESOURCE_WAITING ||
                     user->state == NOBUG_RESOURCE_TRYING ||
                     user->state == NOBUG_RESOURCE_RECURSIVE) &&
                   user->thread == tls)
            {
              nobug_resource_error = "invalid enter state (resource already claimed non recursive by this thread)";
              break;
            }
        }
    }
#endif
  else if (state == NOBUG_RESOURCE_SHARED)
    {
      /* check that everyone else is waiting or holds it shared */
      LLIST_FOREACH (&resource->users, n)
        if (((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_WAITING &&
            ((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_TRYING &&
            ((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_SHARED)
          {
            nobug_resource_error = "invalid enter state (resource already claimed non shared)";
            break;
          }
    }
  else
    nobug_resource_error = "invalid enter state";

  if (nobug_resource_error)
    return NULL;

  struct nobug_resource_user* new_user = nobug_mpool_alloc (&nobug_resource_user_pool);
  if (!new_user)
    {
      nobug_resource_error = "internal allocation error";
      return NULL;
    }

  new_user->hdr.name = identifier;
  new_user->hdr.extra = extra;
  new_user->state = state;
  llist_insert_head (&resource->users, llist_init (&new_user->hdr.node));

#if NOBUG_USE_PTHREAD
  if (!node)
    {
      /* no node found, create a new one */
      node = nobug_resource_node_new (resource, user?user->current:NULL);
      if (!node)
        {
          nobug_resource_error = "internal allocation error";
          return NULL;
        }
    }

  new_user->current = node;
  new_user->thread = tls;
  llist_insert_tail (&tls->res_stack, llist_init (&new_user->res_stack));

  pthread_mutex_unlock (&nobug_resource_mutex);
#endif

  return new_user;
}


#if NOBUG_USE_PTHREAD
static int
nobug_resource_node_parent_cmpfn (const_LList a, const_LList b, void* extra)
{
  (void) extra;
  return ((struct nobug_resource_node*)a)->parent ==
    ((struct nobug_resource_node*)b)->parent?0:-1;
}
#endif


void
nobug_resource_leave_pre (void)
{
#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);
#endif
}


int
nobug_resource_leave (struct nobug_resource_user* user)
{
  if (!user)
    {
      nobug_resource_error = "no handle";
      return 0;
    }

  if (!(user->current?user->current->resource:NULL))
    {
      nobug_resource_error = "not entered";
      return 0;
    }
  else
    {
      //dlalgo
      //dlalgo HEAD^ Leaving Resources; nobug_resource_leave; fix resource lists
      //dlalgo
      //dlalgo Store the tail and the next element aside, we need them later.
      //dlalgo
      //dlalgo [source,c]
      //dlalgo ---------------------------------------------------------------------
#if NOBUG_USE_PTHREAD                                                 //dlalgo VERBATIM
      struct nobug_resource_user* tail =                              //dlalgo VERBATIM
        LLIST_TO_STRUCTP (llist_tail (&user->thread->res_stack),      //dlalgo VERBATIM
                          struct nobug_resource_user,                 //dlalgo VERBATIM
                          res_stack);                                 //dlalgo VERBATIM

      struct nobug_resource_user* next =                              //dlalgo VERBATIM
        LLIST_TO_STRUCTP (llist_next (&user->res_stack),              //dlalgo VERBATIM
                          struct nobug_resource_user,                 //dlalgo VERBATIM
                          res_stack);                                 //dlalgo VERBATIM
      //dlalgo ---------------------------------------------------------------------
      //dlalgo
      //dlalgo Remove the user struct from the thread stack.
      //dlalgo The res_stack now looks as it is supposed to with 'user' removed.
      //dlalgo We now need to fix up the node tree to match this list.
      //dlalgo
      //dlalgo [source,c]
      //dlalgo ---------------------------------------------------------------------
      llist_unlink_fast_ (&user->res_stack);                          //dlalgo VERBATIM
      //dlalgo ---------------------------------------------------------------------
      //dlalgo
      //dlalgo When the user node was not the tail or the only node of the thread stack, we have to check
      //dlalgo (and possibly construct) a new node chain for it. No validation of this chain needs to be done,
      //dlalgo since it was already validated when the resources were entered the first time.
      //dlalgo
      //dlalgo [source,c]
      //dlalgo ---------------------------------------------------------------------
      if (user != tail && !llist_is_empty (&user->thread->res_stack))    //dlalgo VERBATIM
        {                                                                //dlalgo VERBATIM
          struct nobug_resource_user* parent = NULL;                     //dlalgo VERBATIM
          if (llist_head (&user->thread->res_stack) != &next->res_stack) //dlalgo VERBATIM
            {                                                            //dlalgo VERBATIM
              parent =                                                   //dlalgo VERBATIM
                LLIST_TO_STRUCTP (llist_prev (&next->res_stack),         //dlalgo VERBATIM
                                  struct nobug_resource_user,            //dlalgo VERBATIM
                                  res_stack);                            //dlalgo VERBATIM
            }                                                            //dlalgo VERBATIM
          //dlalgo ---------------------------------------------------------------------
          //dlalgo
          //dlalgo Iterate over all users following the removed node, finding nodes pointing to these users or
          //dlalgo creating new nodes.
          //dlalgo
          //dlalgo [source,c]
          //dlalgo ---------------------------------------------------------------------
          LLIST_FORRANGE (&next->res_stack, &user->thread->res_stack, n) //dlalgo VERBATIM
            {                                                            //dlalgo VERBATIM
              struct nobug_resource_user* cur =                          //dlalgo VERBATIM
                LLIST_TO_STRUCTP (n,                                     //dlalgo VERBATIM
                                  struct nobug_resource_user,            //dlalgo VERBATIM
                                  res_stack);                            //dlalgo VERBATIM
                                                                         //dlalgo VERBATIM
              struct nobug_resource_record* resource = cur->current->resource;

              //dlalgo ---------------------------------------------------------------------
              //TODO this search could be optimized out: once we create a node,
              //TODO all following nodes need to be created too
              //dlalgo
              //dlalgo Find the node pointing back to the parent, create a new one if not found, rinse, repeat.
              //dlalgo
              //dlalgo [source,c]
              //dlalgo ---------------------------------------------------------------------
              struct nobug_resource_node templ =                         //dlalgo VERBATIM
                {                                                        //dlalgo VERBATIM
                  {NULL, NULL},                                          //dlalgo ...
                  NULL,                                                  //dlalgo VERBATIM
                  parent?parent->current:NULL,                           //dlalgo ...
                  {NULL, NULL},
                  {NULL, NULL}
                };                                                       //dlalgo VERBATIM
                                                                         //dlalgo VERBATIM
              struct nobug_resource_node* node = (struct nobug_resource_node*)  //dlalgo VERBATIM
                llist_ufind (&resource->nodes,                           //dlalgo VERBATIM
                             &templ.node,                                //dlalgo VERBATIM
                             nobug_resource_node_parent_cmpfn,           //dlalgo VERBATIM
                             NULL);                                      //dlalgo VERBATIM
                                                                         //dlalgo VERBATIM
              if (!node)                                                 //dlalgo VERBATIM
                {                                                        //dlalgo VERBATIM
                  node = nobug_resource_node_new (resource,              //dlalgo VERBATIM
                                                  parent?parent->current:NULL);  //dlalgo VERBATIM
                  if (!node)                                             //dlalgo VERBATIM
                    {                                                    //dlalgo VERBATIM
                      nobug_resource_error = "internal allocation error"; //dlalgo VERBATIM
                      return 0;                                          //dlalgo VERBATIM
                    }                                                    //dlalgo VERBATIM
                }                                                        //dlalgo VERBATIM
                                                                         //dlalgo VERBATIM
              parent = cur;                                              //dlalgo VERBATIM
            }                                                            //dlalgo VERBATIM
        }                                                                //dlalgo VERBATIM
      //dlalgo ---------------------------------------------------------------------
      //dlalgo
#endif

      llist_unlink_fast_ (&user->hdr.node);
      nobug_mpool_free (&nobug_resource_user_pool, user);
    }

#if NOBUG_USE_PTHREAD
  pthread_mutex_unlock (&nobug_resource_mutex);
#endif

  return 1;
}


int
nobug_resource_state (struct nobug_resource_user* user,
                      enum nobug_resource_state state)
{
  if (!user)
    {
      nobug_resource_error = "no user handle";
      return 0;
    }

#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);
#endif

  if (user->state == NOBUG_RESOURCE_WAITING || user->state == NOBUG_RESOURCE_TRYING)
    {
      if (state == NOBUG_RESOURCE_EXCLUSIVE)
        {
          /* check that everyone is waiting */
          LLIST_FOREACH (&user->current->resource->users, n)
            if (((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_WAITING)
              {
                nobug_resource_error = "resource held by another thread";
                break;
              }
        }
#if NOBUG_USE_PTHREAD
      else if (state == NOBUG_RESOURCE_RECURSIVE)
        {
          /* check that everyone else is waiting */
          LLIST_FOREACH (&user->current->resource->users, n)
            if (((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_WAITING &&
                ((struct nobug_resource_user*)n)->thread != nobug_thread_get ())
              {
                nobug_resource_error = "resource held by another thread";
                break;
              }
        }
#endif
      else if (state == NOBUG_RESOURCE_SHARED)
        {
          /* check that everyone else is waiting or holds it shared */
          LLIST_FOREACH (&user->current->resource->users, n)
            if (((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_WAITING
                && ((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_SHARED)
              {
                nobug_resource_error = "resource held by another thread nonshared";
                break;
              }
        }
      else
        nobug_resource_error = "invalid state transition";

      /* ok we got it */
      if (!nobug_resource_error)
        user->state = state;
    }
  else if (state == NOBUG_RESOURCE_WAITING || state == NOBUG_RESOURCE_TRYING)
    user->state = state;
  else
    nobug_resource_error = "invalid state transition";

  if (nobug_resource_error)
    return 0;

#if NOBUG_USE_PTHREAD
  pthread_mutex_unlock (&nobug_resource_mutex);
#endif

  return 1;
}
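
/*
  Example (editor's sketch): a reader-side transition. Several users may hold the same resource
  SHARED at once; the checks above reject the transition only when another user has the resource
  in a state other than WAITING or SHARED. 'rec', 'rwlock' and 'ctx' are assumed placeholders
  for an announced resource record, the real lock and the call-site context.

      struct nobug_resource_user* usr =
        nobug_resource_enter (rec, "reader", NOBUG_RESOURCE_WAITING, ctx);
      pthread_rwlock_rdlock (&rwlock);
      nobug_resource_state (usr, NOBUG_RESOURCE_SHARED);

      ...

      pthread_rwlock_unlock (&rwlock);
      nobug_resource_leave_pre ();
      nobug_resource_leave (usr);
*/
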
enum nobug_resource_state
nobug_resource_mystate (struct nobug_resource_record* res)
{
  enum nobug_resource_state ret = NOBUG_RESOURCE_INVALID;
#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);
  struct nobug_tls_data* iam = nobug_thread_get ();
#endif

  LLIST_FOREACH_REV (&res->users, u)
    {
      struct nobug_resource_user* user = (struct nobug_resource_user*) u;
#if NOBUG_USE_PTHREAD
      if (user->thread == iam)
        ret = user->state;
#else
      ret = user->state;
#endif
    }

#if NOBUG_USE_PTHREAD
  pthread_mutex_unlock (&nobug_resource_mutex);
#endif

  return ret;
}


static void
nobug_resource_dump_ (char** start, char* header, struct nobug_resource_record* resource, const struct nobug_resource_dump_context context)
{
#if NOBUG_USE_PTHREAD
  nobug_log_line (start, header, context.flag, context.level,
                  " %s:%d: %s:%s: held by %u entities:",
                  nobug_basename(resource->hdr.extra.file), resource->hdr.extra.line,
                  resource->type, resource->hdr.name,
                  llist_count (&resource->users));
#else
  nobug_log_line (start, header, context.flag, context.level,
                  " %s:%d: %s:%s: held by %u entities:",
                  nobug_basename(resource->hdr.extra.file), resource->hdr.extra.line,
                  resource->type, resource->hdr.name,
                  llist_count (&resource->users));
#endif

  LLIST_FOREACH (&resource->users, n)
    {
      struct nobug_resource_user* node = (struct nobug_resource_user*)n;
#if NOBUG_USE_PTHREAD
      nobug_log_line (start, header, context.flag, context.level,
                      NOBUG_TAB"%s:%d: %s %s: %s",
                      nobug_basename(node->hdr.extra.file), node->hdr.extra.line,
                      node->hdr.name, node->thread->thread_id,
                      nobug_resource_states[node->state]);
#else
      nobug_log_line (start, header, context.flag, context.level,
                      NOBUG_TAB"%s:%d: %s: %s",
                      nobug_basename(node->hdr.extra.file), node->hdr.extra.line,
                      node->hdr.name, nobug_resource_states[node->state]);
#endif
    }
}


void
nobug_resource_dump (struct nobug_resource_record* resource, const struct nobug_resource_dump_context context)
{
  if (!resource) return;

#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);
#endif

  char header[NOBUG_MAX_LOG_HEADER_SIZE];
  char* start = nobug_log_begin (header, context.flag, "RESOURCE_DUMP", context.ctx);

  nobug_resource_dump_ (&start, header, resource, context);

  nobug_log_end (context.flag, context.level);

#if NOBUG_USE_PTHREAD
  pthread_mutex_unlock (&nobug_resource_mutex);
#endif
}


void
nobug_resource_dump_all (const struct nobug_resource_dump_context context)
{
#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);
#endif

  char header[NOBUG_MAX_LOG_HEADER_SIZE];
  char* start = nobug_log_begin (header, context.flag, "RESOURCE_DUMP", context.ctx);

  LLIST_FOREACH (&nobug_resource_registry, n)
    {
      struct nobug_resource_record* node = (struct nobug_resource_record*)n;
      nobug_resource_dump_ (&start, header, node, context);
    }

  nobug_log_end (context.flag, context.level);

#if NOBUG_USE_PTHREAD
  pthread_mutex_unlock (&nobug_resource_mutex);
#endif
}


void
nobug_resource_list (const struct nobug_resource_dump_context context)
{
#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);
#endif

  char header[NOBUG_MAX_LOG_HEADER_SIZE];
  char* start = nobug_log_begin (header, context.flag, "RESOURCE_LIST", context.ctx);

  if (!llist_is_empty (&nobug_resource_registry))
    {
      LLIST_FOREACH (&nobug_resource_registry, n)
        {
          struct nobug_resource_record* node = (struct nobug_resource_record*)n;
          nobug_log_line (&start, header,
                          context.flag, context.level,
                          " %s:%d: %s: %s: %p",
                          nobug_basename(node->hdr.extra.file), node->hdr.extra.line,
                          node->type, node->hdr.name, node->object_id);
        }
    }
  else
    {
      nobug_log_line (&start, header, context.flag, context.level, " No resources registered");
    }

  nobug_log_end (context.flag, context.level);

#if NOBUG_USE_PTHREAD
  pthread_mutex_unlock (&nobug_resource_mutex);
#endif
}

// Local Variables:
// mode: C
// c-file-style: "gnu"
// indent-tabs-mode: nil
// End: