/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "recoverd.h"
#include "ast.h"
#include "dir.h"
#include "lowcomms.h"
#include "config.h"
#include "memory.h"
#include "lock.h"
#include "recover.h"
#include "requestqueue.h"

#ifdef CONFIG_DLM_DEBUG
int dlm_create_debug_file(struct dlm_ls *ls);
void dlm_delete_debug_file(struct dlm_ls *ls);
#else
static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; }
static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
#endif

static int ls_count;
static struct mutex ls_lock;
static struct list_head lslist;
static spinlock_t lslist_lock;
static struct task_struct *scand_task;

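/*
 * Per-lockspace sysfs attributes.  The show/store handlers below back the
 * files under /sys/kernel/dlm/<lockspace>/ that dlm_controld in userspace
 * reads and writes to drive the lockspace (see do_uevent() below).
 */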
static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
{
        ssize_t ret = len;
        int n = simple_strtol(buf, NULL, 0);

        ls = dlm_find_lockspace_local(ls->ls_local_handle);
        if (!ls)
                return -EINVAL;

        switch (n) {
        case 0:
                dlm_ls_stop(ls);
                break;
        case 1:
                dlm_ls_start(ls);
                break;
        default:
                ret = -EINVAL;
        }
        dlm_put_lockspace(ls);
        return ret;
}

static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
{
        ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
        set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
        wake_up(&ls->ls_uevent_wait);
        return len;
}

static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
}

static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
{
        ls->ls_global_id = simple_strtoul(buf, NULL, 0);
        return len;
}

static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
{
        uint32_t status = dlm_recover_status(ls);
        return snprintf(buf, PAGE_SIZE, "%x\n", status);
}

static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
}

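/*
 * A dlm_attr pairs a sysfs attribute with its show/store callback so that a
 * single set of sysfs_ops (dlm_attr_ops below) can dispatch to the right
 * handler via container_of().
 */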
struct dlm_attr {
        struct attribute attr;
        ssize_t (*show)(struct dlm_ls *, char *);
        ssize_t (*store)(struct dlm_ls *, const char *, size_t);
};

static struct dlm_attr dlm_attr_control = {
        .attr  = {.name = "control", .mode = S_IWUSR},
        .store = dlm_control_store
};

static struct dlm_attr dlm_attr_event = {
        .attr  = {.name = "event_done", .mode = S_IWUSR},
        .store = dlm_event_store
};

static struct dlm_attr dlm_attr_id = {
        .attr  = {.name = "id", .mode = S_IRUGO | S_IWUSR},
        .show  = dlm_id_show,
        .store = dlm_id_store
};

static struct dlm_attr dlm_attr_recover_status = {
        .attr  = {.name = "recover_status", .mode = S_IRUGO},
        .show  = dlm_recover_status_show
};

static struct dlm_attr dlm_attr_recover_nodeid = {
        .attr  = {.name = "recover_nodeid", .mode = S_IRUGO},
        .show  = dlm_recover_nodeid_show
};

static struct attribute *dlm_attrs[] = {
        &dlm_attr_control.attr,
        &dlm_attr_event.attr,
        &dlm_attr_id.attr,
        &dlm_attr_recover_status.attr,
        &dlm_attr_recover_nodeid.attr,
        NULL,
};

static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
                             char *buf)
{
        struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
        struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
        return a->show ? a->show(ls, buf) : 0;
}

static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
                              const char *buf, size_t len)
{
        struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
        struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
        return a->store ? a->store(ls, buf, len) : len;
}

static void lockspace_kobj_release(struct kobject *k)
{
        struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);
        kfree(ls);
}

static struct sysfs_ops dlm_attr_ops = {
        .show  = dlm_attr_show,
        .store = dlm_attr_store,
};

static struct kobj_type dlm_ktype = {
        .default_attrs = dlm_attrs,
        .sysfs_ops     = &dlm_attr_ops,
        .release       = lockspace_kobj_release,
};

static struct kset dlm_kset = {
        .ktype  = &dlm_ktype,
};

static int kobject_setup(struct dlm_ls *ls)
{
        char lsname[DLM_LOCKSPACE_LEN];
        int error;

        memset(lsname, 0, DLM_LOCKSPACE_LEN);
        snprintf(lsname, DLM_LOCKSPACE_LEN, "%s", ls->ls_name);

        error = kobject_set_name(&ls->ls_kobj, "%s", lsname);
        if (error)
                return error;

        ls->ls_kobj.kset = &dlm_kset;
        ls->ls_kobj.ktype = &dlm_ktype;
        return 0;
}

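/*
 * Send a join/leave uevent for the lockspace and sleep until dlm_controld
 * reports the result by writing to the "event_done" sysfs file
 * (dlm_event_store above), which sets LSFL_UEVENT_WAIT and wakes us.
 */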
static int do_uevent(struct dlm_ls *ls, int in)
{
        int error;

        if (in)
                kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
        else
                kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);

        log_debug(ls, "%s the lockspace group...", in ? "joining" : "leaving");

        /* dlm_controld will see the uevent, do the necessary group management
           and then write to sysfs to wake us */

        error = wait_event_interruptible(ls->ls_uevent_wait,
                        test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));

        log_debug(ls, "group event done %d %d", error, ls->ls_uevent_result);

        if (error)
                goto out;

        error = ls->ls_uevent_result;
 out:
        if (error)
                log_error(ls, "group %s failed %d %d", in ? "join" : "leave",
                          error, ls->ls_uevent_result);
        return error;
}

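/*
 * Module init/exit: set up the global lockspace list and register the "dlm"
 * kset that every per-lockspace kobject is added to.
 */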
int dlm_lockspace_init(void)
{
        int error;

        ls_count = 0;
        mutex_init(&ls_lock);
        INIT_LIST_HEAD(&lslist);
        spin_lock_init(&lslist_lock);

        kobject_set_name(&dlm_kset.kobj, "dlm");
        kobj_set_kset_s(&dlm_kset, kernel_subsys);
        error = kset_register(&dlm_kset);
        if (error)
                printk("dlm_lockspace_init: cannot register kset %d\n", error);
        return error;
}

void dlm_lockspace_exit(void)
{
        kset_unregister(&dlm_kset);
}

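/*
 * Scanning kthread: periodically walk every lockspace and, when the recovery
 * lock can be taken without blocking, scan it for timed-out locks, then sleep
 * for ci_scan_secs seconds.
 */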
static int dlm_scand(void *data)
{
        struct dlm_ls *ls;

        while (!kthread_should_stop()) {
                list_for_each_entry(ls, &lslist, ls_list) {
                        if (dlm_lock_recovery_try(ls)) {
                                dlm_scan_timeout(ls);
                                dlm_unlock_recovery(ls);
                        }
                }
                schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ);
        }
        return 0;
}

static int dlm_scand_start(void)
{
        struct task_struct *p;
        int error = 0;

        p = kthread_run(dlm_scand, NULL, "dlm_scand");
        if (IS_ERR(p))
                error = PTR_ERR(p);
        else
                scand_task = p;
        return error;
}

static void dlm_scand_stop(void)
{
        kthread_stop(scand_task);
}

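/*
 * Lockspace lookup helpers.  Each variant searches lslist under lslist_lock
 * using a different key (name, global id, local handle, or device minor);
 * all but the name lookup take a reference that the caller drops with
 * dlm_put_lockspace().
 */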
static struct dlm_ls *dlm_find_lockspace_name(char *name, int namelen)
{
        struct dlm_ls *ls;

        spin_lock(&lslist_lock);

        list_for_each_entry(ls, &lslist, ls_list) {
                if (ls->ls_namelen == namelen &&
                    memcmp(ls->ls_name, name, namelen) == 0)
                        goto out;
        }
        ls = NULL;
 out:
        spin_unlock(&lslist_lock);
        return ls;
}

struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
{
        struct dlm_ls *ls;

        spin_lock(&lslist_lock);

        list_for_each_entry(ls, &lslist, ls_list) {
                if (ls->ls_global_id == id) {
                        ls->ls_count++;
                        goto out;
                }
        }
        ls = NULL;
 out:
        spin_unlock(&lslist_lock);
        return ls;
}

struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
{
        struct dlm_ls *ls;

        spin_lock(&lslist_lock);
        list_for_each_entry(ls, &lslist, ls_list) {
                if (ls->ls_local_handle == lockspace) {
                        ls->ls_count++;
                        goto out;
                }
        }
        ls = NULL;
 out:
        spin_unlock(&lslist_lock);
        return ls;
}

struct dlm_ls *dlm_find_lockspace_device(int minor)
{
        struct dlm_ls *ls;

        spin_lock(&lslist_lock);
        list_for_each_entry(ls, &lslist, ls_list) {
                if (ls->ls_device.minor == minor) {
                        ls->ls_count++;
                        goto out;
                }
        }
        ls = NULL;
 out:
        spin_unlock(&lslist_lock);
        return ls;
}

void dlm_put_lockspace(struct dlm_ls *ls)
{
        spin_lock(&lslist_lock);
        ls->ls_count--;
        spin_unlock(&lslist_lock);
}

static void remove_lockspace(struct dlm_ls *ls)
{
        for (;;) {
                spin_lock(&lslist_lock);
                if (ls->ls_count == 0) {
                        list_del(&ls->ls_list);
                        spin_unlock(&lslist_lock);
                        return;
                }
                spin_unlock(&lslist_lock);
                ssleep(1);
        }
}

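/*
 * Helper threads shared by all lockspaces: dlm_astd delivers ASTs, dlm_scand
 * does the periodic scanning above, and lowcomms handles inter-node
 * messaging.
 */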
static int threads_start(void)
{
        int error;

        /* Thread which processes lock requests for all lockspaces */
        error = dlm_astd_start();
        if (error) {
                log_print("cannot start dlm_astd thread %d", error);
                goto fail;
        }

        error = dlm_scand_start();
        if (error) {
                log_print("cannot start dlm_scand thread %d", error);
                goto astd_fail;
        }

        /* Thread for sending/receiving messages for all lockspaces */
        error = dlm_lowcomms_start();
        if (error) {
                log_print("cannot start dlm lowcomms %d", error);
                goto scand_fail;
        }

        return 0;

 scand_fail:
        dlm_scand_stop();
 astd_fail:
        dlm_astd_stop();
 fail:
        return error;
}

static void threads_stop(void)
{
        dlm_scand_stop();
        dlm_lowcomms_stop();
        dlm_astd_stop();
}

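/*
 * Create and initialize a lockspace: allocate the rsb/lkb/dir hash tables,
 * set up recovery state and the sysfs kobject, start dlm_recoverd, then ask
 * dlm_controld (via uevent) to join the lockspace group and wait for the
 * initial member list.
 */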
static int new_lockspace(char *name, int namelen, void **lockspace,
                         uint32_t flags, int lvblen)
{
        struct dlm_ls *ls;
        int i, size, error = -ENOMEM;
        int do_unreg = 0;

        if (namelen > DLM_LOCKSPACE_LEN)
                return -EINVAL;

        if (!lvblen || (lvblen % 8))
                return -EINVAL;

        if (!try_module_get(THIS_MODULE))
                return -EINVAL;

        ls = dlm_find_lockspace_name(name, namelen);
        if (ls) {
                *lockspace = ls;
                module_put(THIS_MODULE);
                return -EEXIST;
        }

        ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
        if (!ls)
                goto out;
        memcpy(ls->ls_name, name, namelen);
        ls->ls_namelen = namelen;
        ls->ls_lvblen = lvblen;

        if (flags & DLM_LSFL_TIMEWARN)
                set_bit(LSFL_TIMEWARN, &ls->ls_flags);

        if (flags & DLM_LSFL_FS)
                ls->ls_allocation = GFP_NOFS;
        else
                ls->ls_allocation = GFP_KERNEL;

        /* ls_exflags are forced to match among nodes, and we don't
           need to require all nodes to have TIMEWARN or FS set */
        ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS));

        size = dlm_config.ci_rsbtbl_size;
        ls->ls_rsbtbl_size = size;

        ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
        if (!ls->ls_rsbtbl)
                goto out_lsfree;
        for (i = 0; i < size; i++) {
                INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
                INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
                rwlock_init(&ls->ls_rsbtbl[i].lock);
        }

        size = dlm_config.ci_lkbtbl_size;
        ls->ls_lkbtbl_size = size;

        ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
        if (!ls->ls_lkbtbl)
                goto out_rsbfree;
        for (i = 0; i < size; i++) {
                INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
                rwlock_init(&ls->ls_lkbtbl[i].lock);
                ls->ls_lkbtbl[i].counter = 1;
        }

        size = dlm_config.ci_dirtbl_size;
        ls->ls_dirtbl_size = size;

        ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
        if (!ls->ls_dirtbl)
                goto out_lkbfree;
        for (i = 0; i < size; i++) {
                INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
                rwlock_init(&ls->ls_dirtbl[i].lock);
        }

        INIT_LIST_HEAD(&ls->ls_waiters);
        mutex_init(&ls->ls_waiters_mutex);
        INIT_LIST_HEAD(&ls->ls_orphans);
        mutex_init(&ls->ls_orphans_mutex);
        INIT_LIST_HEAD(&ls->ls_timeout);
        mutex_init(&ls->ls_timeout_mutex);

        INIT_LIST_HEAD(&ls->ls_nodes);
        INIT_LIST_HEAD(&ls->ls_nodes_gone);
        ls->ls_num_nodes = 0;
        ls->ls_low_nodeid = 0;
        ls->ls_total_weight = 0;
        ls->ls_node_array = NULL;

        memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
        ls->ls_stub_rsb.res_ls = ls;

        ls->ls_debug_rsb_dentry = NULL;
        ls->ls_debug_waiters_dentry = NULL;

        init_waitqueue_head(&ls->ls_uevent_wait);
        ls->ls_uevent_result = 0;
        init_completion(&ls->ls_members_done);
        ls->ls_members_result = -1;

        ls->ls_recoverd_task = NULL;
        mutex_init(&ls->ls_recoverd_active);
        spin_lock_init(&ls->ls_recover_lock);
        spin_lock_init(&ls->ls_rcom_spin);
        get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
        ls->ls_recover_status = 0;
        ls->ls_recover_seq = 0;
        ls->ls_recover_args = NULL;
        init_rwsem(&ls->ls_in_recovery);
        init_rwsem(&ls->ls_recv_active);
        INIT_LIST_HEAD(&ls->ls_requestqueue);
        mutex_init(&ls->ls_requestqueue_mutex);
        mutex_init(&ls->ls_clear_proc_locks);

        ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL);
        if (!ls->ls_recover_buf)
                goto out_dirfree;

        INIT_LIST_HEAD(&ls->ls_recover_list);
        spin_lock_init(&ls->ls_recover_list_lock);
        ls->ls_recover_list_count = 0;
        ls->ls_local_handle = ls;
        init_waitqueue_head(&ls->ls_wait_general);
        INIT_LIST_HEAD(&ls->ls_root_list);
        init_rwsem(&ls->ls_root_sem);

        down_write(&ls->ls_in_recovery);

        spin_lock(&lslist_lock);
        list_add(&ls->ls_list, &lslist);
        spin_unlock(&lslist_lock);

        /* needs to find ls in lslist */
        error = dlm_recoverd_start(ls);
        if (error) {
                log_error(ls, "can't start dlm_recoverd %d", error);
                goto out_rcomfree;
        }

        error = kobject_setup(ls);
        if (error)
                goto out_stop;

        error = kobject_register(&ls->ls_kobj);
        if (error)
                goto out_stop;

        /* let kobject handle freeing of ls if there's an error */
        do_unreg = 1;

        /* This uevent triggers dlm_controld in userspace to add us to the
           group of nodes that are members of this lockspace (managed by the
           cluster infrastructure.)  Once it's done that, it tells us who the
           current lockspace members are (via configfs) and then tells the
           lockspace to start running (via sysfs) in dlm_ls_start(). */

        error = do_uevent(ls, 1);
        if (error)
                goto out_stop;

        wait_for_completion(&ls->ls_members_done);
        error = ls->ls_members_result;
        if (error)
                goto out_members;

        dlm_create_debug_file(ls);

        log_debug(ls, "join complete");

        *lockspace = ls;
        return 0;

 out_members:
        do_uevent(ls, 0);
        dlm_clear_members(ls);
        kfree(ls->ls_node_array);
 out_stop:
        dlm_recoverd_stop(ls);
 out_rcomfree:
        spin_lock(&lslist_lock);
        list_del(&ls->ls_list);
        spin_unlock(&lslist_lock);
        kfree(ls->ls_recover_buf);
 out_dirfree:
        kfree(ls->ls_dirtbl);
 out_lkbfree:
        kfree(ls->ls_lkbtbl);
 out_rsbfree:
        kfree(ls->ls_rsbtbl);
 out_lsfree:
        if (do_unreg)
                kobject_unregister(&ls->ls_kobj);
        else
                kfree(ls);
 out:
        module_put(THIS_MODULE);
        return error;
}

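/*
 * Public entry point for creating a lockspace.  ls_lock serializes creation
 * with starting/stopping the shared helper threads.
 */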
int dlm_new_lockspace(char *name, int namelen, void **lockspace,
                      uint32_t flags, int lvblen)
{
        int error = 0;

        mutex_lock(&ls_lock);
        if (!ls_count)
                error = threads_start();
        if (error)
                goto out;

        error = new_lockspace(name, namelen, lockspace, flags, lvblen);
        if (!error)
                ls_count++;
        else if (!ls_count)
                threads_stop();
 out:
        mutex_unlock(&ls_lock);
        return error;
}

/* Return 1 if the lockspace still has active remote locks,
 *        2 if the lockspace still has active local locks.
 */
static int lockspace_busy(struct dlm_ls *ls)
{
        int i, lkb_found = 0;
        struct dlm_lkb *lkb;

        /* NOTE: We check the lockidtbl here rather than the resource table.
           This is because there may be LKBs queued as ASTs that have been
           unlinked from their RSBs and are pending deletion once the AST has
           been delivered */

        for (i = 0; i < ls->ls_lkbtbl_size; i++) {
                read_lock(&ls->ls_lkbtbl[i].lock);
                if (!list_empty(&ls->ls_lkbtbl[i].list)) {
                        lkb_found = 1;
                        list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
                                            lkb_idtbl_list) {
                                if (!lkb->lkb_nodeid) {
                                        read_unlock(&ls->ls_lkbtbl[i].lock);
                                        return 2;
                                }
                        }
                }
                read_unlock(&ls->ls_lkbtbl[i].lock);
        }
        return lkb_found;
}

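/*
 * Tear the lockspace down in roughly the reverse order of new_lockspace():
 * leave the group, stop recovery, unlink from lslist, free the lkb/rsb/dir
 * tables, and finally let the kobject release callback free the ls itself.
 */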
static int release_lockspace(struct dlm_ls *ls, int force)
{
        struct dlm_lkb *lkb;
        struct dlm_rsb *rsb;
        struct list_head *head;
        int i;
        int busy = lockspace_busy(ls);

        if (busy > force)
                return -EBUSY;

        if (force < 3)
                do_uevent(ls, 0);

        dlm_recoverd_stop(ls);

        remove_lockspace(ls);

        dlm_delete_debug_file(ls);

        dlm_astd_suspend();

        kfree(ls->ls_recover_buf);

        /*
         * Free direntry structs.
         */

        dlm_dir_clear(ls);
        kfree(ls->ls_dirtbl);

        /*
         * Free all lkb's on lkbtbl[] lists.
         */

        for (i = 0; i < ls->ls_lkbtbl_size; i++) {
                head = &ls->ls_lkbtbl[i].list;
                while (!list_empty(head)) {
                        lkb = list_entry(head->next, struct dlm_lkb,
                                         lkb_idtbl_list);

                        list_del(&lkb->lkb_idtbl_list);

                        dlm_del_ast(lkb);

                        if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
                                free_lvb(lkb->lkb_lvbptr);

                        free_lkb(lkb);
                }
        }
        dlm_astd_resume();

        kfree(ls->ls_lkbtbl);

        /*
         * Free all rsb's on rsbtbl[] lists
         */

        for (i = 0; i < ls->ls_rsbtbl_size; i++) {
                head = &ls->ls_rsbtbl[i].list;
                while (!list_empty(head)) {
                        rsb = list_entry(head->next, struct dlm_rsb,
                                         res_hashchain);

                        list_del(&rsb->res_hashchain);
                        free_rsb(rsb);
                }

                head = &ls->ls_rsbtbl[i].toss;
                while (!list_empty(head)) {
                        rsb = list_entry(head->next, struct dlm_rsb,
                                         res_hashchain);
                        list_del(&rsb->res_hashchain);
                        free_rsb(rsb);
                }
        }

        kfree(ls->ls_rsbtbl);

        /*
         * Free structures on any other lists
         */

        dlm_purge_requestqueue(ls);
        kfree(ls->ls_recover_args);
        dlm_clear_free_entries(ls);
        dlm_clear_members(ls);
        dlm_clear_members_gone(ls);
        kfree(ls->ls_node_array);
        kobject_unregister(&ls->ls_kobj);
        /* The ls structure will be freed when the kobject is done with it */

        mutex_lock(&ls_lock);
        ls_count--;
        if (!ls_count)
                threads_stop();
        mutex_unlock(&ls_lock);

        module_put(THIS_MODULE);
        return 0;
}

/*
 * Called when a system has released all its locks and is not going to use the
 * lockspace any longer.  We free everything we're managing for this lockspace.
 * Remaining nodes will go through the recovery process as if we'd died.  The
 * lockspace must continue to function as usual, participating in recoveries,
 * until this returns.
 *
 * Force has 4 possible values:
 * 0 - don't destroy lockspace if it has any LKBs
 * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
 * 2 - destroy lockspace regardless of LKBs
 * 3 - destroy lockspace as part of a forced shutdown
 */

int dlm_release_lockspace(void *lockspace, int force)
{
        struct dlm_ls *ls;

        ls = dlm_find_lockspace_local(lockspace);
        if (!ls)
                return -EINVAL;
        dlm_put_lockspace(ls);
        return release_lockspace(ls, force);
}