/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "recoverd.h"
#include "ast.h"
#include "dir.h"
#include "lowcomms.h"
#include "config.h"
#include "memory.h"
#include "lock.h"
#include "recover.h"
#ifdef CONFIG_DLM_DEBUG
int dlm_create_debug_file(struct dlm_ls *ls);
void dlm_delete_debug_file(struct dlm_ls *ls);
#else
static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; }
static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
#endif
static int                      ls_count;
static struct mutex             ls_lock;
static struct list_head         lslist;
static spinlock_t               lslist_lock;
static struct task_struct *     scand_task;
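/*
 * ls_lock serializes lockspace creation and removal so the shared daemon
 * threads are started with the first lockspace and stopped with the last
 * (ls_count tracks how many exist); lslist, protected by lslist_lock, links
 * every active lockspace; scand_task is the thread that periodically scans
 * each lockspace's resource tables.
 */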
static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
{
        ssize_t ret = len;
        int n = simple_strtol(buf, NULL, 0);

        switch (n) {
        case 0:
                dlm_ls_stop(ls);
                break;
        case 1:
                dlm_ls_start(ls);
                break;
        default:
                ret = -EINVAL;
        }
        return ret;
}
static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
{
        ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
        set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
        wake_up(&ls->ls_uevent_wait);
        return len;
}
static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
}
static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
{
        ls->ls_global_id = simple_strtoul(buf, NULL, 0);
        return len;
}
static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
{
        uint32_t status = dlm_recover_status(ls);
        return snprintf(buf, PAGE_SIZE, "%x\n", status);
}
static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
}
struct dlm_attr {
        struct attribute attr;
        ssize_t (*show)(struct dlm_ls *, char *);
        ssize_t (*store)(struct dlm_ls *, const char *, size_t);
};
static struct dlm_attr dlm_attr_control = {
        .attr  = {.name = "control", .mode = S_IWUSR},
        .store = dlm_control_store
};

static struct dlm_attr dlm_attr_event = {
        .attr  = {.name = "event_done", .mode = S_IWUSR},
        .store = dlm_event_store
};

static struct dlm_attr dlm_attr_id = {
        .attr  = {.name = "id", .mode = S_IRUGO | S_IWUSR},
        .show  = dlm_id_show,
        .store = dlm_id_store
};

static struct dlm_attr dlm_attr_recover_status = {
        .attr  = {.name = "recover_status", .mode = S_IRUGO},
        .show  = dlm_recover_status_show
};

static struct dlm_attr dlm_attr_recover_nodeid = {
        .attr  = {.name = "recover_nodeid", .mode = S_IRUGO},
        .show  = dlm_recover_nodeid_show
};
static struct attribute *dlm_attrs[] = {
        &dlm_attr_control.attr,
        &dlm_attr_event.attr,
        &dlm_attr_id.attr,
        &dlm_attr_recover_status.attr,
        &dlm_attr_recover_nodeid.attr,
        NULL,
};
static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
                             char *buf)
{
        struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
        struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
        return a->show ? a->show(ls, buf) : 0;
}
static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
                              const char *buf, size_t len)
{
        struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
        struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
        return a->store ? a->store(ls, buf, len) : len;
}
static struct sysfs_ops dlm_attr_ops = {
        .show  = dlm_attr_show,
        .store = dlm_attr_store,
};

static struct kobj_type dlm_ktype = {
        .default_attrs = dlm_attrs,
        .sysfs_ops     = &dlm_attr_ops,
};

static struct kset dlm_kset = {
        .subsys = &kernel_subsys,
        .kobj   = {.name = "dlm",},
        .ktype  = &dlm_ktype,
};
static int kobject_setup(struct dlm_ls *ls)
{
        char lsname[DLM_LOCKSPACE_LEN];
        int error;

        memset(lsname, 0, DLM_LOCKSPACE_LEN);
        snprintf(lsname, DLM_LOCKSPACE_LEN, "%s", ls->ls_name);

        error = kobject_set_name(&ls->ls_kobj, "%s", lsname);
        if (error)
                return error;

        ls->ls_kobj.kset = &dlm_kset;
        ls->ls_kobj.ktype = &dlm_ktype;
        return 0;
}
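/*
 * do_uevent() is the kernel half of a handshake with user space: an ONLINE
 * or OFFLINE uevent is emitted for the lockspace, a cluster manager daemon
 * (typically dlm_controld) reacts to it and then writes its status into the
 * "event_done" sysfs file (dlm_event_store above), which wakes the wait
 * below and supplies ls_uevent_result.
 */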
static int do_uevent(struct dlm_ls *ls, int in)
{
        int error;

        if (in)
                kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
        else
                kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);

        error = wait_event_interruptible(ls->ls_uevent_wait,
                        test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
        if (error)
                goto out;

        error = ls->ls_uevent_result;
 out:
        return error;
}
int dlm_lockspace_init(void)
{
        int error;

        ls_count = 0;
        mutex_init(&ls_lock);
        INIT_LIST_HEAD(&lslist);
        spin_lock_init(&lslist_lock);

        error = kset_register(&dlm_kset);
        if (error)
                printk("dlm_lockspace_init: cannot register kset %d\n", error);
        return error;
}
void dlm_lockspace_exit(void)
{
        kset_unregister(&dlm_kset);
}
static int dlm_scand(void *data)
{
        struct dlm_ls *ls;

        while (!kthread_should_stop()) {
                list_for_each_entry(ls, &lslist, ls_list)
                        dlm_scan_rsbs(ls);
                schedule_timeout_interruptible(dlm_config.scan_secs * HZ);
        }
        return 0;
}
static int dlm_scand_start(void)
{
        struct task_struct *p;
        int error = 0;

        p = kthread_run(dlm_scand, NULL, "dlm_scand");
        if (IS_ERR(p))
                error = PTR_ERR(p);
        else
                scand_task = p;
        return error;
}
static void dlm_scand_stop(void)
{
        kthread_stop(scand_task);
}
static struct dlm_ls *dlm_find_lockspace_name(char *name, int namelen)
{
        struct dlm_ls *ls;

        spin_lock(&lslist_lock);

        list_for_each_entry(ls, &lslist, ls_list) {
                if (ls->ls_namelen == namelen &&
                    memcmp(ls->ls_name, name, namelen) == 0)
                        goto out;
        }
        ls = NULL;
 out:
        spin_unlock(&lslist_lock);
        return ls;
}
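/*
 * The lookup functions below take a reference on the lockspace they return
 * by bumping ls_count under lslist_lock; callers drop it again with
 * dlm_put_lockspace().  dlm_find_lockspace_name() above takes no reference;
 * it is only used during lockspace creation, which ls_lock serializes.
 */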
struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
{
        struct dlm_ls *ls;

        spin_lock(&lslist_lock);

        list_for_each_entry(ls, &lslist, ls_list) {
                if (ls->ls_global_id == id) {
                        ls->ls_count++;
                        goto out;
                }
        }
        ls = NULL;
 out:
        spin_unlock(&lslist_lock);
        return ls;
}
struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
{
        struct dlm_ls *ls;

        spin_lock(&lslist_lock);
        list_for_each_entry(ls, &lslist, ls_list) {
                if (ls->ls_local_handle == lockspace) {
                        ls->ls_count++;
                        goto out;
                }
        }
        ls = NULL;
 out:
        spin_unlock(&lslist_lock);
        return ls;
}
struct dlm_ls *dlm_find_lockspace_device(int minor)
{
        struct dlm_ls *ls;

        spin_lock(&lslist_lock);
        list_for_each_entry(ls, &lslist, ls_list) {
                if (ls->ls_device.minor == minor) {
                        ls->ls_count++;
                        goto out;
                }
        }
        ls = NULL;
 out:
        spin_unlock(&lslist_lock);
        return ls;
}
void dlm_put_lockspace(struct dlm_ls *ls)
{
        spin_lock(&lslist_lock);
        ls->ls_count--;
        spin_unlock(&lslist_lock);
}
static void remove_lockspace(struct dlm_ls *ls)
{
        for (;;) {
                spin_lock(&lslist_lock);
                if (ls->ls_count == 0) {
                        list_del(&ls->ls_list);
                        spin_unlock(&lslist_lock);
                        return;
                }
                spin_unlock(&lslist_lock);
                ssleep(1);
        }
}
static int threads_start(void)
{
        int error;

        /* Thread which processes lock requests for all lockspaces */
        error = dlm_astd_start();
        if (error) {
                log_print("cannot start dlm_astd thread %d", error);
                goto fail;
        }

        error = dlm_scand_start();
        if (error) {
                log_print("cannot start dlm_scand thread %d", error);
                goto astd_fail;
        }

        /* Thread for sending/receiving messages for all lockspaces */
        error = dlm_lowcomms_start();
        if (error) {
                log_print("cannot start dlm lowcomms %d", error);
                goto scand_fail;
        }

        return 0;

 scand_fail:
        dlm_scand_stop();
 astd_fail:
        dlm_astd_stop();
 fail:
        return error;
}
static void threads_stop(void)
{
        dlm_scand_stop();
        dlm_lowcomms_stop();
        dlm_astd_stop();
}
static int new_lockspace(char *name, int namelen, void **lockspace,
                         uint32_t flags, int lvblen)
{
        struct dlm_ls *ls;
        int i, size, error = -ENOMEM;
        if (namelen > DLM_LOCKSPACE_LEN)
                return -EINVAL;

        if (!lvblen || (lvblen % 8))
                return -EINVAL;

        if (!try_module_get(THIS_MODULE))
                return -EINVAL;

        ls = dlm_find_lockspace_name(name, namelen);
        if (ls) {
                *lockspace = ls;
                module_put(THIS_MODULE);
                return -EEXIST;
        }
        ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
        if (!ls)
                goto out;
        memcpy(ls->ls_name, name, namelen);
        ls->ls_namelen = namelen;
        ls->ls_exflags = flags;
        ls->ls_lvblen = lvblen;
        size = dlm_config.rsbtbl_size;
        ls->ls_rsbtbl_size = size;

        ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
        if (!ls->ls_rsbtbl)
                goto out_lsfree;
        for (i = 0; i < size; i++) {
                INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
                INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
                rwlock_init(&ls->ls_rsbtbl[i].lock);
        }
        size = dlm_config.lkbtbl_size;
        ls->ls_lkbtbl_size = size;

        ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
        if (!ls->ls_lkbtbl)
                goto out_rsbfree;
        for (i = 0; i < size; i++) {
                INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
                rwlock_init(&ls->ls_lkbtbl[i].lock);
                ls->ls_lkbtbl[i].counter = 1;
        }
        size = dlm_config.dirtbl_size;
        ls->ls_dirtbl_size = size;

        ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
        if (!ls->ls_dirtbl)
                goto out_lkbfree;
        for (i = 0; i < size; i++) {
                INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
                rwlock_init(&ls->ls_dirtbl[i].lock);
        }
        INIT_LIST_HEAD(&ls->ls_waiters);
        mutex_init(&ls->ls_waiters_mutex);

        INIT_LIST_HEAD(&ls->ls_nodes);
        INIT_LIST_HEAD(&ls->ls_nodes_gone);
        ls->ls_num_nodes = 0;
        ls->ls_low_nodeid = 0;
        ls->ls_total_weight = 0;
        ls->ls_node_array = NULL;

        memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
        ls->ls_stub_rsb.res_ls = ls;

        ls->ls_debug_rsb_dentry = NULL;
        ls->ls_debug_waiters_dentry = NULL;

        init_waitqueue_head(&ls->ls_uevent_wait);
        ls->ls_uevent_result = 0;

        ls->ls_recoverd_task = NULL;
        mutex_init(&ls->ls_recoverd_active);
        spin_lock_init(&ls->ls_recover_lock);
        ls->ls_recover_status = 0;
        ls->ls_recover_seq = 0;
        ls->ls_recover_args = NULL;
        init_rwsem(&ls->ls_in_recovery);
        INIT_LIST_HEAD(&ls->ls_requestqueue);
        mutex_init(&ls->ls_requestqueue_mutex);
        mutex_init(&ls->ls_clear_proc_locks);
        ls->ls_recover_buf = kmalloc(dlm_config.buffer_size, GFP_KERNEL);
        if (!ls->ls_recover_buf)
                goto out_dirfree;

        INIT_LIST_HEAD(&ls->ls_recover_list);
        spin_lock_init(&ls->ls_recover_list_lock);
        ls->ls_recover_list_count = 0;
        ls->ls_local_handle = ls;
        init_waitqueue_head(&ls->ls_wait_general);
        INIT_LIST_HEAD(&ls->ls_root_list);
        init_rwsem(&ls->ls_root_sem);
        down_write(&ls->ls_in_recovery);

        spin_lock(&lslist_lock);
        list_add(&ls->ls_list, &lslist);
        spin_unlock(&lslist_lock);

        /* needs to find ls in lslist */
        error = dlm_recoverd_start(ls);
        if (error) {
                log_error(ls, "can't start dlm_recoverd %d", error);
                goto out_rcomfree;
        }

        dlm_create_debug_file(ls);

        error = kobject_setup(ls);
        if (error)
                goto out_del;

        error = kobject_register(&ls->ls_kobj);
        if (error)
                goto out_del;

        error = do_uevent(ls, 1);
        if (error)
                goto out_unreg;

        *lockspace = ls;
        return 0;
 out_unreg:
        kobject_unregister(&ls->ls_kobj);
 out_del:
        dlm_delete_debug_file(ls);
        dlm_recoverd_stop(ls);
 out_rcomfree:
        spin_lock(&lslist_lock);
        list_del(&ls->ls_list);
        spin_unlock(&lslist_lock);
        kfree(ls->ls_recover_buf);
 out_dirfree:
        kfree(ls->ls_dirtbl);
 out_lkbfree:
        kfree(ls->ls_lkbtbl);
 out_rsbfree:
        kfree(ls->ls_rsbtbl);
 out_lsfree:
        kfree(ls);
 out:
        module_put(THIS_MODULE);
        return error;
}
int dlm_new_lockspace(char *name, int namelen, void **lockspace,
                      uint32_t flags, int lvblen)
{
        int error = 0;

        mutex_lock(&ls_lock);
        if (!ls_count)
                error = threads_start();
        if (error)
                goto out;

        error = new_lockspace(name, namelen, lockspace, flags, lvblen);
        if (!error)
                ls_count++;
 out:
        mutex_unlock(&ls_lock);
        return error;
}
/* Return 1 if the lockspace still has active remote locks,
 *        2 if the lockspace still has active local locks.
 */
static int lockspace_busy(struct dlm_ls *ls)
{
        struct dlm_lkb *lkb;
        int i, lkb_found = 0;

        /* NOTE: We check the lockidtbl here rather than the resource table.
           This is because there may be LKBs queued as ASTs that have been
           unlinked from their RSBs and are pending deletion once the AST has
           been delivered */
        for (i = 0; i < ls->ls_lkbtbl_size; i++) {
                read_lock(&ls->ls_lkbtbl[i].lock);
                if (!list_empty(&ls->ls_lkbtbl[i].list)) {
                        lkb_found = 1;
                        list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
                                            lkb_idtbl_list) {
                                if (!lkb->lkb_nodeid) {
                                        read_unlock(&ls->ls_lkbtbl[i].lock);
                                        return 2;
                                }
                        }
                }
                read_unlock(&ls->ls_lkbtbl[i].lock);
        }
        return lkb_found;
}
static int release_lockspace(struct dlm_ls *ls, int force)
{
        struct dlm_lkb *lkb;
        struct dlm_rsb *rsb;
        struct list_head *head;
        int i;
        int busy = lockspace_busy(ls);

        if (busy > force)
                return -EBUSY;

        if (force < 3)
                do_uevent(ls, 0);

        dlm_recoverd_stop(ls);

        remove_lockspace(ls);

        dlm_delete_debug_file(ls);

        dlm_astd_suspend();

        kfree(ls->ls_recover_buf);
        /*
         * Free direntry structs.
         */

        dlm_dir_clear(ls);
        kfree(ls->ls_dirtbl);
        /*
         * Free all lkb's on lkbtbl[] lists.
         */

        for (i = 0; i < ls->ls_lkbtbl_size; i++) {
                head = &ls->ls_lkbtbl[i].list;
                while (!list_empty(head)) {
                        lkb = list_entry(head->next, struct dlm_lkb,
                                         lkb_idtbl_list);

                        list_del(&lkb->lkb_idtbl_list);

                        dlm_del_ast(lkb);

                        if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
                                free_lvb(lkb->lkb_lvbptr);

                        free_lkb(lkb);
                }
        }
        dlm_astd_resume();

        kfree(ls->ls_lkbtbl);
        /*
         * Free all rsb's on rsbtbl[] lists
         */

        for (i = 0; i < ls->ls_rsbtbl_size; i++) {
                head = &ls->ls_rsbtbl[i].list;
                while (!list_empty(head)) {
                        rsb = list_entry(head->next, struct dlm_rsb,
                                         res_hashchain);

                        list_del(&rsb->res_hashchain);
                        free_rsb(rsb);
                }

                head = &ls->ls_rsbtbl[i].toss;
                while (!list_empty(head)) {
                        rsb = list_entry(head->next, struct dlm_rsb,
                                         res_hashchain);
                        list_del(&rsb->res_hashchain);
                        free_rsb(rsb);
                }
        }

        kfree(ls->ls_rsbtbl);
        /*
         * Free structures on any other lists
         */

        kfree(ls->ls_recover_args);
        dlm_clear_free_entries(ls);
        dlm_clear_members(ls);
        dlm_clear_members_gone(ls);
        kfree(ls->ls_node_array);
        kobject_unregister(&ls->ls_kobj);
        kfree(ls);

        mutex_lock(&ls_lock);
        ls_count--;
        if (!ls_count)
                threads_stop();
        mutex_unlock(&ls_lock);

        module_put(THIS_MODULE);
        return 0;
}
/*
 * Called when a system has released all its locks and is not going to use the
 * lockspace any longer.  We free everything we're managing for this lockspace.
 * Remaining nodes will go through the recovery process as if we'd died.  The
 * lockspace must continue to function as usual, participating in recoveries,
 * until this returns.
 *
 * Force has 4 possible values:
 * 0 - don't destroy lockspace if it has any LKBs
 * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
 * 2 - destroy lockspace regardless of LKBs
 * 3 - destroy lockspace as part of a forced shutdown
 */
int dlm_release_lockspace(void *lockspace, int force)
{
        struct dlm_ls *ls;

        ls = dlm_find_lockspace_local(lockspace);
        if (!ls)
                return -EINVAL;
        dlm_put_lockspace(ls);
        return release_lockspace(ls, force);
}
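/*
 * Minimal usage sketch of the lockspace lifecycle, kept under #if 0 since it
 * is a hypothetical caller, not part of the DLM itself ("example" and
 * example_lockspace_cycle are illustrative names).  lvblen must be a
 * non-zero multiple of 8, and force follows the table above.
 */
#if 0
static int example_lockspace_cycle(void)
{
        void *ls;
        int error;

        /* 32-byte lock value blocks; flags 0 requests default behavior */
        error = dlm_new_lockspace("example", strlen("example"), &ls, 0, 32);
        if (error)
                return error;

        /* ... take and release locks in the lockspace ... */

        /* force 0: fail with -EBUSY if any LKBs remain */
        return dlm_release_lockspace(ls, 0);
}
#endif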