/*
   Unix SMB/CIFS implementation.
   global locks based on dbwrap and messaging
   Copyright (C) 2009 by Volker Lendecke

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "system/filesys.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "g_lock.h"
#include "util_tdb.h"
#include "ctdbd_conn.h"
#include "../lib/util/select.h"
#include "system/select.h"
#include "messages.h"

static NTSTATUS g_lock_force_unlock(struct g_lock_ctx *ctx, const char *name,
				    struct server_id pid);

struct g_lock_ctx {
	struct db_context *db;
	struct messaging_context *msg;
};

/*
 * The "g_lock.tdb" file contains records, indexed by the 0-terminated
 * lockname. The record contains an array of "struct g_lock_rec"
 * structures. Waiters have the lock_type with G_LOCK_PENDING or'ed.
 */

struct g_lock_rec {
	enum g_lock_type lock_type;
	struct server_id pid;
};
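
/*
 * Illustrative sketch (not part of the original source): a lock with one
 * writer and one waiter is stored as two consecutive g_lock_rec entries,
 * and the whole array is written verbatim as the tdb record value, e.g.
 *
 *   struct g_lock_rec recs[2] = {
 *           { .lock_type = G_LOCK_WRITE,                .pid = owner_id },
 *           { .lock_type = G_LOCK_WRITE|G_LOCK_PENDING, .pid = waiter_id },
 *   };
 *
 * "owner_id" and "waiter_id" are hypothetical struct server_id values.
 */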

struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
				   struct messaging_context *msg)
{
	struct g_lock_ctx *result;

	result = talloc(mem_ctx, struct g_lock_ctx);
	if (result == NULL) {
		return NULL;
	}
	result->msg = msg;

	result->db = db_open(result, lock_path("g_lock.tdb"), 0,
			     TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
			     O_RDWR|O_CREAT, 0600);
	if (result->db == NULL) {
		DEBUG(1, ("g_lock_init: Could not open g_lock.tdb\n"));
		TALLOC_FREE(result);
		return NULL;
	}
	return result;
}
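
/*
 * Usage sketch (illustrative, assuming an already initialized
 * struct messaging_context *msg):
 *
 *   struct g_lock_ctx *ctx = g_lock_ctx_init(talloc_tos(), msg);
 *   if (ctx == NULL) {
 *           return NT_STATUS_NO_MEMORY;
 *   }
 */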

static bool g_lock_conflicts(enum g_lock_type lock_type,
			     const struct g_lock_rec *rec)
{
	enum g_lock_type rec_lock = rec->lock_type;

	if ((rec_lock & G_LOCK_PENDING) != 0) {
		/* Pending entries never block anybody. */
		return false;
	}

	/*
	 * Only tested write locks so far. Very likely this routine
	 * needs to be fixed for read locks....
	 */
	if ((lock_type == G_LOCK_READ) && (rec_lock == G_LOCK_READ)) {
		return false;
	}
	return true;
}

static bool g_lock_parse(TALLOC_CTX *mem_ctx, TDB_DATA data,
			 int *pnum_locks, struct g_lock_rec **plocks)
{
	int i, num_locks;
	struct g_lock_rec *locks;

	if ((data.dsize % sizeof(struct g_lock_rec)) != 0) {
		DEBUG(1, ("invalid lock record length %d\n", (int)data.dsize));
		return false;
	}

	num_locks = data.dsize / sizeof(struct g_lock_rec);
	locks = talloc_array(mem_ctx, struct g_lock_rec, num_locks);
	if (locks == NULL) {
		DEBUG(1, ("talloc failed\n"));
		return false;
	}

	memcpy(locks, data.dptr, data.dsize);

	DEBUG(10, ("locks:\n"));
	for (i=0; i<num_locks; i++) {
		DEBUGADD(10, ("%s: %s %s\n",
			      server_id_str(talloc_tos(), &locks[i].pid),
			      ((locks[i].lock_type & 1) == G_LOCK_READ) ?
			      "read" : "write",
			      (locks[i].lock_type & G_LOCK_PENDING) ?
			      "(pending)" : "(owner)"));

		if (((locks[i].lock_type & G_LOCK_PENDING) == 0)
		    && !process_exists(locks[i].pid)) {

			DEBUGADD(10, ("lock owner %s died -- discarding\n",
				      server_id_str(talloc_tos(),
						    &locks[i].pid)));

			if (i < (num_locks-1)) {
				locks[i] = locks[num_locks-1];
			}
			num_locks -= 1;
		}
	}

	*plocks = locks;
	*pnum_locks = num_locks;
	return true;
}

static void g_lock_cleanup(int *pnum_locks, struct g_lock_rec *locks)
{
	int i, num_locks;

	num_locks = *pnum_locks;

	DEBUG(10, ("g_lock_cleanup: %d locks\n", num_locks));

	for (i=0; i<num_locks; i++) {
		if (process_exists(locks[i].pid)) {
			continue;
		}
		DEBUGADD(10, ("%s does not exist -- discarding\n",
			      server_id_str(talloc_tos(), &locks[i].pid)));

		if (i < (num_locks-1)) {
			locks[i] = locks[num_locks-1];
		}
		num_locks -= 1;
	}
	*pnum_locks = num_locks;
}

static struct g_lock_rec *g_lock_addrec(TALLOC_CTX *mem_ctx,
					struct g_lock_rec *locks,
					int *pnum_locks,
					const struct server_id pid,
					enum g_lock_type lock_type)
{
	struct g_lock_rec *result;
	int num_locks = *pnum_locks;

	result = talloc_realloc(mem_ctx, locks, struct g_lock_rec,
				num_locks+1);
	if (result == NULL) {
		return NULL;
	}

	result[num_locks].pid = pid;
	result[num_locks].lock_type = lock_type;
	*pnum_locks += 1;
	return result;
}

static void g_lock_got_retry(struct messaging_context *msg,
			     void *private_data,
			     uint32_t msg_type,
			     struct server_id server_id,
			     DATA_BLOB *data);

static NTSTATUS g_lock_trylock(struct g_lock_ctx *ctx, const char *name,
			       enum g_lock_type lock_type)
{
	struct db_record *rec = NULL;
	struct g_lock_rec *locks = NULL;
	int i, our_index, num_locks;
	struct server_id self;
	TDB_DATA data;
	NTSTATUS status = NT_STATUS_OK;
	NTSTATUS store_status;

again:
	rec = ctx->db->fetch_locked(ctx->db, talloc_tos(),
				    string_term_tdb_data(name));
	if (rec == NULL) {
		DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
		status = NT_STATUS_LOCK_NOT_GRANTED;
		goto done;
	}

	if (!g_lock_parse(talloc_tos(), rec->value, &num_locks, &locks)) {
		DEBUG(10, ("g_lock_parse for %s failed\n", name));
		status = NT_STATUS_INTERNAL_ERROR;
		goto done;
	}

	self = messaging_server_id(ctx->msg);
	our_index = -1;

	for (i=0; i<num_locks; i++) {
		if (procid_equal(&self, &locks[i].pid)) {
			if (our_index != -1) {
				DEBUG(1, ("g_lock_trylock: Added ourself "
					  "twice!\n"));
				status = NT_STATUS_INTERNAL_ERROR;
				goto done;
			}
			if ((locks[i].lock_type & G_LOCK_PENDING) == 0) {
				DEBUG(1, ("g_lock_trylock: Found ourself not "
					  "pending!\n"));
				status = NT_STATUS_INTERNAL_ERROR;
				goto done;
			}

			our_index = i;

			/* never conflict with ourself */
			continue;
		}
		if (g_lock_conflicts(lock_type, &locks[i])) {
			struct server_id pid = locks[i].pid;

			if (!process_exists(pid)) {
				TALLOC_FREE(locks);
				TALLOC_FREE(rec);
				status = g_lock_force_unlock(ctx, name, pid);
				if (!NT_STATUS_IS_OK(status)) {
					DEBUG(1, ("Could not unlock dead lock "
						  "holder!\n"));
					goto done;
				}
				goto again;
			}
			lock_type |= G_LOCK_PENDING;
		}
	}

	if (our_index == -1) {
		/* First round, add ourself */

		locks = g_lock_addrec(talloc_tos(), locks, &num_locks,
				      self, lock_type);
		if (locks == NULL) {
			DEBUG(10, ("g_lock_addrec failed\n"));
			status = NT_STATUS_NO_MEMORY;
			goto done;
		}
	} else {
		/*
		 * Retry. We were pending last time. Overwrite the
		 * stored lock_type with what we calculated, we might
		 * have acquired the lock this time.
		 */
		locks[our_index].lock_type = lock_type;
	}

	if (NT_STATUS_IS_OK(status) && ((lock_type & G_LOCK_PENDING) == 0)) {
		/*
		 * Walk through the list of locks, search for dead entries
		 */
		g_lock_cleanup(&num_locks, locks);
	}

	data = make_tdb_data((uint8_t *)locks, num_locks * sizeof(*locks));
	store_status = rec->store(rec, data, 0);
	if (!NT_STATUS_IS_OK(store_status)) {
		DEBUG(1, ("rec->store failed: %s\n",
			  nt_errstr(store_status)));
		status = store_status;
	}

done:
	TALLOC_FREE(locks);
	TALLOC_FREE(rec);

	if (NT_STATUS_IS_OK(status) && (lock_type & G_LOCK_PENDING) != 0) {
		return STATUS_PENDING;
	}

	return status;
}

NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, const char *name,
		     enum g_lock_type lock_type, struct timeval timeout)
{
	struct tevent_timer *te = NULL;
	NTSTATUS status;
	bool retry = false;
	struct timeval timeout_end;
	struct timeval time_now;

	DEBUG(10, ("Trying to acquire lock %d for %s\n", (int)lock_type,
		   name));

	if (lock_type & ~1) {
		DEBUG(1, ("Got invalid lock type %d for %s\n",
			  (int)lock_type, name));
		return NT_STATUS_INVALID_PARAMETER;
	}

#ifdef CLUSTER_SUPPORT
	if (lp_clustering()) {
		status = ctdb_watch_us(messaging_ctdbd_connection());
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(10, ("could not register retry with ctdb: %s\n",
				   nt_errstr(status)));
			goto done;
		}
	}
#endif

	status = messaging_register(ctx->msg, &retry, MSG_DBWRAP_G_LOCK_RETRY,
				    g_lock_got_retry);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(10, ("messaging_register failed: %s\n",
			   nt_errstr(status)));
		return status;
	}

	time_now = timeval_current();
	timeout_end = timeval_sum(&time_now, &timeout);

	while (true) {
		struct pollfd *pollfds;
		int num_pollfds;
		int saved_errno;
		int ret;
		struct timeval timeout_remaining, select_timeout;

		status = g_lock_trylock(ctx, name, lock_type);
		if (NT_STATUS_IS_OK(status)) {
			DEBUG(10, ("Got lock %s\n", name));
			break;
		}
		if (!NT_STATUS_EQUAL(status, STATUS_PENDING)) {
			DEBUG(10, ("g_lock_trylock failed: %s\n",
				   nt_errstr(status)));
			break;
		}

		DEBUG(10, ("g_lock_trylock: Did not get lock, waiting...\n"));

		/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		 * !!! HACK ALERT --- FIX ME !!!
		 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		 * What we really want to do here is to react to
		 * MSG_DBWRAP_G_LOCK_RETRY messages that are either sent
		 * by a client doing g_lock_unlock or by ourselves when
		 * we receive a CTDB_SRVID_SAMBA_NOTIFY or
		 * CTDB_SRVID_RECONFIGURE message from ctdbd, i.e. when
		 * either a client holding a lock or a complete node
		 * has died.
		 *
		 * Doing this properly involves calling tevent_loop_once(),
		 * but doing this here with the main ctdbd messaging context
		 * creates a nested event loop when g_lock_lock() is called
		 * from the main event loop, e.g. in a tcon_and_X where the
		 * share_info.tdb needs to be initialized and is locked by
		 * another process, or when the remote registry is accessed
		 * for writing and some other process already holds a lock
		 * on the registry.tdb.
		 *
		 * So as a quick fix, we act a little coarsely here: we do
		 * a select on the ctdb connection fd and when it is readable
		 * or we get EINTR, then we retry without actually parsing
		 * any ctdb packets or dispatching messages. This means that
		 * we retry more often than intended by design, but this does
		 * not harm and it is unobtrusive. When we have finished,
		 * the main loop will pick up all the messages and ctdb
		 * packets. The only extra twist is that we cannot use timed
		 * events here but have to handcode a timeout.
		 */

		/*
		 * We allocate 2 entries here. One is needed anyway for
		 * sys_poll and in the clustering case we might have to add
		 * the ctdb fd. This avoids the realloc then.
		 */
		pollfds = talloc_array(talloc_tos(), struct pollfd, 2);
		if (pollfds == NULL) {
			status = NT_STATUS_NO_MEMORY;
			break;
		}
		num_pollfds = 0;

#ifdef CLUSTER_SUPPORT
		if (lp_clustering()) {
			struct ctdbd_connection *conn;
			conn = messaging_ctdbd_connection();

			pollfds[0].fd = ctdbd_conn_get_fd(conn);
			pollfds[0].events = POLLIN|POLLHUP;
			num_pollfds += 1;
		}
#endif

		time_now = timeval_current();
		timeout_remaining = timeval_until(&time_now, &timeout_end);
		select_timeout = timeval_set(60, 0);

		select_timeout = timeval_min(&select_timeout,
					     &timeout_remaining);

		ret = sys_poll(pollfds, num_pollfds,
			       timeval_to_msec(select_timeout));

		/*
		 * We're not really interested in the actual flags. We just
		 * need to retry this whole thing.
		 */
		saved_errno = errno;
		TALLOC_FREE(pollfds);
		errno = saved_errno;

		if (ret == -1) {
			if (errno != EINTR) {
				DEBUG(1, ("error calling select: %s\n",
					  strerror(errno)));
				status = NT_STATUS_INTERNAL_ERROR;
				break;
			}
			/*
			 * This means a signal was received.
			 * It might have been a MSG_DBWRAP_G_LOCK_RETRY
			 * message. Retry.
			 */
		} else if (ret == 0) {
			if (timeval_expired(&timeout_end)) {
				DEBUG(10, ("g_lock_lock timed out\n"));
				status = NT_STATUS_LOCK_NOT_GRANTED;
				break;
			}
			DEBUG(10, ("select returned 0 but timeout not "
				   "expired, retrying\n"));
		} else if (ret != 1) {
			DEBUG(1, ("invalid return code of select: %d\n", ret));
			status = NT_STATUS_INTERNAL_ERROR;
			break;
		}
		/*
		 * This means ctdbd has sent us some data.
		 * Might be a CTDB_SRVID_RECONFIGURE or a
		 * CTDB_SRVID_SAMBA_NOTIFY message. Retry.
		 */
	}

#ifdef CLUSTER_SUPPORT
done:
#endif

	if (!NT_STATUS_IS_OK(status)) {
		NTSTATUS unlock_status;

		unlock_status = g_lock_unlock(ctx, name);

		if (!NT_STATUS_IS_OK(unlock_status)) {
			DEBUG(1, ("Could not remove ourself from the locking "
				  "db: %s\n", nt_errstr(status)));
		}
	}

	messaging_deregister(ctx->msg, MSG_DBWRAP_G_LOCK_RETRY, &retry);
	TALLOC_FREE(te);

	return status;
}
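
/*
 * Usage sketch (illustrative): take a write lock, waiting up to ten
 * seconds, run a critical section, then drop the lock again.
 *
 *   status = g_lock_lock(ctx, "my_lock", G_LOCK_WRITE,
 *                        timeval_set(10, 0));
 *   if (NT_STATUS_IS_OK(status)) {
 *           ... do work that needs the lock ...
 *           g_lock_unlock(ctx, "my_lock");
 *   }
 *
 * "my_lock" is a hypothetical lock name; any 0-terminated string works.
 */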

static void g_lock_got_retry(struct messaging_context *msg,
			     void *private_data,
			     uint32_t msg_type,
			     struct server_id server_id,
			     DATA_BLOB *data)
{
	bool *pretry = (bool *)private_data;

	DEBUG(10, ("Got retry message from pid %s\n",
		   server_id_str(talloc_tos(), &server_id)));

	*pretry = true;
}

static NTSTATUS g_lock_force_unlock(struct g_lock_ctx *ctx, const char *name,
				    struct server_id pid)
{
	struct db_record *rec = NULL;
	struct g_lock_rec *locks = NULL;
	int i, num_locks;
	enum g_lock_type lock_type;
	NTSTATUS status;

	rec = ctx->db->fetch_locked(ctx->db, talloc_tos(),
				    string_term_tdb_data(name));
	if (rec == NULL) {
		DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
		status = NT_STATUS_INTERNAL_ERROR;
		goto done;
	}

	if (!g_lock_parse(talloc_tos(), rec->value, &num_locks, &locks)) {
		DEBUG(10, ("g_lock_parse for %s failed\n", name));
		status = NT_STATUS_FILE_INVALID;
		goto done;
	}

	for (i=0; i<num_locks; i++) {
		if (procid_equal(&pid, &locks[i].pid)) {
			break;
		}
	}

	if (i == num_locks) {
		DEBUG(10, ("g_lock_force_unlock: Lock not found\n"));
		status = NT_STATUS_NOT_FOUND;
		goto done;
	}

	lock_type = locks[i].lock_type;

	if (i < (num_locks-1)) {
		locks[i] = locks[num_locks-1];
	}
	num_locks -= 1;

	if (num_locks == 0) {
		status = rec->delete_rec(rec);
	} else {
		TDB_DATA data;
		data = make_tdb_data((uint8_t *)locks,
				     sizeof(struct g_lock_rec) * num_locks);
		status = rec->store(rec, data, 0);
	}

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("g_lock_force_unlock: Could not store record: %s\n",
			  nt_errstr(status)));
		goto done;
	}

	TALLOC_FREE(rec);

	if ((lock_type & G_LOCK_PENDING) == 0) {
		int num_wakeups = 0;

		/*
		 * We have been the lock holder, so wake up waiters to retry.
		 * Don't tell all of them, to avoid a thundering herd. In case
		 * this leads to a complete stall because we miss some
		 * processes, the loop in g_lock_lock tries at least
		 * once a minute.
		 */

		for (i=0; i<num_locks; i++) {
			if ((locks[i].lock_type & G_LOCK_PENDING) == 0) {
				continue;
			}
			if (!process_exists(locks[i].pid)) {
				continue;
			}

			/*
			 * Ping all waiters to retry
			 */
			status = messaging_send(ctx->msg, locks[i].pid,
						MSG_DBWRAP_G_LOCK_RETRY,
						&data_blob_null);
			if (!NT_STATUS_IS_OK(status)) {
				DEBUG(1, ("sending retry to %s failed: %s\n",
					  server_id_str(talloc_tos(),
							&locks[i].pid),
					  nt_errstr(status)));
			}

			num_wakeups += 1;
			if (num_wakeups > 5) {
				break;
			}
		}
	}
done:
	/*
	 * For the error path, TALLOC_FREE(rec) as well. In the good
	 * path we have already freed it.
	 */
	TALLOC_FREE(rec);

	TALLOC_FREE(locks);
	return status;
}

NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, const char *name)
{
	NTSTATUS status;

	status = g_lock_force_unlock(ctx, name, messaging_server_id(ctx->msg));

#ifdef CLUSTER_SUPPORT
	if (lp_clustering()) {
		ctdb_unwatch(messaging_ctdbd_connection());
	}
#endif
	return status;
}

struct g_lock_locks_state {
	int (*fn)(const char *name, void *private_data);
	void *private_data;
};

static int g_lock_locks_fn(struct db_record *rec, void *priv)
{
	struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;

	if ((rec->key.dsize == 0) || (rec->key.dptr[rec->key.dsize-1] != 0)) {
		DEBUG(1, ("invalid key in g_lock.tdb, ignoring\n"));
		return 0;
	}
	return state->fn((char *)rec->key.dptr, state->private_data);
}

int g_lock_locks(struct g_lock_ctx *ctx,
		 int (*fn)(const char *name, void *private_data),
		 void *private_data)
{
	struct g_lock_locks_state state;

	state.fn = fn;
	state.private_data = private_data;

	return ctx->db->traverse_read(ctx->db, g_lock_locks_fn, &state);
}
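
/*
 * Usage sketch (illustrative): list every lock name currently present in
 * g_lock.tdb. "print_lock_name" is a hypothetical callback.
 *
 *   static int print_lock_name(const char *name, void *private_data)
 *   {
 *           d_printf("%s\n", name);
 *           return 0;
 *   }
 *
 *   g_lock_locks(ctx, print_lock_name, NULL);
 */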

NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, const char *name,
		     int (*fn)(struct server_id pid,
			       enum g_lock_type lock_type,
			       void *private_data),
		     void *private_data)
{
	TDB_DATA data;
	int i, num_locks;
	struct g_lock_rec *locks = NULL;
	bool ret;

	if (ctx->db->fetch(ctx->db, talloc_tos(), string_term_tdb_data(name),
			   &data) != 0) {
		return NT_STATUS_NOT_FOUND;
	}

	if ((data.dsize == 0) || (data.dptr == NULL)) {
		return NT_STATUS_OK;
	}

	ret = g_lock_parse(talloc_tos(), data, &num_locks, &locks);

	TALLOC_FREE(data.dptr);

	if (!ret) {
		DEBUG(10, ("g_lock_parse for %s failed\n", name));
		return NT_STATUS_INTERNAL_ERROR;
	}

	for (i=0; i<num_locks; i++) {
		if (fn(locks[i].pid, locks[i].lock_type, private_data) != 0) {
			break;
		}
	}
	TALLOC_FREE(locks);
	return NT_STATUS_OK;
}

struct g_lock_get_state {
	bool found;
	struct server_id *pid;
};

static int g_lock_get_fn(struct server_id pid, enum g_lock_type lock_type,
			 void *priv)
{
	struct g_lock_get_state *state = (struct g_lock_get_state *)priv;

	if ((lock_type & G_LOCK_PENDING) != 0) {
		/* Skip waiters, we want the current holder. */
		return 0;
	}

	state->found = true;
	*state->pid = pid;
	return 1;
}

NTSTATUS g_lock_get(struct g_lock_ctx *ctx, const char *name,
		    struct server_id *pid)
{
	struct g_lock_get_state state;
	NTSTATUS status;

	state.found = false;
	state.pid = pid;

	status = g_lock_dump(ctx, name, g_lock_get_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
	if (!state.found) {
		return NT_STATUS_NOT_FOUND;
	}
	return NT_STATUS_OK;
}
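
/*
 * Usage sketch (illustrative): look up the current (non-pending) holder of
 * a lock.
 *
 *   struct server_id holder;
 *   status = g_lock_get(ctx, "my_lock", &holder);
 *   if (NT_STATUS_IS_OK(status)) {
 *           DEBUG(10, ("held by %s\n",
 *                      server_id_str(talloc_tos(), &holder)));
 *   }
 */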

static bool g_lock_init_all(TALLOC_CTX *mem_ctx,
			    struct tevent_context **pev,
			    struct messaging_context **pmsg,
			    const struct server_id self,
			    struct g_lock_ctx **pg_ctx)
{
	struct tevent_context *ev = NULL;
	struct messaging_context *msg = NULL;
	struct g_lock_ctx *g_ctx = NULL;

	ev = tevent_context_init(mem_ctx);
	if (ev == NULL) {
		d_fprintf(stderr, "ERROR: could not init event context\n");
		goto fail;
	}
	msg = messaging_init(mem_ctx, self, ev);
	if (msg == NULL) {
		d_fprintf(stderr, "ERROR: could not init messaging context\n");
		goto fail;
	}
	g_ctx = g_lock_ctx_init(mem_ctx, msg);
	if (g_ctx == NULL) {
		d_fprintf(stderr, "ERROR: could not init g_lock context\n");
		goto fail;
	}

	*pev = ev;
	*pmsg = msg;
	*pg_ctx = g_ctx;

	return true;
fail:
	TALLOC_FREE(g_ctx);
	TALLOC_FREE(msg);
	TALLOC_FREE(ev);
	return false;
}

NTSTATUS g_lock_do(const char *name, enum g_lock_type lock_type,
		   struct timeval timeout, const struct server_id self,
		   void (*fn)(void *private_data), void *private_data)
{
	struct tevent_context *ev = NULL;
	struct messaging_context *msg = NULL;
	struct g_lock_ctx *g_ctx = NULL;
	NTSTATUS status;

	if (!g_lock_init_all(talloc_tos(), &ev, &msg, self, &g_ctx)) {
		status = NT_STATUS_ACCESS_DENIED;
		goto done;
	}

	status = g_lock_lock(g_ctx, name, lock_type, timeout);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}
	fn(private_data);
	g_lock_unlock(g_ctx, name);

done:
	TALLOC_FREE(g_ctx);
	TALLOC_FREE(msg);
	TALLOC_FREE(ev);
	return status;
}
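
/*
 * Usage sketch (illustrative): run a callback while holding a lock,
 * waiting up to a minute for it. "do_work", "my_state" and "self" (the
 * caller's struct server_id) are hypothetical.
 *
 *   status = g_lock_do("my_lock", G_LOCK_WRITE, timeval_set(60, 0),
 *                      self, do_work, my_state);
 */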