/*
   Unix SMB/CIFS implementation.
   global locks based on dbwrap and messaging
   Copyright (C) 2009 by Volker Lendecke

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "g_lock.h"

static NTSTATUS g_lock_force_unlock(struct g_lock_ctx *ctx, const char *name,
				    struct server_id pid);

struct g_lock_ctx {
	struct db_context *db;
	struct messaging_context *msg;
};

/*
 * The "g_lock.tdb" file contains records, indexed by the 0-terminated
 * lockname. The record contains an array of "struct g_lock_rec"
 * structures. Waiters have the lock_type with G_LOCK_PENDING or'ed.
 */

struct g_lock_rec {
	enum g_lock_type lock_type;
	struct server_id pid;
};

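/*
 * For illustration only: a lock that is held by one process and waited for
 * by another would be stored roughly as
 *
 *   key:   "lockname\0"
 *   value: { { G_LOCK_WRITE,                pid of the holder },
 *            { G_LOCK_WRITE|G_LOCK_PENDING, pid of the waiter } }
 *
 * The array is read and written as raw bytes, see g_lock_parse() and the
 * make_tdb_data() calls below.
 */
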
struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
				   struct messaging_context *msg)
{
	struct g_lock_ctx *result;

	result = talloc(mem_ctx, struct g_lock_ctx);
	if (result == NULL) {
		return NULL;
	}
	result->msg = msg;

	result->db = db_open(result, lock_path("g_lock.tdb"), 0,
			     TDB_CLEAR_IF_FIRST, O_RDWR|O_CREAT, 0700);
	if (result->db == NULL) {
		DEBUG(1, ("g_lock_init: Could not open g_lock.tdb"));
		TALLOC_FREE(result);
		return NULL;
	}
	return result;
}

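/*
 * Typical caller flow, as a rough sketch (the lock name and timeout are
 * arbitrary example values):
 *
 *	struct g_lock_ctx *ctx = g_lock_ctx_init(mem_ctx, msg_ctx);
 *	NTSTATUS status;
 *
 *	status = g_lock_lock(ctx, "some_global_lock", G_LOCK_WRITE,
 *			     timeval_set(60, 0));
 *	if (NT_STATUS_IS_OK(status)) {
 *		... critical section protected by the global lock ...
 *		g_lock_unlock(ctx, "some_global_lock");
 *	}
 */
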
static bool g_lock_conflicts(enum g_lock_type lock_type,
			     const struct g_lock_rec *rec)
{
	enum g_lock_type rec_lock = rec->lock_type;

	if ((rec_lock & G_LOCK_PENDING) != 0) {
		return false;
	}

	/*
	 * Only tested write locks so far. Very likely this routine
	 * needs to be fixed for read locks....
	 */
	if ((lock_type == G_LOCK_READ) && (rec_lock == G_LOCK_READ)) {
		return false;
	}
	return true;
}

static bool g_lock_parse(TALLOC_CTX *mem_ctx, TDB_DATA data,
			 int *pnum_locks, struct g_lock_rec **plocks)
{
	int i, num_locks;
	struct g_lock_rec *locks;

	if ((data.dsize % sizeof(struct g_lock_rec)) != 0) {
		DEBUG(1, ("invalid lock record length %d\n", (int)data.dsize));
		return false;
	}

	num_locks = data.dsize / sizeof(struct g_lock_rec);
	locks = talloc_array(mem_ctx, struct g_lock_rec, num_locks);
	if (locks == NULL) {
		DEBUG(1, ("talloc failed\n"));
		return false;
	}

	memcpy(locks, data.dptr, data.dsize);

	DEBUG(10, ("locks:\n"));
	for (i=0; i<num_locks; i++) {
		DEBUGADD(10, ("%s: %s %s\n",
			      procid_str(talloc_tos(), &locks[i].pid),
			      ((locks[i].lock_type & 1) == G_LOCK_READ) ?
			      "read" : "write",
			      (locks[i].lock_type & G_LOCK_PENDING) ?
			      "(pending)" : "(owner)"));

		if (((locks[i].lock_type & G_LOCK_PENDING) == 0)
		    && !process_exists(locks[i].pid)) {

			DEBUGADD(10, ("lock owner %s died -- discarding\n",
				      procid_str(talloc_tos(),
						 &locks[i].pid)));

			if (i < (num_locks-1)) {
				locks[i] = locks[num_locks-1];
			}
			num_locks -= 1;
		}
	}

	*plocks = locks;
	*pnum_locks = num_locks;
	return true;
}

static void g_lock_cleanup(int *pnum_locks, struct g_lock_rec *locks)
{
	int i, num_locks;

	num_locks = *pnum_locks;

	DEBUG(10, ("g_lock_cleanup: %d locks\n", num_locks));

	for (i=0; i<num_locks; i++) {
		if (process_exists(locks[i].pid)) {
			continue;
		}
		DEBUGADD(10, ("%s does not exist -- discarding\n",
			      procid_str(talloc_tos(), &locks[i].pid)));

		if (i < (num_locks-1)) {
			locks[i] = locks[num_locks-1];
		}
		num_locks -= 1;
	}
	*pnum_locks = num_locks;
	return;
}

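/*
 * Both g_lock_parse() and g_lock_cleanup() above discard stale entries with
 * an unordered-array removal: the last element is copied over slot i and the
 * count is decremented. The order of entries within a record is not
 * significant, so this is sufficient.
 */
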
static struct g_lock_rec *g_lock_addrec(TALLOC_CTX *mem_ctx,
					struct g_lock_rec *locks,
					int *pnum_locks,
					const struct server_id pid,
					enum g_lock_type lock_type)
{
	struct g_lock_rec *result;
	int num_locks = *pnum_locks;

	result = talloc_realloc(mem_ctx, locks, struct g_lock_rec,
				num_locks+1);
	if (result == NULL) {
		return NULL;
	}

	result[num_locks].pid = pid;
	result[num_locks].lock_type = lock_type;
	*pnum_locks += 1;
	return result;
}

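/*
 * Note that g_lock_addrec() grows the array with talloc_realloc(), which may
 * move the allocation; callers therefore have to continue with the returned
 * pointer, as g_lock_trylock() does below.
 */
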
static void g_lock_got_retry(struct messaging_context *msg,
			     void *private_data,
			     uint32_t msg_type,
			     struct server_id server_id,
			     DATA_BLOB *data);

static NTSTATUS g_lock_trylock(struct g_lock_ctx *ctx, const char *name,
			       enum g_lock_type lock_type)
{
	struct db_record *rec = NULL;
	struct g_lock_rec *locks = NULL;
	int i, num_locks;
	struct server_id self;
	int our_index;
	TDB_DATA data;
	NTSTATUS status = NT_STATUS_OK;
	NTSTATUS store_status;

again:
	rec = ctx->db->fetch_locked(ctx->db, talloc_tos(),
				    string_term_tdb_data(name));
	if (rec == NULL) {
		DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
		status = NT_STATUS_LOCK_NOT_GRANTED;
		goto done;
	}

	if (!g_lock_parse(talloc_tos(), rec->value, &num_locks, &locks)) {
		DEBUG(10, ("g_lock_parse for %s failed\n", name));
		status = NT_STATUS_INTERNAL_ERROR;
		goto done;
	}

	self = procid_self();
	our_index = -1;

	for (i=0; i<num_locks; i++) {
		if (procid_equal(&self, &locks[i].pid)) {
			if (our_index != -1) {
				DEBUG(1, ("g_lock_trylock: Added ourself "
					  "twice!\n"));
				status = NT_STATUS_INTERNAL_ERROR;
				goto done;
			}
			if ((locks[i].lock_type & G_LOCK_PENDING) == 0) {
				DEBUG(1, ("g_lock_trylock: Found ourself not "
					  "pending!\n"));
				status = NT_STATUS_INTERNAL_ERROR;
				goto done;
			}

			our_index = i;

			/* never conflict with ourself */
			continue;
		}
		if (g_lock_conflicts(lock_type, &locks[i])) {
			struct server_id pid = locks[i].pid;

			if (!process_exists(pid)) {
				TALLOC_FREE(locks);
				TALLOC_FREE(rec);
				status = g_lock_force_unlock(ctx, name, pid);
				if (!NT_STATUS_IS_OK(status)) {
					DEBUG(1, ("Could not unlock dead lock "
						  "holder!\n"));
					goto done;
				}
				goto again;
			}
			lock_type |= G_LOCK_PENDING;
		}
	}

	if (our_index == -1) {
		/* First round, add ourself */

		locks = g_lock_addrec(talloc_tos(), locks, &num_locks,
				      self, lock_type);
		if (locks == NULL) {
			DEBUG(10, ("g_lock_addrec failed\n"));
			status = NT_STATUS_NO_MEMORY;
			goto done;
		}
	} else {
		/*
		 * Retry. We were pending last time. Overwrite the
		 * stored lock_type with what we calculated, we might
		 * have acquired the lock this time.
		 */
		locks[our_index].lock_type = lock_type;
	}

	if (NT_STATUS_IS_OK(status) && ((lock_type & G_LOCK_PENDING) == 0)) {
		/*
		 * Walk through the list of locks, search for dead entries
		 */
		g_lock_cleanup(&num_locks, locks);
	}

	data = make_tdb_data((uint8_t *)locks, num_locks * sizeof(*locks));
	store_status = rec->store(rec, data, 0);
	if (!NT_STATUS_IS_OK(store_status)) {
		DEBUG(1, ("rec->store failed: %s\n",
			  nt_errstr(store_status)));
		status = store_status;
	}

done:
	TALLOC_FREE(locks);
	TALLOC_FREE(rec);

	if (NT_STATUS_IS_OK(status) && (lock_type & G_LOCK_PENDING) != 0) {
		return STATUS_PENDING;
	}

	return NT_STATUS_OK;
}

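/*
 * g_lock_trylock() above reports NT_STATUS_OK once the lock is owned and
 * STATUS_PENDING while we are queued as a waiter; g_lock_lock() below keeps
 * retrying on STATUS_PENDING until the requested timeout has expired.
 */
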
NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, const char *name,
		     enum g_lock_type lock_type, struct timeval timeout)
{
	struct tevent_timer *te = NULL;
	NTSTATUS status;
	bool retry = false;
	struct timeval timeout_end;
	struct timeval time_now;

	DEBUG(10, ("Trying to acquire lock %d for %s\n", (int)lock_type,
		   name));

	if (lock_type & ~1) {
		DEBUG(1, ("Got invalid lock type %d for %s\n",
			  (int)lock_type, name));
		return NT_STATUS_INVALID_PARAMETER;
	}

#ifdef CLUSTER_SUPPORT
	if (lp_clustering()) {
		status = ctdb_watch_us(messaging_ctdbd_connection());
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(10, ("could not register retry with ctdb: %s\n",
				   nt_errstr(status)));
			goto done;
		}
	}
#endif

	status = messaging_register(ctx->msg, &retry, MSG_DBWRAP_G_LOCK_RETRY,
				    g_lock_got_retry);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(10, ("messaging_register failed: %s\n",
			   nt_errstr(status)));
		return status;
	}

	time_now = timeval_current();
	timeout_end = timeval_sum(&time_now, &timeout);

	while (true) {
#ifdef CLUSTER_SUPPORT
		fd_set _r_fds;
#endif
		fd_set *r_fds = NULL;
		int max_fd = 0;
		int ret;
		struct timeval timeout_remaining, select_timeout;

		status = g_lock_trylock(ctx, name, lock_type);
		if (NT_STATUS_IS_OK(status)) {
			DEBUG(10, ("Got lock %s\n", name));
			break;
		}
		if (!NT_STATUS_EQUAL(status, STATUS_PENDING)) {
			DEBUG(10, ("g_lock_trylock failed: %s\n",
				   nt_errstr(status)));
			break;
		}

		DEBUG(10, ("g_lock_trylock: Did not get lock, waiting...\n"));

		/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		 * !!! HACK ALERT --- FIX ME !!!
		 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		 * What we really want to do here is to react to
		 * MSG_DBWRAP_G_LOCK_RETRY messages that are either sent
		 * by a client doing g_lock_unlock or by ourselves when
		 * we receive a CTDB_SRVID_SAMBA_NOTIFY or
		 * CTDB_SRVID_RECONFIGURE message from ctdbd, i.e. when
		 * either a client holding a lock or a complete node
		 * has died.
		 *
		 * Doing this properly involves calling tevent_loop_once(),
		 * but doing this here with the main ctdbd messaging context
		 * creates a nested event loop when g_lock_lock() is called
		 * from the main event loop, e.g. in a tcon_and_X where the
		 * share_info.tdb needs to be initialized and is locked by
		 * another process, or when the remote registry is accessed
		 * for writing and some other process already holds a lock
		 * on the registry.tdb.
		 *
		 * So as a quick fix, we act a little coarsely here: we do
		 * a select on the ctdb connection fd and when it is readable
		 * or we get EINTR, then we retry without actually parsing
		 * any ctdb packets or dispatching messages. This means that
		 * we retry more often than intended by design, but this does
		 * not harm and it is unobtrusive. When we have finished,
		 * the main loop will pick up all the messages and ctdb
		 * packets. The only extra twist is that we cannot use timed
		 * events here but have to handcode a timeout.
		 */

#ifdef CLUSTER_SUPPORT
		if (lp_clustering()) {
			struct ctdbd_connection *conn = messaging_ctdbd_connection();

			r_fds = &_r_fds;
			FD_ZERO(r_fds);
			max_fd = ctdbd_conn_get_fd(conn);
			FD_SET(max_fd, r_fds);
		}
#endif

		time_now = timeval_current();
		timeout_remaining = timeval_until(&time_now, &timeout_end);
		select_timeout = timeval_set(60, 0);

		select_timeout = timeval_min(&select_timeout,
					     &timeout_remaining);

		ret = sys_select(max_fd + 1, r_fds, NULL, NULL,
				 &select_timeout);
		if (ret == -1) {
			if (errno != EINTR) {
				DEBUG(1, ("error calling select: %s\n",
					  strerror(errno)));
				status = NT_STATUS_INTERNAL_ERROR;
				break;
			}
			/*
			 * errno == EINTR:
			 * This means a signal was received.
			 * It might have been a MSG_DBWRAP_G_LOCK_RETRY message.
			 * ==> retry
			 */
		} else if (ret == 0) {
			if (timeval_expired(&timeout_end)) {
				DEBUG(10, ("g_lock_lock timed out\n"));
				status = NT_STATUS_LOCK_NOT_GRANTED;
				break;
			} else {
				DEBUG(10, ("select returned 0 but timeout not "
					   "expired, retrying\n"));
			}
		} else if (ret != 1) {
			DEBUG(1, ("invalid return code of select: %d\n", ret));
			status = NT_STATUS_INTERNAL_ERROR;
			break;
		}
		/*
		 * ret == 1:
		 * This means ctdbd has sent us some data.
		 * Might be a CTDB_SRVID_RECONFIGURE or a
		 * CTDB_SRVID_SAMBA_NOTIFY message.
		 * ==> retry
		 */
	}

#ifdef CLUSTER_SUPPORT
done:
#endif

	if (!NT_STATUS_IS_OK(status)) {
		NTSTATUS unlock_status;

		unlock_status = g_lock_unlock(ctx, name);

		if (!NT_STATUS_IS_OK(unlock_status)) {
			DEBUG(1, ("Could not remove ourself from the locking "
				  "db: %s\n", nt_errstr(status)));
		}
	}

	messaging_deregister(ctx->msg, MSG_DBWRAP_G_LOCK_RETRY, &retry);
	TALLOC_FREE(te);

	return status;
}

static void g_lock_got_retry(struct messaging_context *msg,
			     void *private_data,
			     uint32_t msg_type,
			     struct server_id server_id,
			     DATA_BLOB *data)
{
	bool *pretry = (bool *)private_data;

	DEBUG(10, ("Got retry message from pid %s\n",
		   procid_str(talloc_tos(), &server_id)));

	*pretry = true;
}

static NTSTATUS g_lock_force_unlock(struct g_lock_ctx *ctx, const char *name,
				    struct server_id pid)
{
	struct db_record *rec = NULL;
	struct g_lock_rec *locks = NULL;
	int i, num_locks;
	enum g_lock_type lock_type;
	NTSTATUS status;

	rec = ctx->db->fetch_locked(ctx->db, talloc_tos(),
				    string_term_tdb_data(name));
	if (rec == NULL) {
		DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
		status = NT_STATUS_INTERNAL_ERROR;
		goto done;
	}

	if (!g_lock_parse(talloc_tos(), rec->value, &num_locks, &locks)) {
		DEBUG(10, ("g_lock_parse for %s failed\n", name));
		status = NT_STATUS_INTERNAL_ERROR;
		goto done;
	}

	for (i=0; i<num_locks; i++) {
		if (procid_equal(&pid, &locks[i].pid)) {
			break;
		}
	}

	if (i == num_locks) {
		DEBUG(10, ("g_lock_force_unlock: Lock not found\n"));
		status = NT_STATUS_INTERNAL_ERROR;
		goto done;
	}

	lock_type = locks[i].lock_type;

	if (i < (num_locks-1)) {
		locks[i] = locks[num_locks-1];
	}
	num_locks -= 1;

	if (num_locks == 0) {
		status = rec->delete_rec(rec);
	} else {
		TDB_DATA data;
		data = make_tdb_data((uint8_t *)locks,
				     sizeof(struct g_lock_rec) * num_locks);
		status = rec->store(rec, data, 0);
	}

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("g_lock_force_unlock: Could not store record: %s\n",
			  nt_errstr(status)));
		goto done;
	}

	TALLOC_FREE(rec);

	if ((lock_type & G_LOCK_PENDING) == 0) {
		int num_wakeups = 0;

		/*
		 * We've been the lock holder, so tell others to retry.
		 * Don't wake all of them, to avoid a thundering herd.
		 * In case this leads to a complete stall because we miss
		 * some processes, the loop in g_lock_lock tries at least
		 * once a minute.
		 */

		for (i=0; i<num_locks; i++) {
			if ((locks[i].lock_type & G_LOCK_PENDING) == 0) {
				continue;
			}
			if (!process_exists(locks[i].pid)) {
				continue;
			}

			/*
			 * Ping all waiters to retry
			 */
			status = messaging_send(ctx->msg, locks[i].pid,
						MSG_DBWRAP_G_LOCK_RETRY,
						&data_blob_null);
			if (!NT_STATUS_IS_OK(status)) {
				DEBUG(1, ("sending retry to %s failed: %s\n",
					  procid_str(talloc_tos(),
						     &locks[i].pid),
					  nt_errstr(status)));
			} else {
				num_wakeups += 1;
			}
			if (num_wakeups > 5) {
				break;
			}
		}
	}
done:
	/*
	 * For the error path, TALLOC_FREE(rec) as well. In the good
	 * path we have already freed it.
	 */
	TALLOC_FREE(rec);

	TALLOC_FREE(locks);
	return status;
}

NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, const char *name)
{
	NTSTATUS status;

	status = g_lock_force_unlock(ctx, name, procid_self());

#ifdef CLUSTER_SUPPORT
	if (lp_clustering()) {
		ctdb_unwatch(messaging_ctdbd_connection());
	}
#endif
	return status;
}

struct g_lock_locks_state {
	int (*fn)(const char *name, void *private_data);
	void *private_data;
};

static int g_lock_locks_fn(struct db_record *rec, void *priv)
{
	struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;

	if ((rec->key.dsize == 0) || (rec->key.dptr[rec->key.dsize-1] != 0)) {
		DEBUG(1, ("invalid key in g_lock.tdb, ignoring\n"));
		return 0;
	}
	return state->fn((char *)rec->key.dptr, state->private_data);
}

int g_lock_locks(struct g_lock_ctx *ctx,
		 int (*fn)(const char *name, void *private_data),
		 void *private_data)
{
	struct g_lock_locks_state state;

	state.fn = fn;
	state.private_data = private_data;

	return ctx->db->traverse_read(ctx->db, g_lock_locks_fn, &state);
}

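/*
 * A minimal traversal callback, as an illustrative sketch (the helper name
 * is made up); returning 0 lets the traverse continue:
 *
 *	static int print_lock_name(const char *name, void *private_data)
 *	{
 *		d_printf("lock: %s\n", name);
 *		return 0;
 *	}
 *
 *	g_lock_locks(ctx, print_lock_name, NULL);
 */
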
NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, const char *name,
		     int (*fn)(struct server_id pid,
			       enum g_lock_type lock_type,
			       void *private_data),
		     void *private_data)
{
	TDB_DATA data;
	int i, num_locks;
	struct g_lock_rec *locks = NULL;
	bool ret;

	if (ctx->db->fetch(ctx->db, talloc_tos(), string_term_tdb_data(name),
			   &data) != 0) {
		return NT_STATUS_NOT_FOUND;
	}

	if ((data.dsize == 0) || (data.dptr == NULL)) {
		return NT_STATUS_OK;
	}

	ret = g_lock_parse(talloc_tos(), data, &num_locks, &locks);

	TALLOC_FREE(data.dptr);

	if (!ret) {
		DEBUG(10, ("g_lock_parse for %s failed\n", name));
		return NT_STATUS_INTERNAL_ERROR;
	}

	for (i=0; i<num_locks; i++) {
		if (fn(locks[i].pid, locks[i].lock_type, private_data) != 0) {
			break;
		}
	}
	TALLOC_FREE(locks);
	return NT_STATUS_OK;
}

struct g_lock_get_state {
	bool found;
	struct server_id *pid;
};

static int g_lock_get_fn(struct server_id pid, enum g_lock_type lock_type,
			 void *priv)
{
	struct g_lock_get_state *state = (struct g_lock_get_state *)priv;

	if ((lock_type & G_LOCK_PENDING) != 0) {
		return 0;
	}

	state->found = true;
	*state->pid = pid;
	return 1;
}

NTSTATUS g_lock_get(struct g_lock_ctx *ctx, const char *name,
		    struct server_id *pid)
{
	struct g_lock_get_state state;
	NTSTATUS status;

	state.found = false;
	state.pid = pid;

	status = g_lock_dump(ctx, name, g_lock_get_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
	if (!state.found) {
		return NT_STATUS_NOT_FOUND;
	}
	return NT_STATUS_OK;
}