s3: Fix Coverity ID 2682: NULL_RETURNS
source3/lib/g_lock.c
/*
   Unix SMB/CIFS implementation.
   global locks based on dbwrap and messaging
   Copyright (C) 2009 by Volker Lendecke

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "system/filesys.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "g_lock.h"
#include "util_tdb.h"
#include "ctdbd_conn.h"
#include "../lib/util/select.h"
#include "system/select.h"
#include "messages.h"
static NTSTATUS g_lock_force_unlock(struct g_lock_ctx *ctx, const char *name,
				    struct server_id pid);

struct g_lock_ctx {
	struct db_context *db;
	struct messaging_context *msg;
};

/*
 * The "g_lock.tdb" file contains records, indexed by the 0-terminated
 * lockname. The record contains an array of "struct g_lock_rec"
 * structures. Waiters have the lock_type with G_LOCK_PENDING or'ed.
 */

struct g_lock_rec {
	enum g_lock_type lock_type;
	struct server_id pid;
};
struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
				   struct messaging_context *msg)
{
	struct g_lock_ctx *result;

	result = talloc(mem_ctx, struct g_lock_ctx);
	if (result == NULL) {
		return NULL;
	}
	result->msg = msg;

	result->db = db_open(result, lock_path("g_lock.tdb"), 0,
			     TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
			     O_RDWR|O_CREAT, 0600,
			     DBWRAP_LOCK_ORDER_2);
	if (result->db == NULL) {
		DEBUG(1, ("g_lock_init: Could not open g_lock.tdb\n"));
		TALLOC_FREE(result);
		return NULL;
	}
	return result;
}
static bool g_lock_conflicts(enum g_lock_type lock_type,
			     const struct g_lock_rec *rec)
{
	enum g_lock_type rec_lock = rec->lock_type;

	if ((rec_lock & G_LOCK_PENDING) != 0) {
		return false;
	}

	/*
	 * Only tested write locks so far. Very likely this routine
	 * needs to be fixed for read locks....
	 */
	if ((lock_type == G_LOCK_READ) && (rec_lock == G_LOCK_READ)) {
		return false;
	}
	return true;
}
static bool g_lock_parse(TALLOC_CTX *mem_ctx, TDB_DATA data,
			 int *pnum_locks, struct g_lock_rec **plocks)
{
	int i, num_locks;
	struct g_lock_rec *locks;

	if ((data.dsize % sizeof(struct g_lock_rec)) != 0) {
		DEBUG(1, ("invalid lock record length %d\n", (int)data.dsize));
		return false;
	}

	num_locks = data.dsize / sizeof(struct g_lock_rec);
	locks = talloc_array(mem_ctx, struct g_lock_rec, num_locks);
	if (locks == NULL) {
		DEBUG(1, ("talloc failed\n"));
		return false;
	}

	memcpy(locks, data.dptr, data.dsize);

	DEBUG(10, ("locks:\n"));
	for (i=0; i<num_locks; i++) {
		DEBUGADD(10, ("%s: %s %s\n",
			      server_id_str(talloc_tos(), &locks[i].pid),
			      ((locks[i].lock_type & 1) == G_LOCK_READ) ?
			      "read" : "write",
			      (locks[i].lock_type & G_LOCK_PENDING) ?
			      "(pending)" : "(owner)"));

		if (((locks[i].lock_type & G_LOCK_PENDING) == 0)
		    && !process_exists(locks[i].pid)) {

			DEBUGADD(10, ("lock owner %s died -- discarding\n",
				      server_id_str(talloc_tos(),
						    &locks[i].pid)));

			if (i < (num_locks-1)) {
				locks[i] = locks[num_locks-1];
			}
			num_locks -= 1;
		}
	}

	*plocks = locks;
	*pnum_locks = num_locks;
	return true;
}
static void g_lock_cleanup(int *pnum_locks, struct g_lock_rec *locks)
{
	int i, num_locks;

	num_locks = *pnum_locks;

	DEBUG(10, ("g_lock_cleanup: %d locks\n", num_locks));

	for (i=0; i<num_locks; i++) {
		if (process_exists(locks[i].pid)) {
			continue;
		}
		DEBUGADD(10, ("%s does not exist -- discarding\n",
			      server_id_str(talloc_tos(), &locks[i].pid)));

		if (i < (num_locks-1)) {
			locks[i] = locks[num_locks-1];
		}
		num_locks -= 1;
	}
	*pnum_locks = num_locks;
	return;
}
static struct g_lock_rec *g_lock_addrec(TALLOC_CTX *mem_ctx,
					struct g_lock_rec *locks,
					int *pnum_locks,
					const struct server_id pid,
					enum g_lock_type lock_type)
{
	struct g_lock_rec *result;
	int num_locks = *pnum_locks;

	result = talloc_realloc(mem_ctx, locks, struct g_lock_rec,
				num_locks+1);
	if (result == NULL) {
		return NULL;
	}

	result[num_locks].pid = pid;
	result[num_locks].lock_type = lock_type;
	*pnum_locks += 1;
	return result;
}
static void g_lock_got_retry(struct messaging_context *msg,
			     void *private_data,
			     uint32_t msg_type,
			     struct server_id server_id,
			     DATA_BLOB *data);
static NTSTATUS g_lock_trylock(struct g_lock_ctx *ctx, const char *name,
			       enum g_lock_type lock_type)
{
	struct db_record *rec = NULL;
	struct g_lock_rec *locks = NULL;
	int i, num_locks;
	struct server_id self;
	int our_index;
	TDB_DATA data;
	NTSTATUS status = NT_STATUS_OK;
	NTSTATUS store_status;
	TDB_DATA value;

again:
	rec = dbwrap_fetch_locked(ctx->db, talloc_tos(),
				  string_term_tdb_data(name));
	if (rec == NULL) {
		DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
		status = NT_STATUS_LOCK_NOT_GRANTED;
		goto done;
	}

	value = dbwrap_record_get_value(rec);
	if (!g_lock_parse(talloc_tos(), value, &num_locks, &locks)) {
		DEBUG(10, ("g_lock_parse for %s failed\n", name));
		status = NT_STATUS_INTERNAL_ERROR;
		goto done;
	}

	self = messaging_server_id(ctx->msg);
	our_index = -1;

	for (i=0; i<num_locks; i++) {
		if (procid_equal(&self, &locks[i].pid)) {
			if (our_index != -1) {
				DEBUG(1, ("g_lock_trylock: Added ourself "
					  "twice!\n"));
				status = NT_STATUS_INTERNAL_ERROR;
				goto done;
			}
			if ((locks[i].lock_type & G_LOCK_PENDING) == 0) {
				DEBUG(1, ("g_lock_trylock: Found ourself not "
					  "pending!\n"));
				status = NT_STATUS_INTERNAL_ERROR;
				goto done;
			}

			our_index = i;

			/* never conflict with ourself */
			continue;
		}
		if (g_lock_conflicts(lock_type, &locks[i])) {
			struct server_id pid = locks[i].pid;

			if (!process_exists(pid)) {
				TALLOC_FREE(locks);
				TALLOC_FREE(rec);
				status = g_lock_force_unlock(ctx, name, pid);
				if (!NT_STATUS_IS_OK(status)) {
					DEBUG(1, ("Could not unlock dead lock "
						  "holder!\n"));
					goto done;
				}
				goto again;
			}
			lock_type |= G_LOCK_PENDING;
		}
	}

	if (our_index == -1) {
		/* First round, add ourself */

		locks = g_lock_addrec(talloc_tos(), locks, &num_locks,
				      self, lock_type);
		if (locks == NULL) {
			DEBUG(10, ("g_lock_addrec failed\n"));
			status = NT_STATUS_NO_MEMORY;
			goto done;
		}
	} else {
		/*
		 * Retry. We were pending last time. Overwrite the
		 * stored lock_type with what we calculated, we might
		 * have acquired the lock this time.
		 */
		locks[our_index].lock_type = lock_type;
	}

	if (NT_STATUS_IS_OK(status) && ((lock_type & G_LOCK_PENDING) == 0)) {
		/*
		 * Walk through the list of locks, search for dead entries
		 */
		g_lock_cleanup(&num_locks, locks);
	}

	data = make_tdb_data((uint8_t *)locks, num_locks * sizeof(*locks));
	store_status = dbwrap_record_store(rec, data, 0);
	if (!NT_STATUS_IS_OK(store_status)) {
		DEBUG(1, ("rec->store failed: %s\n",
			  nt_errstr(store_status)));
		status = store_status;
	}

done:
	TALLOC_FREE(locks);
	TALLOC_FREE(rec);

	if (NT_STATUS_IS_OK(status) && (lock_type & G_LOCK_PENDING) != 0) {
		return STATUS_PENDING;
	}

	return NT_STATUS_OK;
}
NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, const char *name,
		     enum g_lock_type lock_type, struct timeval timeout)
{
	struct tevent_timer *te = NULL;
	NTSTATUS status;
	bool retry = false;
	struct timeval timeout_end;
	struct timeval time_now;

	DEBUG(10, ("Trying to acquire lock %d for %s\n", (int)lock_type,
		   name));

	if (lock_type & ~1) {
		DEBUG(1, ("Got invalid lock type %d for %s\n",
			  (int)lock_type, name));
		return NT_STATUS_INVALID_PARAMETER;
	}

#ifdef CLUSTER_SUPPORT
	if (lp_clustering()) {
		status = ctdb_watch_us(messaging_ctdbd_connection());
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(10, ("could not register retry with ctdb: %s\n",
				   nt_errstr(status)));
			goto done;
		}
	}
#endif

	status = messaging_register(ctx->msg, &retry, MSG_DBWRAP_G_LOCK_RETRY,
				    g_lock_got_retry);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(10, ("messaging_register failed: %s\n",
			   nt_errstr(status)));
		return status;
	}

	time_now = timeval_current();
	timeout_end = timeval_sum(&time_now, &timeout);

	while (true) {
		struct pollfd *pollfds;
		int num_pollfds;
		int saved_errno;
		int ret;
		struct timeval timeout_remaining, select_timeout;

		status = g_lock_trylock(ctx, name, lock_type);
		if (NT_STATUS_IS_OK(status)) {
			DEBUG(10, ("Got lock %s\n", name));
			break;
		}
		if (!NT_STATUS_EQUAL(status, STATUS_PENDING)) {
			DEBUG(10, ("g_lock_trylock failed: %s\n",
				   nt_errstr(status)));
			break;
		}

		DEBUG(10, ("g_lock_trylock: Did not get lock, waiting...\n"));

		/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		 * !!! HACK ALERT --- FIX ME !!!
		 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		 *
		 * What we really want to do here is to react to
		 * MSG_DBWRAP_G_LOCK_RETRY messages that are either sent
		 * by a client doing g_lock_unlock or by ourselves when
		 * we receive a CTDB_SRVID_SAMBA_NOTIFY or
		 * CTDB_SRVID_RECONFIGURE message from ctdbd, i.e. when
		 * either a client holding a lock or a complete node
		 * has died.
		 *
		 * Doing this properly involves calling tevent_loop_once(),
		 * but doing this here with the main ctdbd messaging context
		 * creates a nested event loop when g_lock_lock() is called
		 * from the main event loop, e.g. in a tcon_and_X where the
		 * share_info.tdb needs to be initialized and is locked by
		 * another process, or when the remote registry is accessed
		 * for writing and some other process already holds a lock
		 * on the registry.tdb.
		 *
		 * So as a quick fix, we act a little coarsely here: we do
		 * a select on the ctdb connection fd and when it is readable
		 * or we get EINTR, then we retry without actually parsing
		 * any ctdb packages or dispatching messages. This means that
		 * we retry more often than intended by design, but this does
		 * not harm and it is unobtrusive. When we have finished,
		 * the main loop will pick up all the messages and ctdb
		 * packets. The only extra twist is that we cannot use timed
		 * events here but have to handcode a timeout.
		 */

		/*
		 * We allocate 1 entry here. In the clustering case
		 * we might have to add the ctdb fd. This avoids the
		 * realloc then.
		 */
		pollfds = talloc_array(talloc_tos(), struct pollfd, 1);
		if (pollfds == NULL) {
			status = NT_STATUS_NO_MEMORY;
			break;
		}
		num_pollfds = 0;

#ifdef CLUSTER_SUPPORT
		if (lp_clustering()) {
			struct ctdbd_connection *conn;
			conn = messaging_ctdbd_connection();

			pollfds[0].fd = ctdbd_conn_get_fd(conn);
			pollfds[0].events = POLLIN|POLLHUP;

			num_pollfds += 1;
		}
#endif

		time_now = timeval_current();
		timeout_remaining = timeval_until(&time_now, &timeout_end);
		select_timeout = timeval_set(60, 0);

		select_timeout = timeval_min(&select_timeout,
					     &timeout_remaining);

		ret = poll(pollfds, num_pollfds,
			   timeval_to_msec(select_timeout));

		/*
		 * We're not *really* interested in the actual flags. We just
		 * need to retry this whole thing.
		 */
		saved_errno = errno;
		TALLOC_FREE(pollfds);
		errno = saved_errno;

		if (ret == -1) {
			if (errno != EINTR) {
				DEBUG(1, ("error calling select: %s\n",
					  strerror(errno)));
				status = NT_STATUS_INTERNAL_ERROR;
				break;
			}
			/*
			 * errno == EINTR:
			 * This means a signal was received.
			 * It might have been a MSG_DBWRAP_G_LOCK_RETRY
			 * message.
			 * ==> retry
			 */
		} else if (ret == 0) {
			if (timeval_expired(&timeout_end)) {
				DEBUG(10, ("g_lock_lock timed out\n"));
				status = NT_STATUS_LOCK_NOT_GRANTED;
				break;
			} else {
				DEBUG(10, ("select returned 0 but timeout "
					   "not expired, retrying\n"));
			}
		} else if (ret != 1) {
			DEBUG(1, ("invalid return code of select: %d\n", ret));
			status = NT_STATUS_INTERNAL_ERROR;
			break;
		}
		/*
		 * ret == 1:
		 * This means ctdbd has sent us some data.
		 * Might be a CTDB_SRVID_RECONFIGURE or a
		 * CTDB_SRVID_SAMBA_NOTIFY message.
		 * ==> retry
		 */
	}

#ifdef CLUSTER_SUPPORT
done:
#endif

	if (!NT_STATUS_IS_OK(status)) {
		NTSTATUS unlock_status;

		unlock_status = g_lock_unlock(ctx, name);

		if (!NT_STATUS_IS_OK(unlock_status)) {
			DEBUG(1, ("Could not remove ourself from the locking "
				  "db: %s\n", nt_errstr(status)));
		}
	}

	messaging_deregister(ctx->msg, MSG_DBWRAP_G_LOCK_RETRY, &retry);
	TALLOC_FREE(te);

	return status;
}
static void g_lock_got_retry(struct messaging_context *msg,
			     void *private_data,
			     uint32_t msg_type,
			     struct server_id server_id,
			     DATA_BLOB *data)
{
	bool *pretry = (bool *)private_data;

	DEBUG(10, ("Got retry message from pid %s\n",
		   server_id_str(talloc_tos(), &server_id)));

	*pretry = true;
}
static NTSTATUS g_lock_force_unlock(struct g_lock_ctx *ctx, const char *name,
				    struct server_id pid)
{
	struct db_record *rec = NULL;
	struct g_lock_rec *locks = NULL;
	int i, num_locks;
	enum g_lock_type lock_type;
	NTSTATUS status;
	TDB_DATA value;

	rec = dbwrap_fetch_locked(ctx->db, talloc_tos(),
				  string_term_tdb_data(name));
	if (rec == NULL) {
		DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
		status = NT_STATUS_INTERNAL_ERROR;
		goto done;
	}

	value = dbwrap_record_get_value(rec);

	if (!g_lock_parse(talloc_tos(), value, &num_locks, &locks)) {
		DEBUG(10, ("g_lock_parse for %s failed\n", name));
		status = NT_STATUS_FILE_INVALID;
		goto done;
	}

	for (i=0; i<num_locks; i++) {
		if (procid_equal(&pid, &locks[i].pid)) {
			break;
		}
	}

	if (i == num_locks) {
		DEBUG(10, ("g_lock_force_unlock: Lock not found\n"));
		status = NT_STATUS_NOT_FOUND;
		goto done;
	}

	lock_type = locks[i].lock_type;

	if (i < (num_locks-1)) {
		locks[i] = locks[num_locks-1];
	}
	num_locks -= 1;

	if (num_locks == 0) {
		status = dbwrap_record_delete(rec);
	} else {
		TDB_DATA data;
		data = make_tdb_data((uint8_t *)locks,
				     sizeof(struct g_lock_rec) * num_locks);
		status = dbwrap_record_store(rec, data, 0);
	}

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("g_lock_force_unlock: Could not store record: %s\n",
			  nt_errstr(status)));
		goto done;
	}

	TALLOC_FREE(rec);

	if ((lock_type & G_LOCK_PENDING) == 0) {
		int num_wakeups = 0;

		/*
		 * We've been the lock holder. Tell others to retry, but
		 * don't tell all of them, to avoid a thundering herd. In
		 * case this leads to a complete stall because we miss some
		 * processes, the loop in g_lock_lock tries at least
		 * once a minute.
		 */

		for (i=0; i<num_locks; i++) {
			if ((locks[i].lock_type & G_LOCK_PENDING) == 0) {
				continue;
			}
			if (!process_exists(locks[i].pid)) {
				continue;
			}

			/*
			 * Ping all waiters to retry
			 */
			status = messaging_send(ctx->msg, locks[i].pid,
						MSG_DBWRAP_G_LOCK_RETRY,
						&data_blob_null);
			if (!NT_STATUS_IS_OK(status)) {
				DEBUG(1, ("sending retry to %s failed: %s\n",
					  server_id_str(talloc_tos(),
							&locks[i].pid),
					  nt_errstr(status)));
			} else {
				num_wakeups += 1;
			}
			if (num_wakeups > 5) {
				break;
			}
		}
	}
done:
	/*
	 * For the error path, TALLOC_FREE(rec) as well. In the good
	 * path we have already freed it.
	 */
	TALLOC_FREE(rec);

	TALLOC_FREE(locks);
	return status;
}
NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, const char *name)
{
	NTSTATUS status;

	status = g_lock_force_unlock(ctx, name, messaging_server_id(ctx->msg));

#ifdef CLUSTER_SUPPORT
	if (lp_clustering()) {
		ctdb_unwatch(messaging_ctdbd_connection());
	}
#endif
	return status;
}
struct g_lock_locks_state {
	int (*fn)(const char *name, void *private_data);
	void *private_data;
};

static int g_lock_locks_fn(struct db_record *rec, void *priv)
{
	TDB_DATA key;
	struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;

	key = dbwrap_record_get_key(rec);
	if ((key.dsize == 0) || (key.dptr[key.dsize-1] != 0)) {
		DEBUG(1, ("invalid key in g_lock.tdb, ignoring\n"));
		return 0;
	}
	return state->fn((char *)key.dptr, state->private_data);
}
int g_lock_locks(struct g_lock_ctx *ctx,
		 int (*fn)(const char *name, void *private_data),
		 void *private_data)
{
	struct g_lock_locks_state state;
	NTSTATUS status;
	int count;

	state.fn = fn;
	state.private_data = private_data;

	status = dbwrap_traverse_read(ctx->db, g_lock_locks_fn, &state, &count);
	if (!NT_STATUS_IS_OK(status)) {
		return -1;
	} else {
		return count;
	}
}
NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, const char *name,
		     int (*fn)(struct server_id pid,
			       enum g_lock_type lock_type,
			       void *private_data),
		     void *private_data)
{
	TDB_DATA data;
	int i, num_locks;
	struct g_lock_rec *locks = NULL;
	bool ret;
	NTSTATUS status;

	status = dbwrap_fetch_bystring(ctx->db, talloc_tos(), name, &data);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	if ((data.dsize == 0) || (data.dptr == NULL)) {
		return NT_STATUS_OK;
	}

	ret = g_lock_parse(talloc_tos(), data, &num_locks, &locks);

	TALLOC_FREE(data.dptr);

	if (!ret) {
		DEBUG(10, ("g_lock_parse for %s failed\n", name));
		return NT_STATUS_INTERNAL_ERROR;
	}

	for (i=0; i<num_locks; i++) {
		if (fn(locks[i].pid, locks[i].lock_type, private_data) != 0) {
			break;
		}
	}
	TALLOC_FREE(locks);
	return NT_STATUS_OK;
}
struct g_lock_get_state {
	bool found;
	struct server_id *pid;
};

static int g_lock_get_fn(struct server_id pid, enum g_lock_type lock_type,
			 void *priv)
{
	struct g_lock_get_state *state = (struct g_lock_get_state *)priv;

	if ((lock_type & G_LOCK_PENDING) != 0) {
		return 0;
	}

	state->found = true;
	*state->pid = pid;
	return 1;
}
NTSTATUS g_lock_get(struct g_lock_ctx *ctx, const char *name,
		    struct server_id *pid)
{
	struct g_lock_get_state state;
	NTSTATUS status;

	state.found = false;
	state.pid = pid;

	status = g_lock_dump(ctx, name, g_lock_get_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
	if (!state.found) {
		return NT_STATUS_NOT_FOUND;
	}
	return NT_STATUS_OK;
}
static bool g_lock_init_all(TALLOC_CTX *mem_ctx,
			    struct tevent_context **pev,
			    struct messaging_context **pmsg,
			    struct g_lock_ctx **pg_ctx)
{
	struct tevent_context *ev = NULL;
	struct messaging_context *msg = NULL;
	struct g_lock_ctx *g_ctx = NULL;

	ev = tevent_context_init(mem_ctx);
	if (ev == NULL) {
		d_fprintf(stderr, "ERROR: could not init event context\n");
		goto fail;
	}
	msg = messaging_init(mem_ctx, ev);
	if (msg == NULL) {
		d_fprintf(stderr, "ERROR: could not init messaging context\n");
		goto fail;
	}
	g_ctx = g_lock_ctx_init(mem_ctx, msg);
	if (g_ctx == NULL) {
		d_fprintf(stderr, "ERROR: could not init g_lock context\n");
		goto fail;
	}

	*pev = ev;
	*pmsg = msg;
	*pg_ctx = g_ctx;
	return true;
fail:
	TALLOC_FREE(g_ctx);
	TALLOC_FREE(msg);
	TALLOC_FREE(ev);
	return false;
}
NTSTATUS g_lock_do(const char *name, enum g_lock_type lock_type,
		   struct timeval timeout,
		   void (*fn)(void *private_data), void *private_data)
{
	struct tevent_context *ev = NULL;
	struct messaging_context *msg = NULL;
	struct g_lock_ctx *g_ctx = NULL;
	NTSTATUS status;

	if (!g_lock_init_all(talloc_tos(), &ev, &msg, &g_ctx)) {
		status = NT_STATUS_ACCESS_DENIED;
		goto done;
	}

	status = g_lock_lock(g_ctx, name, lock_type, timeout);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}
	fn(private_data);
	g_lock_unlock(g_ctx, name);

done:
	TALLOC_FREE(g_ctx);
	TALLOC_FREE(msg);
	TALLOC_FREE(ev);
	return status;
}
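For context, a minimal caller sketch follows. It is not part of g_lock.c; the lock name "example_lockname", the callback and the wrapper function are hypothetical, and it assumes the usual Samba build environment where g_lock.h declares enum g_lock_type (including G_LOCK_WRITE) and timeval_set() is available, as used in the file above.

/* Hypothetical usage illustration only -- not part of g_lock.c. */
#include "includes.h"
#include "g_lock.h"

/* Runs while "example_lockname" is held; receives private_data unchanged. */
static void update_shared_state(void *private_data)
{
	int *counter = (int *)private_data;
	*counter += 1;
}

static NTSTATUS example_use_g_lock_do(void)
{
	int counter = 0;

	/*
	 * g_lock_do() sets up its own tevent/messaging/g_lock contexts via
	 * g_lock_init_all(), waits up to 10 seconds for the write lock,
	 * calls the callback and then unlocks again.
	 */
	return g_lock_do("example_lockname", G_LOCK_WRITE,
			 timeval_set(10, 0),
			 update_shared_state, &counter);
}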