g_lock: simplify g_lock_trylock
source3/lib/g_lock.c

/*
   Unix SMB/CIFS implementation.
   global locks based on dbwrap and messaging
   Copyright (C) 2009 by Volker Lendecke

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "system/filesys.h"
#include "lib/util/server_id.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "dbwrap/dbwrap_watch.h"
#include "g_lock.h"
#include "util_tdb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "messages.h"
#include "serverid.h"

struct g_lock_ctx {
        struct db_context *db;
        struct messaging_context *msg;
};

/*
 * The "g_lock.tdb" file contains records, indexed by the 0-terminated
 * lockname. The record contains an array of "struct g_lock_rec"
 * structures.
 */

#define G_LOCK_REC_LENGTH (SERVER_ID_BUF_LENGTH+1)
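
/*
 * Record layout as produced by g_lock_put() below (a descriptive
 * sketch, not a normative spec):
 *
 *	uint32_t num_locks                 (little-endian, SIVAL)
 *	uint8_t  locks[num_locks * G_LOCK_REC_LENGTH]
 *	         (per entry: 1 byte lock_type, then SERVER_ID_BUF_LENGTH
 *	         bytes of server_id_put()-packed server_id)
 *	uint8_t  data[]                    (opaque user data, rest of record)
 */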

static void g_lock_rec_put(uint8_t buf[G_LOCK_REC_LENGTH],
                           const struct g_lock_rec rec)
{
        SCVAL(buf, 0, rec.lock_type);
        server_id_put(buf+1, rec.pid);
}

static void g_lock_rec_get(struct g_lock_rec *rec,
                           const uint8_t buf[G_LOCK_REC_LENGTH])
{
        rec->lock_type = CVAL(buf, 0);
        server_id_get(&rec->pid, buf+1);
}

static ssize_t g_lock_put(uint8_t *buf, size_t buflen,
                          const struct g_lock_rec *locks,
                          size_t num_locks,
                          const uint8_t *data, size_t datalen)
{
        size_t i, len, ofs;

        if (num_locks > UINT32_MAX/G_LOCK_REC_LENGTH) {
                return -1;
        }

        len = num_locks * G_LOCK_REC_LENGTH;

        len += sizeof(uint32_t);
        if (len < sizeof(uint32_t)) {
                return -1;
        }

        len += datalen;
        if (len < datalen) {
                return -1;
        }

        if (len > buflen) {
                return len;
        }

        ofs = 0;
        SIVAL(buf, ofs, num_locks);
        ofs += sizeof(uint32_t);

        for (i=0; i<num_locks; i++) {
                g_lock_rec_put(buf+ofs, locks[i]);
                ofs += G_LOCK_REC_LENGTH;
        }

        if ((data != NULL) && (datalen != 0)) {
                memcpy(buf+ofs, data, datalen);
        }

        return len;
}
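
/*
 * g_lock_put() follows the usual "size query" convention: called with a
 * too-small (or NULL) buffer it returns the required length without
 * writing anything, so callers can size a buffer first and marshall
 * second. A sketch of the pattern (g_lock_record_store() below does
 * exactly this):
 *
 *	len = g_lock_put(NULL, 0, locks, num_locks, data, datalen);
 *	buf = talloc_array(mem_ctx, uint8_t, len);
 *	g_lock_put(buf, len, locks, num_locks, data, datalen);
 */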

static ssize_t g_lock_get(TDB_DATA recval,
                          struct g_lock_rec *locks, size_t num_locks,
                          uint8_t **data, size_t *datalen)
{
        size_t found_locks;

        if (recval.dsize < sizeof(uint32_t)) {
                /* Fresh or invalid record */
                found_locks = 0;
                goto done;
        }

        found_locks = IVAL(recval.dptr, 0);
        recval.dptr += sizeof(uint32_t);
        recval.dsize -= sizeof(uint32_t);

        if (found_locks > recval.dsize/G_LOCK_REC_LENGTH) {
                /* Invalid record, tell the caller it's corrupt */
                return -1;
        }

        if (found_locks <= num_locks) {
                size_t i;

                for (i=0; i<found_locks; i++) {
                        g_lock_rec_get(&locks[i], recval.dptr);
                        recval.dptr += G_LOCK_REC_LENGTH;
                        recval.dsize -= G_LOCK_REC_LENGTH;
                }
        } else {
                /*
                 * Not enough space passed in by the caller, don't
                 * parse the locks.
                 */
                recval.dptr += found_locks * G_LOCK_REC_LENGTH;
                recval.dsize -= found_locks * G_LOCK_REC_LENGTH;
        }

done:
        if (data != NULL) {
                *data = recval.dptr;
        }
        if (datalen != NULL) {
                *datalen = recval.dsize;
        }
        return found_locks;
}

static NTSTATUS g_lock_get_talloc(TALLOC_CTX *mem_ctx, TDB_DATA recval,
                                  struct g_lock_rec **plocks,
                                  size_t *pnum_locks,
                                  uint8_t **data, size_t *datalen)
{
        struct g_lock_rec *locks;
        ssize_t num_locks;

        num_locks = g_lock_get(recval, NULL, 0, NULL, NULL);
        if (num_locks == -1) {
                return NT_STATUS_INTERNAL_DB_CORRUPTION;
        }
        locks = talloc_array(mem_ctx, struct g_lock_rec, num_locks);
        if (locks == NULL) {
                return NT_STATUS_NO_MEMORY;
        }
        g_lock_get(recval, locks, num_locks, data, datalen);

        *plocks = locks;
        *pnum_locks = num_locks;

        return NT_STATUS_OK;
}

struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
                                   struct messaging_context *msg)
{
        struct g_lock_ctx *result;
        struct db_context *backend;
        char *db_path;

        result = talloc(mem_ctx, struct g_lock_ctx);
        if (result == NULL) {
                return NULL;
        }
        result->msg = msg;

        db_path = lock_path("g_lock.tdb");
        if (db_path == NULL) {
                TALLOC_FREE(result);
                return NULL;
        }

        backend = db_open(result, db_path, 0,
                          TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
                          O_RDWR|O_CREAT, 0600,
                          DBWRAP_LOCK_ORDER_3,
                          DBWRAP_FLAG_NONE);
        TALLOC_FREE(db_path);
        if (backend == NULL) {
                DEBUG(1, ("g_lock_init: Could not open g_lock.tdb\n"));
                TALLOC_FREE(result);
                return NULL;
        }

        result->db = db_open_watched(result, backend, msg);
        if (result->db == NULL) {
                DBG_WARNING("g_lock_init: db_open_watched failed\n");
                TALLOC_FREE(result);
                return NULL;
        }
        return result;
}
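
/*
 * The db_open_watched() wrapper is what makes the async wait below
 * work: waiters register on a record via dbwrap_watched_watch_send()
 * and are woken through messaging when someone else stores or deletes
 * that record.
 */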

static bool g_lock_conflicts(enum g_lock_type l1, enum g_lock_type l2)
{
        /*
         * Only tested write locks so far. Very likely this routine
         * needs to be fixed for read locks....
         */
        if ((l1 == G_LOCK_READ) && (l2 == G_LOCK_READ)) {
                return false;
        }
        return true;
}
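
/*
 * The resulting conflict matrix; READ/READ is the only compatible
 * pair:
 *
 *	            l2=READ   l2=WRITE
 *	l1=READ      false      true
 *	l1=WRITE     true       true
 */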

static NTSTATUS g_lock_record_store(struct db_record *rec,
                                    const struct g_lock_rec *locks,
                                    size_t num_locks,
                                    const uint8_t *data, size_t datalen)
{
        ssize_t len;
        uint8_t *buf;
        NTSTATUS status;

        len = g_lock_put(NULL, 0, locks, num_locks, data, datalen);
        if (len == -1) {
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        buf = talloc_array(rec, uint8_t, len);
        if (buf == NULL) {
                return NT_STATUS_NO_MEMORY;
        }

        g_lock_put(buf, len, locks, num_locks, data, datalen);

        status = dbwrap_record_store(
                rec, (TDB_DATA) { .dptr = buf, .dsize = len }, 0);

        TALLOC_FREE(buf);

        return status;
}

static NTSTATUS g_lock_trylock(struct db_record *rec, struct server_id self,
                               enum g_lock_type type,
                               struct server_id *blocker)
{
        TDB_DATA data, userdata;
        size_t i, num_locks;
        struct g_lock_rec *locks, *tmp;
        NTSTATUS status;
        bool modified = false;

        data = dbwrap_record_get_value(rec);

        status = g_lock_get_talloc(talloc_tos(), data, &locks, &num_locks,
                                   &userdata.dptr, &userdata.dsize);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        if ((type == G_LOCK_READ) && (num_locks > 0)) {
                /*
                 * Read locks can stay around forever if the process
                 * dies. Do a heuristic check for process existence:
                 * Check one random process for existence. Hopefully
                 * this will keep runaway read locks under control.
                 */
                i = generate_random() % num_locks;

                if (!serverid_exists(&locks[i].pid)) {
                        locks[i] = locks[num_locks-1];
                        num_locks -= 1;
                        modified = true;
                }
        }

        for (i=0; i<num_locks; i++) {
                struct g_lock_rec *lock = &locks[i];

                if (serverid_equal(&self, &lock->pid)) {
                        if (lock->lock_type == type) {
                                status = NT_STATUS_WAS_LOCKED;
                                goto done;
                        }
                        /*
                         * Remove "our" lock entry. Re-add it later
                         * with our new lock type.
                         */
                        locks[i] = locks[num_locks-1];
                        num_locks -= 1;
                        continue;
                }

                if (g_lock_conflicts(type, lock->lock_type)) {
                        struct server_id pid = lock->pid;

                        /*
                         * As the serverid_exists might recurse into
                         * the g_lock code, we use
                         * SERVERID_UNIQUE_ID_NOT_TO_VERIFY to avoid the loop
                         */
                        pid.unique_id = SERVERID_UNIQUE_ID_NOT_TO_VERIFY;

                        if (serverid_exists(&pid)) {
                                status = NT_STATUS_LOCK_NOT_GRANTED;
                                *blocker = lock->pid;
                                goto done;
                        }

                        /*
                         * Delete stale conflicting entry
                         */
                        locks[i] = locks[num_locks-1];
                        num_locks -= 1;
                        modified = true;
                }
        }

        tmp = talloc_realloc(talloc_tos(), locks, struct g_lock_rec,
                             num_locks+1);
        if (tmp == NULL) {
                status = NT_STATUS_NO_MEMORY;
                goto done;
        }
        locks = tmp;

        locks[num_locks] = (struct g_lock_rec) {
                .pid = self, .lock_type = type
        };
        num_locks += 1;

        modified = true;

        status = NT_STATUS_OK;
done:
        if (modified) {
                NTSTATUS store_status;
                store_status = g_lock_record_store(
                        rec, locks, num_locks, userdata.dptr, userdata.dsize);
                if (!NT_STATUS_IS_OK(store_status)) {
                        DBG_WARNING("g_lock_record_store failed: %s\n",
                                    nt_errstr(store_status));
                        status = store_status;
                }
        }
        TALLOC_FREE(locks);
        return status;
}
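
/*
 * To summarize g_lock_trylock(): the lock array is walked once. An
 * existing entry for "self" is either an error (same lock type held
 * already) or removed for re-add with the new type. A conflicting
 * entry belonging to a live process fails the attempt and reports that
 * process as *blocker; conflicting entries of dead processes are
 * pruned in passing. Only when no live conflict remains is our entry
 * appended and the record stored back.
 */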

struct g_lock_lock_state {
        struct tevent_context *ev;
        struct g_lock_ctx *ctx;
        const char *name;
        enum g_lock_type type;
};

static void g_lock_lock_retry(struct tevent_req *subreq);

struct tevent_req *g_lock_lock_send(TALLOC_CTX *mem_ctx,
                                    struct tevent_context *ev,
                                    struct g_lock_ctx *ctx,
                                    const char *name,
                                    enum g_lock_type type)
{
        struct tevent_req *req, *subreq;
        struct g_lock_lock_state *state;
        struct db_record *rec;
        struct server_id self, blocker;
        NTSTATUS status;

        req = tevent_req_create(mem_ctx, &state, struct g_lock_lock_state);
        if (req == NULL) {
                return NULL;
        }
        state->ev = ev;
        state->ctx = ctx;
        state->name = name;
        state->type = type;

        rec = dbwrap_fetch_locked(ctx->db, talloc_tos(),
                                  string_term_tdb_data(state->name));
        if (rec == NULL) {
                DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
                tevent_req_nterror(req, NT_STATUS_LOCK_NOT_GRANTED);
                return tevent_req_post(req, ev);
        }

        self = messaging_server_id(state->ctx->msg);

        status = g_lock_trylock(rec, self, state->type, &blocker);
        if (NT_STATUS_IS_OK(status)) {
                TALLOC_FREE(rec);
                tevent_req_done(req);
                return tevent_req_post(req, ev);
        }
        if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
                TALLOC_FREE(rec);
                tevent_req_nterror(req, status);
                return tevent_req_post(req, ev);
        }
        subreq = dbwrap_watched_watch_send(state, state->ev, rec, blocker);
        TALLOC_FREE(rec);
        if (tevent_req_nomem(subreq, req)) {
                return tevent_req_post(req, ev);
        }
        if (!tevent_req_set_endtime(
                    subreq, state->ev,
                    timeval_current_ofs(5 + sys_random() % 5, 0))) {
                tevent_req_oom(req);
                return tevent_req_post(req, ev);
        }
        tevent_req_set_callback(subreq, g_lock_lock_retry, req);
        return req;
}

static void g_lock_lock_retry(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct g_lock_lock_state *state = tevent_req_data(
                req, struct g_lock_lock_state);
        struct server_id self = messaging_server_id(state->ctx->msg);
        struct server_id blocker;
        struct db_record *rec;
        NTSTATUS status;

        status = dbwrap_watched_watch_recv(subreq, talloc_tos(), &rec, NULL,
                                           NULL);
        TALLOC_FREE(subreq);

        if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
                rec = dbwrap_fetch_locked(
                        state->ctx->db, talloc_tos(),
                        string_term_tdb_data(state->name));
                if (rec == NULL) {
                        status = map_nt_error_from_unix(errno);
                } else {
                        status = NT_STATUS_OK;
                }
        }

        if (tevent_req_nterror(req, status)) {
                return;
        }
        status = g_lock_trylock(rec, self, state->type, &blocker);
        if (NT_STATUS_IS_OK(status)) {
                TALLOC_FREE(rec);
                tevent_req_done(req);
                return;
        }
        if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
                TALLOC_FREE(rec);
                tevent_req_nterror(req, status);
                return;
        }
        subreq = dbwrap_watched_watch_send(state, state->ev, rec, blocker);
        TALLOC_FREE(rec);
        if (tevent_req_nomem(subreq, req)) {
                return;
        }
        if (!tevent_req_set_endtime(
                    subreq, state->ev,
                    timeval_current_ofs(5 + sys_random() % 5, 0))) {
                tevent_req_oom(req);
                return;
        }
        tevent_req_set_callback(subreq, g_lock_lock_retry, req);
        return;
}
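
/*
 * The endtime of five to nine seconds on each watch acts as a safety
 * net: if the wakeup from the lock holder is lost (for example because
 * the holder died at the wrong moment), NT_STATUS_IO_TIMEOUT from the
 * watch is treated above as "re-fetch the record and try again", not
 * as an error. The randomization presumably spreads out retries when
 * many waiters queue on the same record.
 */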

NTSTATUS g_lock_lock_recv(struct tevent_req *req)
{
        return tevent_req_simple_recv_ntstatus(req);
}

NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, const char *name,
                     enum g_lock_type type, struct timeval timeout)
{
        TALLOC_CTX *frame = talloc_stackframe();
        struct tevent_context *ev;
        struct tevent_req *req;
        struct timeval end;
        NTSTATUS status = NT_STATUS_NO_MEMORY;

        ev = samba_tevent_context_init(frame);
        if (ev == NULL) {
                goto fail;
        }
        req = g_lock_lock_send(frame, ev, ctx, name, type);
        if (req == NULL) {
                goto fail;
        }
        end = timeval_current_ofs(timeout.tv_sec, timeout.tv_usec);
        if (!tevent_req_set_endtime(req, ev, end)) {
                goto fail;
        }
        if (!tevent_req_poll_ntstatus(req, ev, &status)) {
                goto fail;
        }
        status = g_lock_lock_recv(req);
fail:
        TALLOC_FREE(frame);
        return status;
}
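
/*
 * Typical synchronous usage (a sketch; "browse" is only an example
 * lock name, error handling abbreviated):
 *
 *	status = g_lock_lock(ctx, "browse", G_LOCK_WRITE,
 *			     timeval_set(10, 0));
 *	if (NT_STATUS_IS_OK(status)) {
 *		... critical section ...
 *		g_lock_unlock(ctx, "browse");
 *	}
 */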

NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, const char *name)
{
        struct server_id self = messaging_server_id(ctx->msg);
        struct db_record *rec = NULL;
        struct g_lock_rec *locks = NULL;
        size_t i, num_locks;
        NTSTATUS status;
        TDB_DATA value, userdata;

        rec = dbwrap_fetch_locked(ctx->db, talloc_tos(),
                                  string_term_tdb_data(name));
        if (rec == NULL) {
                DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
                status = NT_STATUS_INTERNAL_ERROR;
                goto done;
        }

        value = dbwrap_record_get_value(rec);

        status = g_lock_get_talloc(talloc_tos(), value, &locks, &num_locks,
                                   &userdata.dptr, &userdata.dsize);
        if (!NT_STATUS_IS_OK(status)) {
                DBG_DEBUG("g_lock_get for %s failed: %s\n", name,
                          nt_errstr(status));
                status = NT_STATUS_FILE_INVALID;
                goto done;
        }
        for (i=0; i<num_locks; i++) {
                if (serverid_equal(&self, &locks[i].pid)) {
                        break;
                }
        }
        if (i == num_locks) {
                DBG_DEBUG("Lock not found, num_locks=%zu\n", num_locks);
                status = NT_STATUS_NOT_FOUND;
                goto done;
        }

        locks[i] = locks[num_locks-1];
        num_locks -= 1;

        if ((num_locks == 0) && (userdata.dsize == 0)) {
                status = dbwrap_record_delete(rec);
        } else {
                status = g_lock_record_store(
                        rec, locks, num_locks, userdata.dptr, userdata.dsize);
        }
        if (!NT_STATUS_IS_OK(status)) {
                DBG_WARNING("Could not store record: %s\n", nt_errstr(status));
                goto done;
        }

        status = NT_STATUS_OK;
done:
        TALLOC_FREE(rec);
        TALLOC_FREE(locks);
        return status;
}

NTSTATUS g_lock_write_data(struct g_lock_ctx *ctx, const char *name,
                           const uint8_t *buf, size_t buflen)
{
        struct server_id self = messaging_server_id(ctx->msg);
        struct db_record *rec = NULL;
        struct g_lock_rec *locks = NULL;
        size_t i, num_locks;
        NTSTATUS status;
        TDB_DATA value;

        rec = dbwrap_fetch_locked(ctx->db, talloc_tos(),
                                  string_term_tdb_data(name));
        if (rec == NULL) {
                DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
                status = NT_STATUS_INTERNAL_ERROR;
                goto done;
        }

        value = dbwrap_record_get_value(rec);

        status = g_lock_get_talloc(talloc_tos(), value, &locks, &num_locks,
                                   NULL, NULL);
        if (!NT_STATUS_IS_OK(status)) {
                DBG_DEBUG("g_lock_get for %s failed: %s\n", name,
                          nt_errstr(status));
                status = NT_STATUS_FILE_INVALID;
                goto done;
        }
        for (i=0; i<num_locks; i++) {
                if (server_id_equal(&self, &locks[i].pid) &&
                    (locks[i].lock_type == G_LOCK_WRITE)) {
                        break;
                }
        }
        if (i == num_locks) {
                DBG_DEBUG("Not locked by us\n");
                status = NT_STATUS_NOT_LOCKED;
                goto done;
        }

        status = g_lock_record_store(rec, locks, num_locks, buf, buflen);

done:
        TALLOC_FREE(locks);
        TALLOC_FREE(rec);
        return status;
}
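
/*
 * Note the interaction with g_lock_unlock() above: data written here
 * survives unlocks. The record is only deleted once the last lock
 * entry is gone *and* the data has been cleared again with a
 * zero-length write.
 */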

struct g_lock_locks_state {
        int (*fn)(const char *name, void *private_data);
        void *private_data;
};

static int g_lock_locks_fn(struct db_record *rec, void *priv)
{
        TDB_DATA key;
        struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;

        key = dbwrap_record_get_key(rec);
        if ((key.dsize == 0) || (key.dptr[key.dsize-1] != 0)) {
                DEBUG(1, ("invalid key in g_lock.tdb, ignoring\n"));
                return 0;
        }
        return state->fn((char *)key.dptr, state->private_data);
}

int g_lock_locks(struct g_lock_ctx *ctx,
                 int (*fn)(const char *name, void *private_data),
                 void *private_data)
{
        struct g_lock_locks_state state;
        NTSTATUS status;
        int count;

        state.fn = fn;
        state.private_data = private_data;

        status = dbwrap_traverse_read(ctx->db, g_lock_locks_fn, &state, &count);
        if (!NT_STATUS_IS_OK(status)) {
                return -1;
        }
        return count;
}

NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, const char *name,
                     void (*fn)(const struct g_lock_rec *locks,
                                size_t num_locks,
                                const uint8_t *data,
                                size_t datalen,
                                void *private_data),
                     void *private_data)
{
        TDB_DATA data;
        size_t num_locks;
        struct g_lock_rec *locks = NULL;
        uint8_t *userdata;
        size_t userdatalen;
        NTSTATUS status;

        status = dbwrap_fetch_bystring(ctx->db, talloc_tos(), name, &data);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        if ((data.dsize == 0) || (data.dptr == NULL)) {
                return NT_STATUS_OK;
        }

        status = g_lock_get_talloc(talloc_tos(), data, &locks, &num_locks,
                                   &userdata, &userdatalen);
        if (!NT_STATUS_IS_OK(status)) {
                DBG_DEBUG("g_lock_get for %s failed: %s\n", name,
                          nt_errstr(status));
                TALLOC_FREE(data.dptr);
                return NT_STATUS_INTERNAL_ERROR;
        }

        fn(locks, num_locks, userdata, userdatalen, private_data);

        TALLOC_FREE(locks);
        TALLOC_FREE(data.dptr);
        return NT_STATUS_OK;
}
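
/*
 * Example dump callback (a sketch; "print_rec" is a hypothetical
 * helper, not part of this file):
 *
 *	static void print_rec(const struct g_lock_rec *locks,
 *			      size_t num_locks,
 *			      const uint8_t *data, size_t datalen,
 *			      void *private_data)
 *	{
 *		struct server_id_buf tmp;
 *		size_t i;
 *
 *		for (i=0; i<num_locks; i++) {
 *			d_printf("%s: %s\n",
 *				 server_id_str_buf(locks[i].pid, &tmp),
 *				 (locks[i].lock_type == G_LOCK_WRITE) ?
 *				 "WRITE" : "READ");
 *		}
 *	}
 *
 *	status = g_lock_dump(ctx, "browse", print_rec, NULL);
 */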

static bool g_lock_init_all(TALLOC_CTX *mem_ctx,
                            struct tevent_context **pev,
                            struct messaging_context **pmsg,
                            struct g_lock_ctx **pg_ctx)
{
        struct tevent_context *ev = NULL;
        struct messaging_context *msg = NULL;
        struct g_lock_ctx *g_ctx = NULL;

        ev = samba_tevent_context_init(mem_ctx);
        if (ev == NULL) {
                d_fprintf(stderr, "ERROR: could not init event context\n");
                goto fail;
        }
        msg = messaging_init(mem_ctx, ev);
        if (msg == NULL) {
                d_fprintf(stderr, "ERROR: could not init messaging context\n");
                goto fail;
        }
        g_ctx = g_lock_ctx_init(mem_ctx, msg);
        if (g_ctx == NULL) {
                d_fprintf(stderr, "ERROR: could not init g_lock context\n");
                goto fail;
        }

        *pev = ev;
        *pmsg = msg;
        *pg_ctx = g_ctx;
        return true;
fail:
        TALLOC_FREE(g_ctx);
        TALLOC_FREE(msg);
        TALLOC_FREE(ev);
        return false;
}

NTSTATUS g_lock_do(const char *name, enum g_lock_type lock_type,
                   struct timeval timeout,
                   void (*fn)(void *private_data), void *private_data)
{
        struct tevent_context *ev = NULL;
        struct messaging_context *msg = NULL;
        struct g_lock_ctx *g_ctx = NULL;
        NTSTATUS status;

        if (!g_lock_init_all(talloc_tos(), &ev, &msg, &g_ctx)) {
                status = NT_STATUS_ACCESS_DENIED;
                goto done;
        }

        status = g_lock_lock(g_ctx, name, lock_type, timeout);
        if (!NT_STATUS_IS_OK(status)) {
                goto done;
        }
        fn(private_data);
        g_lock_unlock(g_ctx, name);

done:
        TALLOC_FREE(g_ctx);
        TALLOC_FREE(msg);
        TALLOC_FREE(ev);
        return status;
}
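
/*
 * g_lock_do() is the one-shot convenience wrapper: it sets up its own
 * contexts, takes the lock, runs the callback and unlocks again. A
 * sketch (the callback is hypothetical):
 *
 *	static void do_work(void *private_data)
 *	{
 *		... runs while "mylock" is held ...
 *	}
 *
 *	status = g_lock_do("mylock", G_LOCK_WRITE, timeval_set(10, 0),
 *			   do_work, NULL);
 */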