/*
   Unix SMB/CIFS implementation.
   global locks based on dbwrap and messaging
   Copyright (C) 2009 by Volker Lendecke

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "system/filesys.h"
#include "lib/util/server_id.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "dbwrap/dbwrap_watch.h"
#include "g_lock.h"
#include "util_tdb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "messages.h"
#include "serverid.h"
struct g_lock_ctx {
	struct db_context *db;
	struct messaging_context *msg;
};
/*
 * The "g_lock.tdb" file contains records, indexed by the 0-terminated
 * lockname. The record contains an array of "struct g_lock_rec"
 * structures.
 */

#define G_LOCK_REC_LENGTH (SERVER_ID_BUF_LENGTH+1)
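/*
 * Layout of a record value, as implied by g_lock_parse() and
 * g_lock_store() below:
 *
 *   [uint32 num_recs][num_recs * G_LOCK_REC_LENGTH lock records][user data]
 *
 * Each lock record is one byte of g_lock_type followed by a marshalled
 * struct server_id (SERVER_ID_BUF_LENGTH bytes).
 */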
static void g_lock_rec_put(uint8_t buf[G_LOCK_REC_LENGTH],
			   const struct g_lock_rec rec)
{
	SCVAL(buf, 0, rec.lock_type);
	server_id_put(buf+1, rec.pid);
}
static void g_lock_rec_get(struct g_lock_rec *rec,
			   const uint8_t buf[G_LOCK_REC_LENGTH])
{
	rec->lock_type = CVAL(buf, 0);
	server_id_get(&rec->pid, buf+1);
}
struct g_lock {
	uint8_t *recsbuf;
	size_t num_recs;
	uint8_t *data;
	size_t datalen;
};

static bool g_lock_parse(uint8_t *buf, size_t buflen, struct g_lock *lck)
{
	size_t found_recs, data_ofs;

	if (buflen < sizeof(uint32_t)) {
		*lck = (struct g_lock) {0};
		return true;
	}

	found_recs = IVAL(buf, 0);

	if (found_recs > buflen/G_LOCK_REC_LENGTH) {
		return false;
	}

	buf += sizeof(uint32_t);
	buflen -= sizeof(uint32_t);
	data_ofs = found_recs * G_LOCK_REC_LENGTH;

	*lck = (struct g_lock) {
		.recsbuf = buf, .num_recs = found_recs,
		.data = buf+data_ofs, .datalen = buflen-data_ofs
	};

	return true;
}
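/*
 * Note that g_lock_parse() does not copy anything: lck->recsbuf and
 * lck->data point straight into the buffer handed in, so the struct
 * g_lock is only valid as long as that buffer stays alive and
 * unmodified.
 */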
static void g_lock_get_rec(struct g_lock *lck, size_t i,
			   struct g_lock_rec *rec)
{
	if (i >= lck->num_recs) {
		abort();
	}
	g_lock_rec_get(rec, lck->recsbuf + i*G_LOCK_REC_LENGTH);
}
static void g_lock_rec_del(struct g_lock *lck, size_t i)
{
	if (i >= lck->num_recs) {
		abort();
	}
	lck->num_recs -= 1;
	if (i < lck->num_recs) {
		uint8_t *recptr = lck->recsbuf + i*G_LOCK_REC_LENGTH;
		memcpy(recptr, lck->recsbuf + lck->num_recs*G_LOCK_REC_LENGTH,
		       G_LOCK_REC_LENGTH);
	}
}
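/*
 * g_lock_rec_del() removes entry "i" by moving the last record into its
 * slot and decrementing num_recs, so deletion is O(1) but the order of
 * the lock records is not preserved.
 */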
static NTSTATUS g_lock_store(struct db_record *rec, struct g_lock *lck,
			     struct g_lock_rec *add)
{
	uint8_t sizebuf[4];
	uint8_t addbuf[G_LOCK_REC_LENGTH];

	struct TDB_DATA dbufs[] = {
		{ .dptr = sizebuf, .dsize = sizeof(sizebuf) },
		{ .dptr = lck->recsbuf,
		  .dsize = lck->num_recs * G_LOCK_REC_LENGTH },
		{ 0 },
		{ .dptr = lck->data, .dsize = lck->datalen }
	};

	if (add != NULL) {
		g_lock_rec_put(addbuf, *add);

		dbufs[2] = (TDB_DATA) {
			.dptr = addbuf, .dsize = G_LOCK_REC_LENGTH
		};

		lck->num_recs += 1;
	}

	SIVAL(sizebuf, 0, lck->num_recs);

	return dbwrap_record_storev(rec, dbufs, ARRAY_SIZE(dbufs), 0);
}
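/*
 * The record is rewritten in a single dbwrap_record_storev() call from
 * four chunks: the 32-bit record count, the existing records, the
 * optional record to add and the opaque data blob. Appending via the
 * "add" argument thus avoids building a new contiguous buffer first.
 */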
struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
				   struct messaging_context *msg)
{
	struct g_lock_ctx *result;
	struct db_context *backend;
	char *db_path;

	result = talloc(mem_ctx, struct g_lock_ctx);
	if (result == NULL) {
		return NULL;
	}
	result->msg = msg;

	db_path = lock_path("g_lock.tdb");
	if (db_path == NULL) {
		TALLOC_FREE(result);
		return NULL;
	}

	backend = db_open(result, db_path, 0,
			  TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
			  O_RDWR|O_CREAT, 0600,
			  DBWRAP_LOCK_ORDER_2, DBWRAP_FLAG_NONE);
	TALLOC_FREE(db_path);
	if (backend == NULL) {
		DEBUG(1, ("g_lock_init: Could not open g_lock.tdb\n"));
		TALLOC_FREE(result);
		return NULL;
	}

	result->db = db_open_watched(result, backend, msg);
	if (result->db == NULL) {
		DBG_WARNING("g_lock_init: db_open_watched failed\n");
		TALLOC_FREE(result);
		return NULL;
	}
	return result;
}
static bool g_lock_conflicts(enum g_lock_type l1, enum g_lock_type l2)
{
	/*
	 * Only tested write locks so far. Very likely this routine
	 * needs to be fixed for read locks....
	 */
	if ((l1 == G_LOCK_READ) && (l2 == G_LOCK_READ)) {
		return false;
	}
	return true;
}
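/*
 * Resulting conflict matrix: two read locks are compatible, every
 * combination involving a write lock conflicts.
 */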
static NTSTATUS g_lock_trylock(struct db_record *rec, struct server_id self,
			       enum g_lock_type type,
			       struct server_id *blocker)
{
	TDB_DATA data;
	size_t i;
	struct g_lock lck;
	NTSTATUS status;
	bool modified = false;
	bool ok;

	data = dbwrap_record_get_value(rec);

	ok = g_lock_parse(data.dptr, data.dsize, &lck);
	if (!ok) {
		return NT_STATUS_INTERNAL_DB_CORRUPTION;
	}

	if ((type == G_LOCK_READ) && (lck.num_recs > 0)) {
		struct g_lock_rec check_rec;

		/*
		 * Read locks can stay around forever if the process
		 * dies. Do a heuristic check for process existence:
		 * Check one random process for existence. Hopefully
		 * this will keep runaway read locks under control.
		 */
		i = generate_random() % lck.num_recs;

		g_lock_get_rec(&lck, i, &check_rec);

		if (!serverid_exists(&check_rec.pid)) {
			g_lock_rec_del(&lck, i);
			modified = true;
		}
	}

	for (i=0; i<lck.num_recs; i++) {
		struct g_lock_rec lock;

		g_lock_get_rec(&lck, i, &lock);

		if (serverid_equal(&self, &lock.pid)) {
			if (lock.lock_type == type) {
				status = NT_STATUS_WAS_LOCKED;
				goto done;
			}

			/*
			 * Remove "our" lock entry. Re-add it later
			 * with our new lock type. The last record was
			 * swapped into slot "i", so look at that slot
			 * again in the next round.
			 */
			g_lock_rec_del(&lck, i);
			modified = true;
			i -= 1;
			continue;
		}

		if (g_lock_conflicts(type, lock.lock_type)) {
			struct server_id pid = lock.pid;

			/*
			 * As the serverid_exists might recurse into
			 * the g_lock code, we use
			 * SERVERID_UNIQUE_ID_NOT_TO_VERIFY to avoid the loop
			 */
			pid.unique_id = SERVERID_UNIQUE_ID_NOT_TO_VERIFY;

			if (serverid_exists(&pid)) {
				status = NT_STATUS_LOCK_NOT_GRANTED;
				*blocker = lock.pid;
				goto done;
			}

			/*
			 * Delete stale conflicting entry, and look at
			 * the record swapped into slot "i" again.
			 */
			g_lock_rec_del(&lck, i);
			modified = true;
			i -= 1;
		}
	}

	status = NT_STATUS_OK;

done:
	if (NT_STATUS_IS_OK(status)) {
		struct g_lock_rec mylock = { .pid = self, .lock_type = type };
		NTSTATUS store_status;
		store_status = g_lock_store(rec, &lck, &mylock);
		if (!NT_STATUS_IS_OK(store_status)) {
			DBG_WARNING("g_lock_record_store failed: %s\n",
				    nt_errstr(store_status));
			status = store_status;
		}
	} else if (modified) {
		NTSTATUS store_status;
		store_status = g_lock_store(rec, &lck, NULL);
		if (!NT_STATUS_IS_OK(store_status)) {
			DBG_WARNING("g_lock_record_store failed: %s\n",
				    nt_errstr(store_status));
			status = store_status;
		}
	}
	return status;
}
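/*
 * g_lock_trylock() runs under the record lock held by dbwrap_do_locked().
 * It either stores the caller as an additional owner and returns
 * NT_STATUS_OK, reports that this server already holds the lock
 * (NT_STATUS_WAS_LOCKED), or returns NT_STATUS_LOCK_NOT_GRANTED with
 * *blocker set to one live conflicting holder, which the async callers
 * below then watch.
 */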
struct g_lock_lock_state {
	struct tevent_context *ev;
	struct g_lock_ctx *ctx;
	const char *name;
	enum g_lock_type type;
};

static void g_lock_lock_retry(struct tevent_req *subreq);

struct g_lock_lock_fn_state {
	struct g_lock_lock_state *state;
	struct server_id self;

	struct tevent_req *watch_req;
	NTSTATUS status;
};
static void g_lock_lock_fn(struct db_record *rec, void *private_data)
{
	struct g_lock_lock_fn_state *state = private_data;
	struct server_id blocker;

	state->status = g_lock_trylock(rec, state->self, state->state->type,
				       &blocker);
	if (!NT_STATUS_EQUAL(state->status, NT_STATUS_LOCK_NOT_GRANTED)) {
		return;
	}

	state->watch_req = dbwrap_watched_watch_send(
		state->state, state->state->ev, rec, blocker);
}
struct tevent_req *g_lock_lock_send(TALLOC_CTX *mem_ctx,
				    struct tevent_context *ev,
				    struct g_lock_ctx *ctx,
				    const char *name,
				    enum g_lock_type type)
{
	struct tevent_req *req;
	struct g_lock_lock_state *state;
	struct g_lock_lock_fn_state fn_state;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state, struct g_lock_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->ctx = ctx;
	state->name = name;
	state->type = type;

	fn_state = (struct g_lock_lock_fn_state) {
		.state = state, .self = messaging_server_id(ctx->msg)
	};

	status = dbwrap_do_locked(ctx->db, string_term_tdb_data(name),
				  g_lock_lock_fn, &fn_state);
	if (tevent_req_nterror(req, status)) {
		DBG_DEBUG("dbwrap_do_locked failed: %s\n",
			  nt_errstr(status));
		return tevent_req_post(req, ev);
	}

	if (NT_STATUS_IS_OK(fn_state.status)) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}
	if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
		tevent_req_nterror(req, fn_state.status);
		return tevent_req_post(req, ev);
	}

	if (tevent_req_nomem(fn_state.watch_req, req)) {
		return tevent_req_post(req, ev);
	}

	if (!tevent_req_set_endtime(
		    fn_state.watch_req, state->ev,
		    timeval_current_ofs(5 + sys_random() % 5, 0))) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);
	return req;
}
static void g_lock_lock_retry(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct g_lock_lock_state *state = tevent_req_data(
		req, struct g_lock_lock_state);
	struct g_lock_lock_fn_state fn_state;
	NTSTATUS status;

	status = dbwrap_watched_watch_recv(subreq, NULL, NULL, NULL, NULL);
	DBG_DEBUG("watch_recv returned %s\n", nt_errstr(status));
	TALLOC_FREE(subreq);

	if (!NT_STATUS_IS_OK(status) &&
	    !NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
		tevent_req_nterror(req, status);
		return;
	}

	fn_state = (struct g_lock_lock_fn_state) {
		.state = state, .self = messaging_server_id(state->ctx->msg)
	};

	status = dbwrap_do_locked(state->ctx->db,
				  string_term_tdb_data(state->name),
				  g_lock_lock_fn, &fn_state);
	if (tevent_req_nterror(req, status)) {
		DBG_DEBUG("dbwrap_do_locked failed: %s\n",
			  nt_errstr(status));
		return;
	}

	if (NT_STATUS_IS_OK(fn_state.status)) {
		tevent_req_done(req);
		return;
	}
	if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
		tevent_req_nterror(req, fn_state.status);
		return;
	}

	if (tevent_req_nomem(fn_state.watch_req, req)) {
		return;
	}

	if (!tevent_req_set_endtime(
		    fn_state.watch_req, state->ev,
		    timeval_current_ofs(5 + sys_random() % 5, 0))) {
		tevent_req_oom(req);
		return;
	}
	tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);
}
NTSTATUS g_lock_lock_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, const char *name,
		     enum g_lock_type type, struct timeval timeout)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	struct timeval end;
	NTSTATUS status = NT_STATUS_NO_MEMORY;

	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}
	req = g_lock_lock_send(frame, ev, ctx, name, type);
	if (req == NULL) {
		goto fail;
	}
	end = timeval_current_ofs(timeout.tv_sec, timeout.tv_usec);
	if (!tevent_req_set_endtime(req, ev, end)) {
		goto fail;
	}
	if (!tevent_req_poll_ntstatus(req, ev, &status)) {
		goto fail;
	}
	status = g_lock_lock_recv(req);
fail:
	TALLOC_FREE(frame);
	return status;
}
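/*
 * Typical synchronous use (sketch only; error handling and context
 * setup are omitted, "my_lock_name" is just an example lockname and
 * "ctx" comes from g_lock_ctx_init()):
 *
 *	status = g_lock_lock(ctx, "my_lock_name", G_LOCK_WRITE,
 *			     timeval_set(10, 0));
 *	if (NT_STATUS_IS_OK(status)) {
 *		... critical section ...
 *		g_lock_unlock(ctx, "my_lock_name");
 *	}
 */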
struct g_lock_unlock_state {
	const char *name;
	struct server_id self;
	NTSTATUS status;
};
static void g_lock_unlock_fn(struct db_record *rec,
			     void *private_data)
{
	struct g_lock_unlock_state *state = private_data;
	TDB_DATA value;
	struct g_lock lck;
	size_t i;
	bool ok;

	value = dbwrap_record_get_value(rec);

	ok = g_lock_parse(value.dptr, value.dsize, &lck);
	if (!ok) {
		DBG_DEBUG("g_lock_get for %s failed\n", state->name);
		state->status = NT_STATUS_FILE_INVALID;
		return;
	}
	for (i=0; i<lck.num_recs; i++) {
		struct g_lock_rec lockrec;
		g_lock_get_rec(&lck, i, &lockrec);
		if (serverid_equal(&state->self, &lockrec.pid)) {
			break;
		}
	}
	if (i == lck.num_recs) {
		DBG_DEBUG("Lock not found, num_rec=%zu\n", lck.num_recs);
		state->status = NT_STATUS_NOT_FOUND;
		return;
	}

	g_lock_rec_del(&lck, i);

	if ((lck.num_recs == 0) && (lck.datalen == 0)) {
		state->status = dbwrap_record_delete(rec);
		return;
	}
	state->status = g_lock_store(rec, &lck, NULL);
}
NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, const char *name)
{
	struct g_lock_unlock_state state = {
		.self = messaging_server_id(ctx->msg), .name = name
	};
	NTSTATUS status;

	status = dbwrap_do_locked(ctx->db, string_term_tdb_data(name),
				  g_lock_unlock_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_WARNING("dbwrap_do_locked failed: %s\n",
			    nt_errstr(status));
		return status;
	}
	if (!NT_STATUS_IS_OK(state.status)) {
		DBG_WARNING("g_lock_unlock_fn failed: %s\n",
			    nt_errstr(state.status));
		return state.status;
	}

	return NT_STATUS_OK;
}
struct g_lock_write_data_state {
	const char *name;
	struct server_id self;
	const uint8_t *data;
	size_t datalen;
	NTSTATUS status;
};
static void g_lock_write_data_fn(struct db_record *rec,
				 void *private_data)
{
	struct g_lock_write_data_state *state = private_data;
	TDB_DATA value;
	struct g_lock lck;
	size_t i;
	bool ok;

	value = dbwrap_record_get_value(rec);

	ok = g_lock_parse(value.dptr, value.dsize, &lck);
	if (!ok) {
		DBG_DEBUG("g_lock_parse for %s failed\n", state->name);
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}
	for (i=0; i<lck.num_recs; i++) {
		struct g_lock_rec lockrec;
		g_lock_get_rec(&lck, i, &lockrec);
		if ((lockrec.lock_type == G_LOCK_WRITE) &&
		    serverid_equal(&state->self, &lockrec.pid)) {
			break;
		}
	}
	if (i == lck.num_recs) {
		DBG_DEBUG("Not locked by us\n");
		state->status = NT_STATUS_NOT_LOCKED;
		return;
	}

	lck.data = discard_const_p(uint8_t, state->data);
	lck.datalen = state->datalen;
	state->status = g_lock_store(rec, &lck, NULL);
}
NTSTATUS g_lock_write_data(struct g_lock_ctx *ctx, const char *name,
			   const uint8_t *buf, size_t buflen)
{
	struct g_lock_write_data_state state = {
		.name = name, .self = messaging_server_id(ctx->msg),
		.data = buf, .datalen = buflen
	};
	NTSTATUS status;

	status = dbwrap_do_locked(ctx->db, string_term_tdb_data(name),
				  g_lock_write_data_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_WARNING("dbwrap_do_locked failed: %s\n",
			    nt_errstr(status));
		return status;
	}
	if (!NT_STATUS_IS_OK(state.status)) {
		DBG_WARNING("g_lock_write_data_fn failed: %s\n",
			    nt_errstr(state.status));
		return state.status;
	}

	return NT_STATUS_OK;
}
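/*
 * g_lock_write_data() only succeeds while the caller holds the
 * G_LOCK_WRITE lock on "name"; the data blob is stored in the same
 * record, after the lock entries, and is handed back by g_lock_dump().
 */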
struct g_lock_locks_state {
	int (*fn)(const char *name, void *private_data);
	void *private_data;
};

static int g_lock_locks_fn(struct db_record *rec, void *priv)
{
	TDB_DATA key;
	struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;

	key = dbwrap_record_get_key(rec);
	if ((key.dsize == 0) || (key.dptr[key.dsize-1] != 0)) {
		DEBUG(1, ("invalid key in g_lock.tdb, ignoring\n"));
		return 0;
	}
	return state->fn((char *)key.dptr, state->private_data);
}
int g_lock_locks(struct g_lock_ctx *ctx,
		 int (*fn)(const char *name, void *private_data),
		 void *private_data)
{
	struct g_lock_locks_state state;
	NTSTATUS status;
	int count;

	state.fn = fn;
	state.private_data = private_data;

	status = dbwrap_traverse_read(ctx->db, g_lock_locks_fn, &state, &count);
	if (!NT_STATUS_IS_OK(status)) {
		return -1;
	}
	return count;
}
struct g_lock_dump_state {
	TALLOC_CTX *mem_ctx;
	const char *name;
	void (*fn)(const struct g_lock_rec *locks,
		   size_t num_locks,
		   const uint8_t *data,
		   size_t datalen,
		   void *private_data);
	void *private_data;
	NTSTATUS status;
};
static void g_lock_dump_fn(TDB_DATA key, TDB_DATA data,
			   void *private_data)
{
	struct g_lock_dump_state *state = private_data;
	struct g_lock_rec *recs;
	struct g_lock lck;
	size_t i;
	bool ok;

	ok = g_lock_parse(data.dptr, data.dsize, &lck);
	if (!ok) {
		DBG_DEBUG("g_lock_parse failed for %s\n",
			  state->name);
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}

	recs = talloc_array(state->mem_ctx, struct g_lock_rec, lck.num_recs);
	if (recs == NULL) {
		DBG_DEBUG("talloc failed\n");
		state->status = NT_STATUS_NO_MEMORY;
		return;
	}

	for (i=0; i<lck.num_recs; i++) {
		g_lock_get_rec(&lck, i, &recs[i]);
	}

	state->fn(recs, lck.num_recs, lck.data, lck.datalen,
		  state->private_data);

	TALLOC_FREE(recs);

	state->status = NT_STATUS_OK;
}
NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, const char *name,
		     void (*fn)(const struct g_lock_rec *locks,
				size_t num_locks,
				const uint8_t *data,
				size_t datalen,
				void *private_data),
		     void *private_data)
{
	struct g_lock_dump_state state = {
		.mem_ctx = ctx, .name = name,
		.fn = fn, .private_data = private_data
	};
	NTSTATUS status;

	status = dbwrap_parse_record(ctx->db, string_term_tdb_data(name),
				     g_lock_dump_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dbwrap_parse_record returned %s\n",
			  nt_errstr(status));
		return status;
	}
	if (!NT_STATUS_IS_OK(state.status)) {
		DBG_DEBUG("g_lock_dump_fn returned %s\n",
			  nt_errstr(state.status));
		return state.status;
	}
	return NT_STATUS_OK;
}
static bool g_lock_init_all(TALLOC_CTX *mem_ctx,
			    struct tevent_context **pev,
			    struct messaging_context **pmsg,
			    struct g_lock_ctx **pg_ctx)
{
	struct tevent_context *ev = NULL;
	struct messaging_context *msg = NULL;
	struct g_lock_ctx *g_ctx = NULL;

	ev = samba_tevent_context_init(mem_ctx);
	if (ev == NULL) {
		d_fprintf(stderr, "ERROR: could not init event context\n");
		goto fail;
	}
	msg = messaging_init(mem_ctx, ev);
	if (msg == NULL) {
		d_fprintf(stderr, "ERROR: could not init messaging context\n");
		goto fail;
	}
	g_ctx = g_lock_ctx_init(mem_ctx, msg);
	if (g_ctx == NULL) {
		d_fprintf(stderr, "ERROR: could not init g_lock context\n");
		goto fail;
	}

	*pev = ev;
	*pmsg = msg;
	*pg_ctx = g_ctx;
	return true;
fail:
	TALLOC_FREE(g_ctx);
	TALLOC_FREE(msg);
	TALLOC_FREE(ev);
	return false;
}
NTSTATUS g_lock_do(const char *name, enum g_lock_type lock_type,
		   struct timeval timeout,
		   void (*fn)(void *private_data), void *private_data)
{
	struct tevent_context *ev = NULL;
	struct messaging_context *msg = NULL;
	struct g_lock_ctx *g_ctx = NULL;
	NTSTATUS status;

	if (!g_lock_init_all(talloc_tos(), &ev, &msg, &g_ctx)) {
		status = NT_STATUS_ACCESS_DENIED;
		goto done;
	}

	status = g_lock_lock(g_ctx, name, lock_type, timeout);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}
	fn(private_data);
	g_lock_unlock(g_ctx, name);

done:
	TALLOC_FREE(g_ctx);
	TALLOC_FREE(msg);
	TALLOC_FREE(ev);
	return status;
}
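/*
 * Example of the convenience wrapper (sketch only; "do_work" and
 * "my_lock_name" are hypothetical and not part of this file):
 *
 *	static void do_work(void *private_data)
 *	{
 *		... runs while "my_lock_name" is held as G_LOCK_WRITE ...
 *	}
 *
 *	status = g_lock_do("my_lock_name", G_LOCK_WRITE,
 *			   timeval_set(10, 0), do_work, NULL);
 */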