/*
   Unix SMB/CIFS implementation.
   global locks based on dbwrap and messaging
   Copyright (C) 2009 by Volker Lendecke

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "system/filesys.h"
#include "lib/util/server_id.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "dbwrap/dbwrap_watch.h"
#include "g_lock.h"
#include "util_tdb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "messages.h"
#include "serverid.h"

struct g_lock_ctx {
        struct db_context *db;
        struct messaging_context *msg;
};

/*
 * The "g_lock.tdb" file contains records, indexed by the 0-terminated
 * lockname. The record contains an array of "struct g_lock_rec"
 * structures.
 */

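/*
 * Serialized record layout, as implemented by g_lock_put()/g_lock_get()
 * below (illustrative summary, not part of the original header comment):
 *
 *   [uint32 num_locks][g_lock_rec 0]...[g_lock_rec num_locks-1][userdata]
 *
 * Each serialized g_lock_rec is G_LOCK_REC_LENGTH bytes: one byte of
 * lock_type followed by a server_id_put()-encoded server_id.
 */
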
struct g_lock_rec {
        enum g_lock_type lock_type;
        struct server_id pid;
};

#define G_LOCK_REC_LENGTH (SERVER_ID_BUF_LENGTH+1)

static void g_lock_rec_put(uint8_t buf[G_LOCK_REC_LENGTH],
                           const struct g_lock_rec rec)
{
        SCVAL(buf, 0, rec.lock_type);
        server_id_put(buf+1, rec.pid);
}

static void g_lock_rec_get(struct g_lock_rec *rec,
                           const uint8_t buf[G_LOCK_REC_LENGTH])
{
        rec->lock_type = CVAL(buf, 0);
        server_id_get(&rec->pid, buf+1);
}

static ssize_t g_lock_put(uint8_t *buf, size_t buflen,
                          const struct g_lock_rec *locks,
                          size_t num_locks,
                          const uint8_t *data, size_t datalen)
{
        size_t i, len, ofs;

        if (num_locks > UINT32_MAX/G_LOCK_REC_LENGTH) {
                return -1;
        }

        len = num_locks * G_LOCK_REC_LENGTH;

        len += sizeof(uint32_t);
        if (len < sizeof(uint32_t)) {
                return -1;
        }

        len += datalen;
        if (len < datalen) {
                return -1;
        }

        /* Buffer too small: tell the caller the required size */
        if (len > buflen) {
                return len;
        }

        ofs = 0;
        SIVAL(buf, ofs, num_locks);
        ofs += sizeof(uint32_t);

        for (i=0; i<num_locks; i++) {
                g_lock_rec_put(buf+ofs, locks[i]);
                ofs += G_LOCK_REC_LENGTH;
        }

        if ((data != NULL) && (datalen != 0)) {
                memcpy(buf+ofs, data, datalen);
        }

        return len;
}

static ssize_t g_lock_get(TDB_DATA recval,
                          struct g_lock_rec *locks, size_t num_locks,
                          uint8_t **data, size_t *datalen)
{
        size_t found_locks;

        if (recval.dsize < sizeof(uint32_t)) {
                /* Fresh or invalid record */
                found_locks = 0;
                goto done;
        }

        found_locks = IVAL(recval.dptr, 0);
        recval.dptr += sizeof(uint32_t);
        recval.dsize -= sizeof(uint32_t);

        if (found_locks > recval.dsize/G_LOCK_REC_LENGTH) {
                /* Invalid record */
                return -1;
        }

        if (found_locks <= num_locks) {
                size_t i;

                for (i=0; i<found_locks; i++) {
                        g_lock_rec_get(&locks[i], recval.dptr);
                        recval.dptr += G_LOCK_REC_LENGTH;
                        recval.dsize -= G_LOCK_REC_LENGTH;
                }
        } else {
                /*
                 * Not enough space passed in by the caller, don't
                 * parse the locks.
                 */
                recval.dptr += found_locks * G_LOCK_REC_LENGTH;
                recval.dsize -= found_locks * G_LOCK_REC_LENGTH;
        }

done:
        if (data != NULL) {
                *data = recval.dptr;
        }
        if (datalen != NULL) {
                *datalen = recval.dsize;
        }
        return found_locks;
}

static NTSTATUS g_lock_get_talloc(TALLOC_CTX *mem_ctx, TDB_DATA recval,
                                  struct g_lock_rec **plocks,
                                  size_t *pnum_locks,
                                  uint8_t **data, size_t *datalen)
{
        struct g_lock_rec *locks;
        ssize_t num_locks;

        num_locks = g_lock_get(recval, NULL, 0, NULL, NULL);
        if (num_locks == -1) {
                return NT_STATUS_INTERNAL_DB_CORRUPTION;
        }
        locks = talloc_array(mem_ctx, struct g_lock_rec, num_locks);
        if (locks == NULL) {
                return NT_STATUS_NO_MEMORY;
        }
        g_lock_get(recval, locks, num_locks, data, datalen);

        *plocks = locks;
        *pnum_locks = num_locks;

        return NT_STATUS_OK;
}

struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
                                   struct messaging_context *msg)
{
        struct g_lock_ctx *result;
        struct db_context *backend;
        char *db_path;

        result = talloc(mem_ctx, struct g_lock_ctx);
        if (result == NULL) {
                return NULL;
        }
        result->msg = msg;

        db_path = lock_path("g_lock.tdb");
        if (db_path == NULL) {
                TALLOC_FREE(result);
                return NULL;
        }

        backend = db_open(result, db_path, 0,
                          TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
                          O_RDWR|O_CREAT, 0600,
                          DBWRAP_LOCK_ORDER_2,
                          DBWRAP_FLAG_NONE);
        TALLOC_FREE(db_path);
        if (backend == NULL) {
                DEBUG(1, ("g_lock_init: Could not open g_lock.tdb\n"));
                TALLOC_FREE(result);
                return NULL;
        }

        result->db = db_open_watched(result, backend, msg);
        if (result->db == NULL) {
                DBG_WARNING("g_lock_init: db_open_watched failed\n");
                TALLOC_FREE(result);
                return NULL;
        }
        return result;
}

static bool g_lock_conflicts(enum g_lock_type l1, enum g_lock_type l2)
{
        /*
         * Only tested write locks so far. Very likely this routine
         * needs to be fixed for read locks....
         */
        if ((l1 == G_LOCK_READ) && (l2 == G_LOCK_READ)) {
                return false;
        }
        return true;
}

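/*
 * Compatibility implied by g_lock_conflicts() (summary added for
 * clarity, not in the original source): READ vs READ is the only
 * non-conflicting combination; any combination involving a WRITE
 * lock conflicts.
 */
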
static NTSTATUS g_lock_record_store(struct db_record *rec,
                                    const struct g_lock_rec *locks,
                                    size_t num_locks,
                                    const uint8_t *data, size_t datalen)
{
        ssize_t len;
        uint8_t *buf;
        NTSTATUS status;

        /* First pass with a NULL buffer just computes the required size */
        len = g_lock_put(NULL, 0, locks, num_locks, data, datalen);
        if (len == -1) {
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        buf = talloc_array(rec, uint8_t, len);
        if (buf == NULL) {
                return NT_STATUS_NO_MEMORY;
        }

        g_lock_put(buf, len, locks, num_locks, data, datalen);

        status = dbwrap_record_store(
                rec, (TDB_DATA) { .dptr = buf, .dsize = len }, 0);

        TALLOC_FREE(buf);

        return status;
}

static NTSTATUS g_lock_trylock(struct db_record *rec, struct server_id self,
                               enum g_lock_type type,
                               struct server_id *blocker)
{
        TDB_DATA data, userdata;
        size_t i, num_locks;
        struct g_lock_rec *locks, *tmp;
        NTSTATUS status;
        bool modified = false;

        data = dbwrap_record_get_value(rec);

        status = g_lock_get_talloc(talloc_tos(), data, &locks, &num_locks,
                                   &userdata.dptr, &userdata.dsize);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        for (i=0; i<num_locks; i++) {
                if (serverid_equal(&self, &locks[i].pid)) {
                        status = NT_STATUS_INTERNAL_ERROR;
                        goto done;
                }
                if (g_lock_conflicts(type, locks[i].lock_type)) {
                        struct server_id pid = locks[i].pid;

                        /*
                         * As the serverid_exists might recurse into
                         * the g_lock code, we use
                         * SERVERID_UNIQUE_ID_NOT_TO_VERIFY to avoid the loop
                         */
                        pid.unique_id = SERVERID_UNIQUE_ID_NOT_TO_VERIFY;

                        if (serverid_exists(&pid)) {
                                status = NT_STATUS_LOCK_NOT_GRANTED;
                                *blocker = locks[i].pid;
                                goto done;
                        }

                        /*
                         * Delete stale conflicting entry
                         */
                        locks[i] = locks[num_locks-1];
                        num_locks -= 1;
                        modified = true;
                }
        }

        tmp = talloc_realloc(talloc_tos(), locks, struct g_lock_rec,
                             num_locks+1);
        if (tmp == NULL) {
                status = NT_STATUS_NO_MEMORY;
                goto done;
        }
        locks = tmp;

        ZERO_STRUCT(locks[num_locks]);
        locks[num_locks].pid = self;
        locks[num_locks].lock_type = type;
        num_locks += 1;
        modified = true;

        status = NT_STATUS_OK;
done:
        if (modified) {
                NTSTATUS store_status;
                store_status = g_lock_record_store(
                        rec, locks, num_locks, userdata.dptr, userdata.dsize);
                if (!NT_STATUS_IS_OK(store_status)) {
                        DBG_WARNING("g_lock_record_store failed: %s\n",
                                    nt_errstr(store_status));
                        status = store_status;
                }
        }
        TALLOC_FREE(locks);
        return status;
}

struct g_lock_lock_state {
        struct tevent_context *ev;
        struct g_lock_ctx *ctx;
        const char *name;
        enum g_lock_type type;
};

static void g_lock_lock_retry(struct tevent_req *subreq);

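/*
 * Async locking flow (summary added for clarity, not in the original
 * source): g_lock_lock_send() fetches the record under the chainlock
 * and calls g_lock_trylock(). If a live blocker holds a conflicting
 * lock, we watch the record via dbwrap_watched_watch_send() and retry
 * in g_lock_lock_retry() once the record changes, or after a
 * randomized 5-10 second timeout so that blockers which died without
 * cleaning up are eventually detected as stale.
 */
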
struct tevent_req *g_lock_lock_send(TALLOC_CTX *mem_ctx,
                                    struct tevent_context *ev,
                                    struct g_lock_ctx *ctx,
                                    const char *name,
                                    enum g_lock_type type)
{
        struct tevent_req *req, *subreq;
        struct g_lock_lock_state *state;
        struct db_record *rec;
        struct server_id self, blocker;
        NTSTATUS status;

        req = tevent_req_create(mem_ctx, &state, struct g_lock_lock_state);
        if (req == NULL) {
                return NULL;
        }
        state->ev = ev;
        state->ctx = ctx;
        state->name = name;
        state->type = type;

        rec = dbwrap_fetch_locked(ctx->db, talloc_tos(),
                                  string_term_tdb_data(state->name));
        if (rec == NULL) {
                DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
                tevent_req_nterror(req, NT_STATUS_LOCK_NOT_GRANTED);
                return tevent_req_post(req, ev);
        }

        self = messaging_server_id(state->ctx->msg);

        status = g_lock_trylock(rec, self, state->type, &blocker);
        if (NT_STATUS_IS_OK(status)) {
                TALLOC_FREE(rec);
                tevent_req_done(req);
                return tevent_req_post(req, ev);
        }
        if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
                TALLOC_FREE(rec);
                tevent_req_nterror(req, status);
                return tevent_req_post(req, ev);
        }
        subreq = dbwrap_watched_watch_send(state, state->ev, rec, blocker);
        TALLOC_FREE(rec);
        if (tevent_req_nomem(subreq, req)) {
                return tevent_req_post(req, ev);
        }
        if (!tevent_req_set_endtime(
                    subreq, state->ev,
                    timeval_current_ofs(5 + sys_random() % 5, 0))) {
                tevent_req_oom(req);
                return tevent_req_post(req, ev);
        }
        tevent_req_set_callback(subreq, g_lock_lock_retry, req);
        return req;
}

static void g_lock_lock_retry(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct g_lock_lock_state *state = tevent_req_data(
                req, struct g_lock_lock_state);
        struct server_id self = messaging_server_id(state->ctx->msg);
        struct server_id blocker;
        struct db_record *rec;
        NTSTATUS status;

        status = dbwrap_watched_watch_recv(subreq, talloc_tos(), &rec, NULL,
                                           NULL);
        TALLOC_FREE(subreq);

        if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
                rec = dbwrap_fetch_locked(
                        state->ctx->db, talloc_tos(),
                        string_term_tdb_data(state->name));
                if (rec == NULL) {
                        status = map_nt_error_from_unix(errno);
                } else {
                        status = NT_STATUS_OK;
                }
        }

        if (tevent_req_nterror(req, status)) {
                return;
        }
        status = g_lock_trylock(rec, self, state->type, &blocker);
        if (NT_STATUS_IS_OK(status)) {
                TALLOC_FREE(rec);
                tevent_req_done(req);
                return;
        }
        if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
                TALLOC_FREE(rec);
                tevent_req_nterror(req, status);
                return;
        }
        subreq = dbwrap_watched_watch_send(state, state->ev, rec, blocker);
        TALLOC_FREE(rec);
        if (tevent_req_nomem(subreq, req)) {
                return;
        }
        if (!tevent_req_set_endtime(
                    subreq, state->ev,
                    timeval_current_ofs(5 + sys_random() % 5, 0))) {
                tevent_req_oom(req);
                return;
        }
        tevent_req_set_callback(subreq, g_lock_lock_retry, req);
}

NTSTATUS g_lock_lock_recv(struct tevent_req *req)
{
        return tevent_req_simple_recv_ntstatus(req);
}

NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, const char *name,
                     enum g_lock_type type, struct timeval timeout)
{
        TALLOC_CTX *frame = talloc_stackframe();
        struct tevent_context *ev;
        struct tevent_req *req;
        struct timeval end;
        NTSTATUS status = NT_STATUS_NO_MEMORY;

        ev = samba_tevent_context_init(frame);
        if (ev == NULL) {
                goto fail;
        }
        req = g_lock_lock_send(frame, ev, ctx, name, type);
        if (req == NULL) {
                goto fail;
        }
        end = timeval_current_ofs(timeout.tv_sec, timeout.tv_usec);
        if (!tevent_req_set_endtime(req, ev, end)) {
                goto fail;
        }
        if (!tevent_req_poll_ntstatus(req, ev, &status)) {
                goto fail;
        }
        status = g_lock_lock_recv(req);
fail:
        TALLOC_FREE(frame);
        return status;
}

NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, const char *name)
{
        struct server_id self = messaging_server_id(ctx->msg);
        struct db_record *rec = NULL;
        struct g_lock_rec *locks = NULL;
        size_t i, num_locks;
        NTSTATUS status;
        TDB_DATA value, userdata;

        rec = dbwrap_fetch_locked(ctx->db, talloc_tos(),
                                  string_term_tdb_data(name));
        if (rec == NULL) {
                DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
                status = NT_STATUS_INTERNAL_ERROR;
                goto done;
        }

        value = dbwrap_record_get_value(rec);

        status = g_lock_get_talloc(talloc_tos(), value, &locks, &num_locks,
                                   &userdata.dptr, &userdata.dsize);
        if (!NT_STATUS_IS_OK(status)) {
                DBG_DEBUG("g_lock_get for %s failed: %s\n", name,
                          nt_errstr(status));
                status = NT_STATUS_FILE_INVALID;
                goto done;
        }

        for (i=0; i<num_locks; i++) {
                if (serverid_equal(&self, &locks[i].pid)) {
                        break;
                }
        }
        if (i == num_locks) {
                DBG_DEBUG("Lock not found, num_locks=%zu\n", num_locks);
                status = NT_STATUS_NOT_FOUND;
                goto done;
        }

        locks[i] = locks[num_locks-1];
        num_locks -= 1;

        if ((num_locks == 0) && (userdata.dsize == 0)) {
                status = dbwrap_record_delete(rec);
        } else {
                status = g_lock_record_store(
                        rec, locks, num_locks, userdata.dptr, userdata.dsize);
        }
        if (!NT_STATUS_IS_OK(status)) {
                DBG_WARNING("Could not store record: %s\n", nt_errstr(status));
                goto done;
        }

        status = NT_STATUS_OK;
done:
        TALLOC_FREE(rec);
        TALLOC_FREE(locks);
        return status;
}

struct g_lock_locks_state {
        int (*fn)(const char *name, void *private_data);
        void *private_data;
};

static int g_lock_locks_fn(struct db_record *rec, void *priv)
{
        TDB_DATA key;
        struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;

        key = dbwrap_record_get_key(rec);
        if ((key.dsize == 0) || (key.dptr[key.dsize-1] != 0)) {
                DEBUG(1, ("invalid key in g_lock.tdb, ignoring\n"));
                return 0;
        }
        return state->fn((char *)key.dptr, state->private_data);
}

int g_lock_locks(struct g_lock_ctx *ctx,
                 int (*fn)(const char *name, void *private_data),
                 void *private_data)
{
        struct g_lock_locks_state state;
        NTSTATUS status;
        int count;

        state.fn = fn;
        state.private_data = private_data;

        status = dbwrap_traverse_read(ctx->db, g_lock_locks_fn, &state,
                                      &count);
        if (!NT_STATUS_IS_OK(status)) {
                return -1;
        }
        return count;
}

g_lock_dump(struct g_lock_ctx
*ctx
, const char *name
,
591 int (*fn
)(struct server_id pid
,
592 enum g_lock_type lock_type
,
598 struct g_lock_rec
*locks
= NULL
;
601 status
= dbwrap_fetch_bystring(ctx
->db
, talloc_tos(), name
, &data
);
602 if (!NT_STATUS_IS_OK(status
)) {
606 if ((data
.dsize
== 0) || (data
.dptr
== NULL
)) {
610 status
= g_lock_get_talloc(talloc_tos(), data
, &locks
, &num_locks
,
613 TALLOC_FREE(data
.dptr
);
615 if (!NT_STATUS_IS_OK(status
)) {
616 DBG_DEBUG("g_lock_get for %s failed: %s\n", name
,
618 return NT_STATUS_INTERNAL_ERROR
;
621 for (i
=0; i
<num_locks
; i
++) {
622 if (fn(locks
[i
].pid
, locks
[i
].lock_type
, private_data
) != 0) {
static bool g_lock_init_all(TALLOC_CTX *mem_ctx,
                            struct tevent_context **pev,
                            struct messaging_context **pmsg,
                            struct g_lock_ctx **pg_ctx)
{
        struct tevent_context *ev = NULL;
        struct messaging_context *msg = NULL;
        struct g_lock_ctx *g_ctx = NULL;

        ev = samba_tevent_context_init(mem_ctx);
        if (ev == NULL) {
                d_fprintf(stderr, "ERROR: could not init event context\n");
                goto fail;
        }
        msg = messaging_init(mem_ctx, ev);
        if (msg == NULL) {
                d_fprintf(stderr, "ERROR: could not init messaging context\n");
                goto fail;
        }
        g_ctx = g_lock_ctx_init(mem_ctx, msg);
        if (g_ctx == NULL) {
                d_fprintf(stderr, "ERROR: could not init g_lock context\n");
                goto fail;
        }

        *pev = ev;
        *pmsg = msg;
        *pg_ctx = g_ctx;

        return true;
fail:
        TALLOC_FREE(g_ctx);
        TALLOC_FREE(msg);
        TALLOC_FREE(ev);
        return false;
}

NTSTATUS g_lock_do(const char *name, enum g_lock_type lock_type,
                   struct timeval timeout,
                   void (*fn)(void *private_data), void *private_data)
{
        struct tevent_context *ev = NULL;
        struct messaging_context *msg = NULL;
        struct g_lock_ctx *g_ctx = NULL;
        NTSTATUS status;

        if (!g_lock_init_all(talloc_tos(), &ev, &msg, &g_ctx)) {
                status = NT_STATUS_ACCESS_DENIED;
                goto done;
        }

        status = g_lock_lock(g_ctx, name, lock_type, timeout);
        if (!NT_STATUS_IS_OK(status)) {
                goto done;
        }
        fn(private_data);
        g_lock_unlock(g_ctx, name);

done:
        TALLOC_FREE(g_ctx);
        TALLOC_FREE(msg);
        TALLOC_FREE(ev);
        return status;
}
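
/*
 * Example usage sketch (added illustration, not part of the original
 * file; the lock name and callback below are made up):
 *
 *      static void update_config(void *private_data)
 *      {
 *              ... runs while "example_config" is write-locked ...
 *      }
 *
 *      status = g_lock_do("example_config", G_LOCK_WRITE,
 *                         timeval_set(10, 0), update_config, NULL);
 */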