/*
   Unix SMB/CIFS implementation.
   global locks based on dbwrap and messaging
   Copyright (C) 2009 by Volker Lendecke

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "system/filesys.h"
#include "lib/util/server_id.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "dbwrap/dbwrap_watch.h"
#include "g_lock.h"
#include "util_tdb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "messages.h"
#include "serverid.h"
struct g_lock_ctx {
	struct db_context *db;
	struct messaging_context *msg;
};
/*
 * The "g_lock.tdb" file contains records, indexed by the 0-terminated
 * lockname. The record contains an array of "struct g_lock_rec"
 * structures.
 */

struct g_lock_rec {
	enum g_lock_type lock_type;
	struct server_id pid;
};

#define G_LOCK_REC_LENGTH (SERVER_ID_BUF_LENGTH+1)
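
/*
 * Illustrative wire layout of one record entry (a sketch; it assumes
 * SERVER_ID_BUF_LENGTH is 24, as defined in lib/util/server_id.h):
 *
 *   octet 0      : lock_type, stored as a single byte via SCVAL()
 *   octets 1..24 : the server_id marshalled by server_id_put()
 *
 * A record holding N locks is therefore N*G_LOCK_REC_LENGTH bytes.
 */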
static void g_lock_rec_put(uint8_t buf[G_LOCK_REC_LENGTH],
			   const struct g_lock_rec rec)
{
	SCVAL(buf, 0, rec.lock_type);
	server_id_put(buf+1, rec.pid);
}
static void g_lock_rec_get(struct g_lock_rec *rec,
			   const uint8_t buf[G_LOCK_REC_LENGTH])
{
	rec->lock_type = CVAL(buf, 0);
	server_id_get(&rec->pid, buf+1);
}
struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
				   struct messaging_context *msg)
{
	struct g_lock_ctx *result;
	struct db_context *backend;
	char *db_path;

	result = talloc(mem_ctx, struct g_lock_ctx);
	if (result == NULL) {
		return NULL;
	}
	result->msg = msg;

	db_path = lock_path("g_lock.tdb");
	if (db_path == NULL) {
		TALLOC_FREE(result);
		return NULL;
	}

	backend = db_open(result, db_path, 0,
			  TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
			  O_RDWR|O_CREAT, 0600,
			  DBWRAP_LOCK_ORDER_2,
			  DBWRAP_FLAG_NONE);
	TALLOC_FREE(db_path);
	if (backend == NULL) {
		DEBUG(1, ("g_lock_init: Could not open g_lock.tdb\n"));
		TALLOC_FREE(result);
		return NULL;
	}

	result->db = db_open_watched(result, backend, msg);
	if (result->db == NULL) {
		DBG_WARNING("g_lock_init: db_open_watched failed\n");
		TALLOC_FREE(result);
		return NULL;
	}
	return result;
}
static bool g_lock_conflicts(enum g_lock_type l1, enum g_lock_type l2)
{
	/*
	 * Only tested write locks so far. Very likely this routine
	 * needs to be fixed for read locks....
	 */
	if ((l1 == G_LOCK_READ) && (l2 == G_LOCK_READ)) {
		return false;
	}
	return true;
}
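
/*
 * The resulting conflict matrix (a summary of the check above):
 *
 *                 held READ    held WRITE
 *   want READ     compatible   conflict
 *   want WRITE    conflict     conflict
 */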
static bool g_lock_get(TALLOC_CTX *mem_ctx, TDB_DATA data,
		       unsigned *pnum_locks, struct g_lock_rec **plocks)
{
	unsigned num_locks, i;
	struct g_lock_rec *locks;

	if ((data.dsize % G_LOCK_REC_LENGTH) != 0) {
		DEBUG(1, ("invalid lock record length %zu\n", data.dsize));
		return false;
	}
	num_locks = data.dsize / G_LOCK_REC_LENGTH;

	locks = talloc_array(mem_ctx, struct g_lock_rec, num_locks);
	if (locks == NULL) {
		DEBUG(1, ("talloc_array failed\n"));
		return false;
	}

	for (i=0; i<num_locks; i++) {
		g_lock_rec_get(&locks[i], data.dptr);
		data.dptr += G_LOCK_REC_LENGTH;
	}

	*plocks = locks;
	*pnum_locks = num_locks;
	return true;
}
static ssize_t g_lock_unparse(uint8_t *buf, size_t buflen,
			      const struct g_lock_rec *locks,
			      size_t num_locks)
{
	size_t i, len, ofs;

	if (num_locks > UINT32_MAX/G_LOCK_REC_LENGTH) {
		return -1;
	}

	len = num_locks * G_LOCK_REC_LENGTH;
	if (len > buflen) {
		return len;
	}

	ofs = 0;
	for (i=0; i<num_locks; i++) {
		g_lock_rec_put(buf+ofs, locks[i]);
		ofs += G_LOCK_REC_LENGTH;
	}

	return len;
}
static NTSTATUS g_lock_record_store(struct db_record *rec,
				    const struct g_lock_rec *locks,
				    size_t num_locks)
{
	ssize_t len;
	uint8_t *buf;
	NTSTATUS status;

	len = g_lock_unparse(NULL, 0, locks, num_locks);
	if (len == -1) {
		return NT_STATUS_BUFFER_TOO_SMALL;
	}

	buf = talloc_array(rec, uint8_t, len);
	if (buf == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	g_lock_unparse(buf, len, locks, num_locks);

	status = dbwrap_record_store(
		rec, (TDB_DATA) { .dptr = buf, .dsize = len }, 0);

	TALLOC_FREE(buf);

	return status;
}
static NTSTATUS g_lock_trylock(struct db_record *rec, struct server_id self,
			       enum g_lock_type type,
			       struct server_id *blocker)
{
	TDB_DATA data;
	unsigned i, num_locks;
	struct g_lock_rec *locks, *tmp;
	NTSTATUS status;
	bool modified = false;

	data = dbwrap_record_get_value(rec);

	if (!g_lock_get(talloc_tos(), data, &num_locks, &locks)) {
		return NT_STATUS_INTERNAL_DB_CORRUPTION;
	}

	for (i=0; i<num_locks; i++) {
		if (serverid_equal(&self, &locks[i].pid)) {
			/* We already hold an entry for this lock */
			status = NT_STATUS_INTERNAL_ERROR;
			goto done;
		}
		if (g_lock_conflicts(type, locks[i].lock_type)) {
			struct server_id pid = locks[i].pid;

			/*
			 * As the serverid_exists might recurse into
			 * the g_lock code, we use
			 * SERVERID_UNIQUE_ID_NOT_TO_VERIFY to avoid the loop
			 */
			pid.unique_id = SERVERID_UNIQUE_ID_NOT_TO_VERIFY;

			if (serverid_exists(&pid)) {
				status = NT_STATUS_LOCK_NOT_GRANTED;
				*blocker = locks[i].pid;
				goto done;
			}

			/*
			 * Delete stale conflicting entry
			 */
			locks[i] = locks[num_locks-1];
			num_locks -= 1;
			modified = true;
		}
	}

	tmp = talloc_realloc(talloc_tos(), locks, struct g_lock_rec,
			     num_locks+1);
	if (tmp == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}
	locks = tmp;

	ZERO_STRUCT(locks[num_locks]);
	locks[num_locks].pid = self;
	locks[num_locks].lock_type = type;
	num_locks += 1;
	modified = true;

	status = NT_STATUS_OK;
done:
	if (modified) {
		NTSTATUS store_status;
		store_status = g_lock_record_store(rec, locks, num_locks);
		if (!NT_STATUS_IS_OK(store_status)) {
			DBG_WARNING("g_lock_record_store failed: %s\n",
				    nt_errstr(store_status));
			status = store_status;
		}
	}
	TALLOC_FREE(locks);
	return status;
}
struct g_lock_lock_state {
	struct tevent_context *ev;
	struct g_lock_ctx *ctx;
	const char *name;
	enum g_lock_type type;
};

static void g_lock_lock_retry(struct tevent_req *subreq);
struct tevent_req *g_lock_lock_send(TALLOC_CTX *mem_ctx,
				    struct tevent_context *ev,
				    struct g_lock_ctx *ctx,
				    const char *name,
				    enum g_lock_type type)
{
	struct tevent_req *req, *subreq;
	struct g_lock_lock_state *state;
	struct db_record *rec;
	struct server_id self, blocker;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state, struct g_lock_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->ctx = ctx;
	state->name = name;
	state->type = type;

	rec = dbwrap_fetch_locked(ctx->db, talloc_tos(),
				  string_term_tdb_data(state->name));
	if (rec == NULL) {
		DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
		tevent_req_nterror(req, NT_STATUS_LOCK_NOT_GRANTED);
		return tevent_req_post(req, ev);
	}

	self = messaging_server_id(state->ctx->msg);

	status = g_lock_trylock(rec, self, state->type, &blocker);
	if (NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(rec);
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}
	if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
		TALLOC_FREE(rec);
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}
	subreq = dbwrap_watched_watch_send(state, state->ev, rec, blocker);
	TALLOC_FREE(rec);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	if (!tevent_req_set_endtime(
		    subreq, state->ev,
		    timeval_current_ofs(5 + sys_random() % 5, 0))) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, g_lock_lock_retry, req);
	return req;
}
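
/*
 * A note on the retry logic above: if the lock is contended we watch
 * the record for changes, but with a random 5-9 second endtime on the
 * watch. The periodic wakeup reruns g_lock_trylock(), whose
 * serverid_exists() check prunes entries of lock holders that died
 * without cleaning up after themselves.
 */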
static void g_lock_lock_retry(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct g_lock_lock_state *state = tevent_req_data(
		req, struct g_lock_lock_state);
	struct server_id self = messaging_server_id(state->ctx->msg);
	struct server_id blocker;
	struct db_record *rec;
	NTSTATUS status;

	status = dbwrap_watched_watch_recv(subreq, talloc_tos(), &rec, NULL,
					   NULL);
	TALLOC_FREE(subreq);

	if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
		rec = dbwrap_fetch_locked(
			state->ctx->db, talloc_tos(),
			string_term_tdb_data(state->name));
		if (rec == NULL) {
			status = map_nt_error_from_unix(errno);
		} else {
			status = NT_STATUS_OK;
		}
	}

	if (tevent_req_nterror(req, status)) {
		return;
	}

	status = g_lock_trylock(rec, self, state->type, &blocker);
	if (NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(rec);
		tevent_req_done(req);
		return;
	}
	if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
		TALLOC_FREE(rec);
		tevent_req_nterror(req, status);
		return;
	}
	subreq = dbwrap_watched_watch_send(state, state->ev, rec, blocker);
	TALLOC_FREE(rec);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	if (!tevent_req_set_endtime(
		    subreq, state->ev,
		    timeval_current_ofs(5 + sys_random() % 5, 0))) {
		tevent_req_oom(req);
		return;
	}
	tevent_req_set_callback(subreq, g_lock_lock_retry, req);
}
NTSTATUS g_lock_lock_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, const char *name,
		     enum g_lock_type type, struct timeval timeout)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	struct timeval end;
	NTSTATUS status = NT_STATUS_NO_MEMORY;

	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}
	req = g_lock_lock_send(frame, ev, ctx, name, type);
	if (req == NULL) {
		goto fail;
	}
	end = timeval_current_ofs(timeout.tv_sec, timeout.tv_usec);
	if (!tevent_req_set_endtime(req, ev, end)) {
		goto fail;
	}
	if (!tevent_req_poll_ntstatus(req, ev, &status)) {
		goto fail;
	}
	status = g_lock_lock_recv(req);
fail:
	TALLOC_FREE(frame);
	return status;
}
NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, const char *name)
{
	struct server_id self = messaging_server_id(ctx->msg);
	struct db_record *rec = NULL;
	struct g_lock_rec *locks = NULL;
	unsigned i, num_locks;
	NTSTATUS status;
	TDB_DATA value;

	rec = dbwrap_fetch_locked(ctx->db, talloc_tos(),
				  string_term_tdb_data(name));
	if (rec == NULL) {
		DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
		status = NT_STATUS_INTERNAL_ERROR;
		goto done;
	}

	value = dbwrap_record_get_value(rec);

	if (!g_lock_get(talloc_tos(), value, &num_locks, &locks)) {
		DEBUG(10, ("g_lock_get for %s failed\n", name));
		status = NT_STATUS_FILE_INVALID;
		goto done;
	}
	for (i=0; i<num_locks; i++) {
		if (serverid_equal(&self, &locks[i].pid)) {
			break;
		}
	}
	if (i == num_locks) {
		DBG_DEBUG("Lock not found, num_locks=%u\n", num_locks);
		status = NT_STATUS_NOT_FOUND;
		goto done;
	}

	locks[i] = locks[num_locks-1];
	num_locks -= 1;

	if (num_locks == 0) {
		status = dbwrap_record_delete(rec);
	} else {
		status = g_lock_record_store(rec, locks, num_locks);
	}
	if (!NT_STATUS_IS_OK(status)) {
		DBG_WARNING("Could not store record: %s\n", nt_errstr(status));
		goto done;
	}

	status = NT_STATUS_OK;
done:
	TALLOC_FREE(rec);
	TALLOC_FREE(locks);
	return status;
}
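
/*
 * Typical synchronous usage (an illustrative sketch; "my_task" is a
 * made-up lock name):
 *
 *	status = g_lock_lock(ctx, "my_task", G_LOCK_WRITE,
 *			     timeval_set(10, 0));
 *	if (NT_STATUS_IS_OK(status)) {
 *		... perform the cross-process critical section ...
 *		g_lock_unlock(ctx, "my_task");
 *	}
 */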
struct g_lock_locks_state {
	int (*fn)(const char *name, void *private_data);
	void *private_data;
};

static int g_lock_locks_fn(struct db_record *rec, void *priv)
{
	TDB_DATA key;
	struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;

	key = dbwrap_record_get_key(rec);
	if ((key.dsize == 0) || (key.dptr[key.dsize-1] != 0)) {
		DEBUG(1, ("invalid key in g_lock.tdb, ignoring\n"));
		return 0;
	}
	return state->fn((char *)key.dptr, state->private_data);
}
int g_lock_locks(struct g_lock_ctx *ctx,
		 int (*fn)(const char *name, void *private_data),
		 void *private_data)
{
	struct g_lock_locks_state state;
	NTSTATUS status;
	int count;

	state.fn = fn;
	state.private_data = private_data;

	status = dbwrap_traverse_read(ctx->db, g_lock_locks_fn, &state,
				      &count);
	if (!NT_STATUS_IS_OK(status)) {
		return -1;
	}
	return count;
}
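
/*
 * Example traversal callback (an illustrative sketch; print_lock is a
 * made-up helper). Returning non-zero from the callback stops the
 * traverse:
 *
 *	static int print_lock(const char *name, void *private_data)
 *	{
 *		d_printf("%s\n", name);
 *		return 0;
 *	}
 *
 *	num = g_lock_locks(ctx, print_lock, NULL);
 */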
NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, const char *name,
		     int (*fn)(struct server_id pid,
			       enum g_lock_type lock_type,
			       void *private_data),
		     void *private_data)
{
	TDB_DATA data;
	unsigned i, num_locks;
	struct g_lock_rec *locks = NULL;
	bool ret;
	NTSTATUS status;

	status = dbwrap_fetch_bystring(ctx->db, talloc_tos(), name, &data);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	if ((data.dsize == 0) || (data.dptr == NULL)) {
		return NT_STATUS_OK;
	}

	ret = g_lock_get(talloc_tos(), data, &num_locks, &locks);

	TALLOC_FREE(data.dptr);

	if (!ret) {
		DEBUG(10, ("g_lock_get for %s failed\n", name));
		return NT_STATUS_INTERNAL_ERROR;
	}

	for (i=0; i<num_locks; i++) {
		if (fn(locks[i].pid, locks[i].lock_type, private_data) != 0) {
			break;
		}
	}
	TALLOC_FREE(locks);
	return NT_STATUS_OK;
}
static bool g_lock_init_all(TALLOC_CTX *mem_ctx,
			    struct tevent_context **pev,
			    struct messaging_context **pmsg,
			    struct g_lock_ctx **pg_ctx)
{
	struct tevent_context *ev = NULL;
	struct messaging_context *msg = NULL;
	struct g_lock_ctx *g_ctx = NULL;

	ev = samba_tevent_context_init(mem_ctx);
	if (ev == NULL) {
		d_fprintf(stderr, "ERROR: could not init event context\n");
		goto fail;
	}
	msg = messaging_init(mem_ctx, ev);
	if (msg == NULL) {
		d_fprintf(stderr, "ERROR: could not init messaging context\n");
		goto fail;
	}
	g_ctx = g_lock_ctx_init(mem_ctx, msg);
	if (g_ctx == NULL) {
		d_fprintf(stderr, "ERROR: could not init g_lock context\n");
		goto fail;
	}

	*pev = ev;
	*pmsg = msg;
	*pg_ctx = g_ctx;
	return true;
fail:
	TALLOC_FREE(g_ctx);
	TALLOC_FREE(msg);
	TALLOC_FREE(ev);
	return false;
}
NTSTATUS g_lock_do(const char *name, enum g_lock_type lock_type,
		   struct timeval timeout,
		   void (*fn)(void *private_data), void *private_data)
{
	struct tevent_context *ev = NULL;
	struct messaging_context *msg = NULL;
	struct g_lock_ctx *g_ctx = NULL;
	NTSTATUS status;

	if (!g_lock_init_all(talloc_tos(), &ev, &msg, &g_ctx)) {
		status = NT_STATUS_ACCESS_DENIED;
		goto done;
	}

	status = g_lock_lock(g_ctx, name, lock_type, timeout);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}
	fn(private_data);
	g_lock_unlock(g_ctx, name);

done:
	TALLOC_FREE(g_ctx);
	TALLOC_FREE(msg);
	TALLOC_FREE(ev);
	return status;
}
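
/*
 * Example (an illustrative sketch; do_work and "example_task" are made
 * up): run a callback while holding the global write lock, waiting up
 * to 60 seconds to acquire it.
 *
 *	static void do_work(void *private_data)
 *	{
 *		... the work that must not run concurrently ...
 *	}
 *
 *	status = g_lock_do("example_task", G_LOCK_WRITE,
 *			   timeval_set(60, 0), do_work, NULL);
 */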