/*
   Unix SMB/CIFS implementation.
   global locks based on dbwrap and messaging
   Copyright (C) 2009 by Volker Lendecke

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "system/filesys.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "dbwrap/dbwrap_watch.h"
#include "g_lock.h"
#include "util_tdb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "messages.h"
#include "serverid.h"

struct g_lock_ctx {
	struct db_context *db;
	struct messaging_context *msg;
};

/*
 * The "g_lock.tdb" file contains records, indexed by the 0-terminated
 * lockname. The record contains an array of "struct g_lock_rec" structures.
 */
struct g_lock_rec {
	enum g_lock_type lock_type;
	struct server_id pid;
};

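/*
 * Illustrative sketch (not part of the original file): a lockname held by
 * two readers is stored as two g_lock_rec entries back to back, e.g.
 *
 *	struct g_lock_rec recs[2] = {
 *		{ .lock_type = G_LOCK_READ, .pid = server_id_1 },
 *		{ .lock_type = G_LOCK_READ, .pid = server_id_2 },
 *	};
 *
 * written verbatim as the TDB value. g_lock_parse() below recovers the
 * array by dividing data.dsize by sizeof(struct g_lock_rec). The names
 * server_id_1/server_id_2 are placeholders.
 */
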
struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
				   struct messaging_context *msg)
{
	struct g_lock_ctx *result;
	struct db_context *backend;
	char *db_path;

	result = talloc(mem_ctx, struct g_lock_ctx);
	if (result == NULL) {
		return NULL;
	}
	result->msg = msg;

	db_path = lock_path("g_lock.tdb");
	if (db_path == NULL) {
		TALLOC_FREE(result);
		return NULL;
	}

	backend = db_open(result, db_path, 0,
			  TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
			  O_RDWR|O_CREAT, 0600,
			  DBWRAP_LOCK_ORDER_2,
			  DBWRAP_FLAG_NONE);
	TALLOC_FREE(db_path);
	if (backend == NULL) {
		DEBUG(1, ("g_lock_init: Could not open g_lock.tdb\n"));
		TALLOC_FREE(result);
		return NULL;
	}

	result->db = db_open_watched(result, backend, msg);
	if (result->db == NULL) {
		DBG_WARNING("g_lock_init: db_open_watched failed\n");
		TALLOC_FREE(result);
		return NULL;
	}
	return result;
}

static bool g_lock_conflicts(enum g_lock_type l1, enum g_lock_type l2)
{
	/*
	 * Only tested write locks so far. Very likely this routine
	 * needs to be fixed for read locks....
	 */
	if ((l1 == G_LOCK_READ) && (l2 == G_LOCK_READ)) {
		return false;
	}
	return true;
}

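/*
 * Added note (not in the original): with the rule above, two G_LOCK_READ
 * holders can coexist; every other combination (READ/WRITE, WRITE/READ,
 * WRITE/WRITE) is treated as a conflict.
 */
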
static bool g_lock_parse(TALLOC_CTX *mem_ctx, TDB_DATA data,
			 unsigned *pnum_locks, struct g_lock_rec **plocks)
{
	unsigned num_locks;
	struct g_lock_rec *locks;

	if ((data.dsize % sizeof(struct g_lock_rec)) != 0) {
		DEBUG(1, ("invalid lock record length %zu\n", data.dsize));
		return false;
	}
	num_locks = data.dsize / sizeof(struct g_lock_rec);
	locks = talloc_memdup(mem_ctx, data.dptr, data.dsize);
	if (locks == NULL) {
		DEBUG(1, ("talloc_memdup failed\n"));
		return false;
	}
	*pnum_locks = num_locks;
	*plocks = locks;
	return true;
}

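/*
 * Added summary comment (not in the original): g_lock_trylock() operates on
 * an already locked record. It parses the existing holder array, fails with
 * NT_STATUS_INTERNAL_ERROR if we are already in the list, returns
 * NT_STATUS_LOCK_NOT_GRANTED (and the blocker's id) if a live conflicting
 * holder exists, prunes conflicting holders whose server id no longer
 * exists, and otherwise appends ourselves and stores the updated array.
 */
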
static NTSTATUS g_lock_trylock(struct db_record *rec, struct server_id self,
			       enum g_lock_type type,
			       struct server_id *blocker)
{
	TDB_DATA data;
	unsigned i, num_locks;
	struct g_lock_rec *locks, *tmp;
	NTSTATUS status;
	bool modified = false;

	data = dbwrap_record_get_value(rec);

	if (!g_lock_parse(talloc_tos(), data, &num_locks, &locks)) {
		return NT_STATUS_INTERNAL_ERROR;
	}

	for (i=0; i<num_locks; i++) {
		if (serverid_equal(&self, &locks[i].pid)) {
			status = NT_STATUS_INTERNAL_ERROR;
			goto done;
		}
		if (g_lock_conflicts(type, locks[i].lock_type)) {
			struct server_id pid = locks[i].pid;

			/*
			 * As the serverid_exists might recurse into
			 * the g_lock code, we use
			 * SERVERID_UNIQUE_ID_NOT_TO_VERIFY to avoid the loop
			 */
			pid.unique_id = SERVERID_UNIQUE_ID_NOT_TO_VERIFY;

			if (serverid_exists(&pid)) {
				status = NT_STATUS_LOCK_NOT_GRANTED;
				*blocker = locks[i].pid;
				goto done;
			}

			/*
			 * Delete stale conflicting entry
			 */
			locks[i] = locks[num_locks-1];
			num_locks -= 1;
			modified = true;
		}
	}

	tmp = talloc_realloc(talloc_tos(), locks, struct g_lock_rec,
			     num_locks+1);
	if (tmp == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}
	locks = tmp;

	ZERO_STRUCT(locks[num_locks]);
	locks[num_locks].pid = self;
	locks[num_locks].lock_type = type;
	num_locks += 1;
	modified = true;

	status = NT_STATUS_OK;
done:
	if (modified) {
		NTSTATUS store_status;

		data = make_tdb_data((uint8_t *)locks, num_locks * sizeof(*locks));
		store_status = dbwrap_record_store(rec, data, 0);
		if (!NT_STATUS_IS_OK(store_status)) {
			DEBUG(1, ("rec->store failed: %s\n",
				  nt_errstr(store_status)));
			status = store_status;
		}
	}
	TALLOC_FREE(locks);
	return status;
}

struct g_lock_lock_state {
	struct tevent_context *ev;
	struct g_lock_ctx *ctx;
	const char *name;
	enum g_lock_type type;
};

static void g_lock_lock_retry(struct tevent_req *subreq);

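/*
 * Added summary comment (not in the original): the async path fetch-locks
 * the record, tries to grab the g_lock, and on NT_STATUS_LOCK_NOT_GRANTED
 * watches the record via dbwrap_watched_watch_send(). The watch carries a
 * randomized 5-9 second endtime so a retry happens even if the wakeup from
 * the current holder is lost; g_lock_lock_retry() then re-runs
 * g_lock_trylock().
 */
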
struct tevent_req *g_lock_lock_send(TALLOC_CTX *mem_ctx,
				    struct tevent_context *ev,
				    struct g_lock_ctx *ctx,
				    const char *name,
				    enum g_lock_type type)
{
	struct tevent_req *req, *subreq;
	struct g_lock_lock_state *state;
	struct db_record *rec;
	struct server_id self, blocker;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state, struct g_lock_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->ctx = ctx;
	state->name = name;
	state->type = type;

	rec = dbwrap_fetch_locked(ctx->db, talloc_tos(),
				  string_term_tdb_data(state->name));
	if (rec == NULL) {
		DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
		tevent_req_nterror(req, NT_STATUS_LOCK_NOT_GRANTED);
		return tevent_req_post(req, ev);
	}

	self = messaging_server_id(state->ctx->msg);

	status = g_lock_trylock(rec, self, state->type, &blocker);
	if (NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(rec);
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}
	if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
		TALLOC_FREE(rec);
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}
	subreq = dbwrap_watched_watch_send(state, state->ev, rec, blocker);
	TALLOC_FREE(rec);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	if (!tevent_req_set_endtime(
		    subreq, state->ev,
		    timeval_current_ofs(5 + sys_random() % 5, 0))) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, g_lock_lock_retry, req);
	return req;
}

static void g_lock_lock_retry(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct g_lock_lock_state *state = tevent_req_data(
		req, struct g_lock_lock_state);
	struct server_id self = messaging_server_id(state->ctx->msg);
	struct server_id blocker;
	struct db_record *rec;
	NTSTATUS status;

	status = dbwrap_watched_watch_recv(subreq, talloc_tos(), &rec, NULL,
					   NULL);
	TALLOC_FREE(subreq);

	if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
		rec = dbwrap_fetch_locked(
			state->ctx->db, talloc_tos(),
			string_term_tdb_data(state->name));
		if (rec == NULL) {
			status = map_nt_error_from_unix(errno);
		} else {
			status = NT_STATUS_OK;
		}
	}

	if (tevent_req_nterror(req, status)) {
		return;
	}
	status = g_lock_trylock(rec, self, state->type, &blocker);
	if (NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(rec);
		tevent_req_done(req);
		return;
	}
	if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
		TALLOC_FREE(rec);
		tevent_req_nterror(req, status);
		return;
	}
	subreq = dbwrap_watched_watch_send(state, state->ev, rec, blocker);
	TALLOC_FREE(rec);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	if (!tevent_req_set_endtime(
		    subreq, state->ev,
		    timeval_current_ofs(5 + sys_random() % 5, 0))) {
		tevent_req_oom(req);
		return;
	}
	tevent_req_set_callback(subreq, g_lock_lock_retry, req);
}

NTSTATUS g_lock_lock_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}

NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, const char *name,
		     enum g_lock_type type, struct timeval timeout)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	struct timeval end;
	NTSTATUS status = NT_STATUS_NO_MEMORY;

	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}
	req = g_lock_lock_send(frame, ev, ctx, name, type);
	if (req == NULL) {
		goto fail;
	}
	end = timeval_current_ofs(timeout.tv_sec, timeout.tv_usec);
	if (!tevent_req_set_endtime(req, ev, end)) {
		goto fail;
	}
	if (!tevent_req_poll_ntstatus(req, ev, &status)) {
		goto fail;
	}
	status = g_lock_lock_recv(req);
fail:
	TALLOC_FREE(frame);
	return status;
}

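/*
 * Illustrative usage sketch (not part of the original file); "my_lock" and
 * the 10-second timeout are arbitrary example values:
 *
 *	NTSTATUS status;
 *
 *	status = g_lock_lock(ctx, "my_lock", G_LOCK_WRITE,
 *			     timeval_set(10, 0));
 *	if (NT_STATUS_IS_OK(status)) {
 *		... do work under the lock ...
 *		g_lock_unlock(ctx, "my_lock");
 *	}
 */
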
NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, const char *name)
{
	struct server_id self = messaging_server_id(ctx->msg);
	struct db_record *rec = NULL;
	struct g_lock_rec *locks = NULL;
	unsigned i, num_locks;
	NTSTATUS status;
	TDB_DATA value;

	rec = dbwrap_fetch_locked(ctx->db, talloc_tos(),
				  string_term_tdb_data(name));
	if (rec == NULL) {
		DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
		status = NT_STATUS_INTERNAL_ERROR;
		goto done;
	}

	value = dbwrap_record_get_value(rec);

	if (!g_lock_parse(talloc_tos(), value, &num_locks, &locks)) {
		DEBUG(10, ("g_lock_parse for %s failed\n", name));
		status = NT_STATUS_FILE_INVALID;
		goto done;
	}
	for (i=0; i<num_locks; i++) {
		if (serverid_equal(&self, &locks[i].pid)) {
			break;
		}
	}
	if (i == num_locks) {
		DEBUG(10, ("g_lock_force_unlock: Lock not found\n"));
		status = NT_STATUS_NOT_FOUND;
		goto done;
	}

	locks[i] = locks[num_locks-1];
	num_locks -= 1;

	if (num_locks == 0) {
		status = dbwrap_record_delete(rec);
	} else {
		TDB_DATA data;
		data = make_tdb_data((uint8_t *)locks,
				     sizeof(struct g_lock_rec) * num_locks);
		status = dbwrap_record_store(rec, data, 0);
	}
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("g_lock_force_unlock: Could not store record: %s\n",
			  nt_errstr(status)));
		goto done;
	}

	status = NT_STATUS_OK;
done:
	TALLOC_FREE(rec);
	TALLOC_FREE(locks);
	return status;
}

struct g_lock_locks_state {
	int (*fn)(const char *name, void *private_data);
	void *private_data;
};

static int g_lock_locks_fn(struct db_record *rec, void *priv)
{
	TDB_DATA key;
	struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;

	key = dbwrap_record_get_key(rec);
	if ((key.dsize == 0) || (key.dptr[key.dsize-1] != 0)) {
		DEBUG(1, ("invalid key in g_lock.tdb, ignoring\n"));
		return 0;
	}
	return state->fn((char *)key.dptr, state->private_data);
}

int g_lock_locks(struct g_lock_ctx *ctx,
		 int (*fn)(const char *name, void *private_data),
		 void *private_data)
{
	struct g_lock_locks_state state;
	NTSTATUS status;
	int count;

	state.fn = fn;
	state.private_data = private_data;

	status = dbwrap_traverse_read(ctx->db, g_lock_locks_fn, &state, &count);
	if (!NT_STATUS_IS_OK(status)) {
		return -1;
	}
	return count;
}

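/*
 * Illustrative usage sketch (not part of the original file): a traversal
 * callback that prints every lock name; print_lock_fn is a hypothetical
 * helper.
 *
 *	static int print_lock_fn(const char *name, void *private_data)
 *	{
 *		d_printf("g_lock: %s\n", name);
 *		return 0;	// keep traversing
 *	}
 *
 *	g_lock_locks(ctx, print_lock_fn, NULL);
 */
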
NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, const char *name,
		     int (*fn)(struct server_id pid,
			       enum g_lock_type lock_type,
			       void *private_data),
		     void *private_data)
{
	TDB_DATA data;
	unsigned i, num_locks;
	struct g_lock_rec *locks = NULL;
	bool ret;
	NTSTATUS status;

	status = dbwrap_fetch_bystring(ctx->db, talloc_tos(), name, &data);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	if ((data.dsize == 0) || (data.dptr == NULL)) {
		return NT_STATUS_OK;
	}

	ret = g_lock_parse(talloc_tos(), data, &num_locks, &locks);

	TALLOC_FREE(data.dptr);

	if (!ret) {
		DEBUG(10, ("g_lock_parse for %s failed\n", name));
		return NT_STATUS_INTERNAL_ERROR;
	}

	for (i=0; i<num_locks; i++) {
		if (fn(locks[i].pid, locks[i].lock_type, private_data) != 0) {
			break;
		}
	}
	TALLOC_FREE(locks);
	return NT_STATUS_OK;
}

struct g_lock_get_state {
	bool found;
	struct server_id *pid;
};

static int g_lock_get_fn(struct server_id pid, enum g_lock_type lock_type,
			 void *priv)
{
	struct g_lock_get_state *state = (struct g_lock_get_state *)priv;
	state->found = true;
	*state->pid = pid;
	return 1;
}

NTSTATUS g_lock_get(struct g_lock_ctx *ctx, const char *name,
		    struct server_id *pid)
{
	struct g_lock_get_state state;
	NTSTATUS status;

	state.found = false;
	state.pid = pid;

	status = g_lock_dump(ctx, name, g_lock_get_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
	if (!state.found) {
		return NT_STATUS_NOT_FOUND;
	}
	return NT_STATUS_OK;
}

static bool g_lock_init_all(TALLOC_CTX *mem_ctx,
			    struct tevent_context **pev,
			    struct messaging_context **pmsg,
			    struct g_lock_ctx **pg_ctx)
{
	struct tevent_context *ev = NULL;
	struct messaging_context *msg = NULL;
	struct g_lock_ctx *g_ctx = NULL;

	ev = samba_tevent_context_init(mem_ctx);
	if (ev == NULL) {
		d_fprintf(stderr, "ERROR: could not init event context\n");
		goto fail;
	}
	msg = messaging_init(mem_ctx, ev);
	if (msg == NULL) {
		d_fprintf(stderr, "ERROR: could not init messaging context\n");
		goto fail;
	}
	g_ctx = g_lock_ctx_init(mem_ctx, msg);
	if (g_ctx == NULL) {
		d_fprintf(stderr, "ERROR: could not init g_lock context\n");
		goto fail;
	}

	*pev = ev;
	*pmsg = msg;
	*pg_ctx = g_ctx;

	return true;
fail:
	TALLOC_FREE(g_ctx);
	TALLOC_FREE(msg);
	TALLOC_FREE(ev);
	return false;
}

NTSTATUS g_lock_do(const char *name, enum g_lock_type lock_type,
		   struct timeval timeout,
		   void (*fn)(void *private_data), void *private_data)
{
	struct tevent_context *ev = NULL;
	struct messaging_context *msg = NULL;
	struct g_lock_ctx *g_ctx = NULL;
	NTSTATUS status;

	if (!g_lock_init_all(talloc_tos(), &ev, &msg, &g_ctx)) {
		status = NT_STATUS_ACCESS_DENIED;
		goto done;
	}

	status = g_lock_lock(g_ctx, name, lock_type, timeout);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}
	fn(private_data);
	g_lock_unlock(g_ctx, name);

done:
	TALLOC_FREE(g_ctx);
	TALLOC_FREE(msg);
	TALLOC_FREE(ev);
	return status;
}
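
/*
 * Illustrative usage sketch (not part of the original file): run a
 * hypothetical do_cleanup() callback while holding "cleanup_lock" as a
 * write lock, waiting up to 60 seconds for the lock.
 *
 *	static void do_cleanup(void *private_data)
 *	{
 *		... work that must be serialized across processes ...
 *	}
 *
 *	status = g_lock_do("cleanup_lock", G_LOCK_WRITE,
 *			   timeval_set(60, 0), do_cleanup, NULL);
 */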