/*
   Unix SMB/CIFS implementation.
   global locks based on dbwrap and messaging
   Copyright (C) 2009 by Volker Lendecke

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "system/filesys.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "dbwrap/dbwrap_watch.h"
#include "g_lock.h"
#include "util_tdb.h"
#include "ctdbd_conn.h"
#include "../lib/util/select.h"
#include "../lib/util/tevent_ntstatus.h"
#include "system/select.h"
#include "messages.h"
#include "serverid.h"
struct g_lock_ctx {
	struct db_context *db;
	struct messaging_context *msg;
};
/*
 * The "g_lock.tdb" file contains records, indexed by the 0-terminated
 * lockname. The record contains an array of "struct g_lock_rec"
 * structures.
 */

struct g_lock_rec {
	enum g_lock_type lock_type;
	struct server_id pid;
};
struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
				   struct messaging_context *msg)
{
	struct g_lock_ctx *result;
	char *db_path;

	result = talloc(mem_ctx, struct g_lock_ctx);
	if (result == NULL) {
		return NULL;
	}
	result->msg = msg;

	db_path = lock_path("g_lock.tdb");
	if (db_path == NULL) {
		TALLOC_FREE(result);
		return NULL;
	}

	result->db = db_open(result, db_path, 0,
			     TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
			     O_RDWR|O_CREAT, 0600,
			     DBWRAP_LOCK_ORDER_2);
	TALLOC_FREE(db_path);
	if (result->db == NULL) {
		DEBUG(1, ("g_lock_init: Could not open g_lock.tdb\n"));
		TALLOC_FREE(result);
		return NULL;
	}
	dbwrap_watch_db(result->db, msg);
	return result;
}
static bool g_lock_conflicts(enum g_lock_type l1, enum g_lock_type l2)
{
	/*
	 * Only tested write locks so far. Very likely this routine
	 * needs to be fixed for read locks....
	 */
	if ((l1 == G_LOCK_READ) && (l2 == G_LOCK_READ)) {
		return false;
	}
	return true;
}
static bool g_lock_parse(TALLOC_CTX *mem_ctx, TDB_DATA data,
			 unsigned *pnum_locks, struct g_lock_rec **plocks)
{
	unsigned num_locks;
	struct g_lock_rec *locks;

	if ((data.dsize % sizeof(struct g_lock_rec)) != 0) {
		DEBUG(1, ("invalid lock record length %d\n", (int)data.dsize));
		return false;
	}
	num_locks = data.dsize / sizeof(struct g_lock_rec);
	locks = talloc_memdup(mem_ctx, data.dptr, data.dsize);
	if (locks == NULL) {
		DEBUG(1, ("talloc_memdup failed\n"));
		return false;
	}
	*plocks = locks;
	*pnum_locks = num_locks;
	return true;
}
static NTSTATUS g_lock_trylock(struct db_record *rec, struct server_id self,
			       enum g_lock_type type)
{
	TDB_DATA data;
	unsigned i, num_locks;
	struct g_lock_rec *locks, *tmp;
	NTSTATUS status;
	bool modified = false;

	data = dbwrap_record_get_value(rec);

	if (!g_lock_parse(talloc_tos(), data, &num_locks, &locks)) {
		return NT_STATUS_INTERNAL_ERROR;
	}

	for (i=0; i<num_locks; i++) {
		if (serverid_equal(&self, &locks[i].pid)) {
			status = NT_STATUS_INTERNAL_ERROR;
			goto done;
		}
		if (g_lock_conflicts(type, locks[i].lock_type)) {
			struct server_id pid = locks[i].pid;

			/*
			 * As the serverid_exists might recurse into
			 * the g_lock code, we use
			 * SERVERID_UNIQUE_ID_NOT_TO_VERIFY to avoid the loop
			 */
			pid.unique_id = SERVERID_UNIQUE_ID_NOT_TO_VERIFY;

			if (serverid_exists(&pid)) {
				status = NT_STATUS_LOCK_NOT_GRANTED;
				goto done;
			}

			/*
			 * Delete stale conflicting entry
			 */
			locks[i] = locks[num_locks-1];
			num_locks -= 1;
			modified = true;
		}
	}

	tmp = talloc_realloc(talloc_tos(), locks, struct g_lock_rec,
			     num_locks+1);
	if (tmp == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}
	locks = tmp;

	ZERO_STRUCT(locks[num_locks]);
	locks[num_locks].pid = self;
	locks[num_locks].lock_type = type;
	num_locks += 1;
	modified = true;

	status = NT_STATUS_OK;
done:
	if (modified) {
		NTSTATUS store_status;

		data = make_tdb_data((uint8_t *)locks,
				     num_locks * sizeof(*locks));
		store_status = dbwrap_record_store(rec, data, 0);
		if (!NT_STATUS_IS_OK(store_status)) {
			DEBUG(1, ("rec->store failed: %s\n",
				  nt_errstr(store_status)));
			status = store_status;
		}
	}
	TALLOC_FREE(locks);
	return status;
}
struct g_lock_lock_state {
	struct tevent_context *ev;
	struct g_lock_ctx *ctx;
	const char *name;
	enum g_lock_type type;
};

static void g_lock_lock_retry(struct tevent_req *subreq);
struct tevent_req *g_lock_lock_send(TALLOC_CTX *mem_ctx,
				    struct tevent_context *ev,
				    struct g_lock_ctx *ctx,
				    const char *name,
				    enum g_lock_type type)
{
	struct tevent_req *req, *subreq;
	struct g_lock_lock_state *state;
	struct db_record *rec;
	struct server_id self;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state, struct g_lock_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->ctx = ctx;
	state->name = name;
	state->type = type;

	rec = dbwrap_fetch_locked(ctx->db, talloc_tos(),
				  string_term_tdb_data(state->name));
	if (rec == NULL) {
		DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
		tevent_req_nterror(req, NT_STATUS_LOCK_NOT_GRANTED);
		return tevent_req_post(req, ev);
	}

	self = messaging_server_id(state->ctx->msg);

	status = g_lock_trylock(rec, self, state->type);
	if (NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(rec);
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}
	if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
		TALLOC_FREE(rec);
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}
	subreq = dbwrap_record_watch_send(state, state->ev, rec,
					  state->ctx->msg);
	TALLOC_FREE(rec);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	/*
	 * Wake up and retry after 5..10 seconds even if we never see a
	 * watch notification.
	 */
	if (!tevent_req_set_endtime(
		    subreq, state->ev,
		    timeval_current_ofs(5 + sys_random() % 5, 0))) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, g_lock_lock_retry, req);
	return req;
}
static void g_lock_lock_retry(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct g_lock_lock_state *state = tevent_req_data(
		req, struct g_lock_lock_state);
	struct server_id self = messaging_server_id(state->ctx->msg);
	struct db_record *rec;
	NTSTATUS status;

	status = dbwrap_record_watch_recv(subreq, talloc_tos(), &rec);
	TALLOC_FREE(subreq);

	if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
		rec = dbwrap_fetch_locked(
			state->ctx->db, talloc_tos(),
			string_term_tdb_data(state->name));
		if (rec == NULL) {
			status = map_nt_error_from_unix(errno);
		} else {
			status = NT_STATUS_OK;
		}
	}

	if (tevent_req_nterror(req, status)) {
		return;
	}
	status = g_lock_trylock(rec, self, state->type);
	if (NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(rec);
		tevent_req_done(req);
		return;
	}
	if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
		TALLOC_FREE(rec);
		tevent_req_nterror(req, status);
		return;
	}
	subreq = dbwrap_record_watch_send(state, state->ev, rec,
					  state->ctx->msg);
	TALLOC_FREE(rec);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	if (!tevent_req_set_endtime(
		    subreq, state->ev,
		    timeval_current_ofs(5 + sys_random() % 5, 0))) {
		tevent_req_oom(req);
		return;
	}
	tevent_req_set_callback(subreq, g_lock_lock_retry, req);
}
NTSTATUS g_lock_lock_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, const char *name,
		     enum g_lock_type type, struct timeval timeout)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	struct timeval end;
	NTSTATUS status = NT_STATUS_NO_MEMORY;

	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}
	req = g_lock_lock_send(frame, ev, ctx, name, type);
	if (req == NULL) {
		goto fail;
	}
	end = timeval_current_ofs(timeout.tv_sec, timeout.tv_usec);
	if (!tevent_req_set_endtime(req, ev, end)) {
		goto fail;
	}
	if (!tevent_req_poll_ntstatus(req, ev, &status)) {
		goto fail;
	}
	status = g_lock_lock_recv(req);
fail:
	TALLOC_FREE(frame);
	return status;
}
NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, const char *name)
{
	struct server_id self = messaging_server_id(ctx->msg);
	struct db_record *rec = NULL;
	struct g_lock_rec *locks = NULL;
	unsigned i, num_locks;
	NTSTATUS status;
	TDB_DATA value;

	rec = dbwrap_fetch_locked(ctx->db, talloc_tos(),
				  string_term_tdb_data(name));
	if (rec == NULL) {
		DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
		status = NT_STATUS_INTERNAL_ERROR;
		goto done;
	}

	value = dbwrap_record_get_value(rec);

	if (!g_lock_parse(talloc_tos(), value, &num_locks, &locks)) {
		DEBUG(10, ("g_lock_parse for %s failed\n", name));
		status = NT_STATUS_FILE_INVALID;
		goto done;
	}
	for (i=0; i<num_locks; i++) {
		if (serverid_equal(&self, &locks[i].pid)) {
			break;
		}
	}
	if (i == num_locks) {
		DEBUG(10, ("g_lock_force_unlock: Lock not found\n"));
		status = NT_STATUS_NOT_FOUND;
		goto done;
	}

	locks[i] = locks[num_locks-1];
	num_locks -= 1;

	if (num_locks == 0) {
		status = dbwrap_record_delete(rec);
	} else {
		TDB_DATA data;
		data = make_tdb_data((uint8_t *)locks,
				     sizeof(struct g_lock_rec) * num_locks);
		status = dbwrap_record_store(rec, data, 0);
	}
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("g_lock_force_unlock: Could not store record: %s\n",
			  nt_errstr(status)));
		goto done;
	}

	status = NT_STATUS_OK;
done:
	TALLOC_FREE(rec);
	TALLOC_FREE(locks);
	return status;
}
struct g_lock_locks_state {
	int (*fn)(const char *name, void *private_data);
	void *private_data;
};

static int g_lock_locks_fn(struct db_record *rec, void *priv)
{
	TDB_DATA key;
	struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;

	key = dbwrap_record_get_key(rec);
	if ((key.dsize == 0) || (key.dptr[key.dsize-1] != 0)) {
		DEBUG(1, ("invalid key in g_lock.tdb, ignoring\n"));
		return 0;
	}
	return state->fn((char *)key.dptr, state->private_data);
}

int g_lock_locks(struct g_lock_ctx *ctx,
		 int (*fn)(const char *name, void *private_data),
		 void *private_data)
{
	struct g_lock_locks_state state;
	NTSTATUS status;
	int count;

	state.fn = fn;
	state.private_data = private_data;

	status = dbwrap_traverse_read(ctx->db, g_lock_locks_fn, &state, &count);
	if (!NT_STATUS_IS_OK(status)) {
		return -1;
	}
	return count;
}
NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, const char *name,
		     int (*fn)(struct server_id pid,
			       enum g_lock_type lock_type,
			       void *private_data),
		     void *private_data)
{
	TDB_DATA data;
	unsigned i, num_locks;
	struct g_lock_rec *locks = NULL;
	bool ret;
	NTSTATUS status;

	status = dbwrap_fetch_bystring(ctx->db, talloc_tos(), name, &data);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	if ((data.dsize == 0) || (data.dptr == NULL)) {
		return NT_STATUS_OK;
	}

	ret = g_lock_parse(talloc_tos(), data, &num_locks, &locks);

	TALLOC_FREE(data.dptr);

	if (!ret) {
		DEBUG(10, ("g_lock_parse for %s failed\n", name));
		return NT_STATUS_INTERNAL_ERROR;
	}

	for (i=0; i<num_locks; i++) {
		if (fn(locks[i].pid, locks[i].lock_type, private_data) != 0) {
			break;
		}
	}
	TALLOC_FREE(locks);
	return NT_STATUS_OK;
}
struct g_lock_get_state {
	bool found;
	struct server_id *pid;
};

static int g_lock_get_fn(struct server_id pid, enum g_lock_type lock_type,
			 void *priv)
{
	struct g_lock_get_state *state = (struct g_lock_get_state *)priv;
	state->found = true;
	*state->pid = pid;
	return 1;
}

NTSTATUS g_lock_get(struct g_lock_ctx *ctx, const char *name,
		    struct server_id *pid)
{
	struct g_lock_get_state state;
	NTSTATUS status;

	state.found = false;
	state.pid = pid;

	status = g_lock_dump(ctx, name, g_lock_get_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
	if (!state.found) {
		return NT_STATUS_NOT_FOUND;
	}
	return NT_STATUS_OK;
}
static bool g_lock_init_all(TALLOC_CTX *mem_ctx,
			    struct tevent_context **pev,
			    struct messaging_context **pmsg,
			    struct g_lock_ctx **pg_ctx)
{
	struct tevent_context *ev = NULL;
	struct messaging_context *msg = NULL;
	struct g_lock_ctx *g_ctx = NULL;

	ev = samba_tevent_context_init(mem_ctx);
	if (ev == NULL) {
		d_fprintf(stderr, "ERROR: could not init event context\n");
		goto fail;
	}
	msg = messaging_init(mem_ctx, ev);
	if (msg == NULL) {
		d_fprintf(stderr, "ERROR: could not init messaging context\n");
		goto fail;
	}
	g_ctx = g_lock_ctx_init(mem_ctx, msg);
	if (g_ctx == NULL) {
		d_fprintf(stderr, "ERROR: could not init g_lock context\n");
		goto fail;
	}

	*pev = ev;
	*pmsg = msg;
	*pg_ctx = g_ctx;

	return true;
fail:
	TALLOC_FREE(g_ctx);
	TALLOC_FREE(msg);
	TALLOC_FREE(ev);
	return false;
}
NTSTATUS g_lock_do(const char *name, enum g_lock_type lock_type,
		   struct timeval timeout,
		   void (*fn)(void *private_data), void *private_data)
{
	struct tevent_context *ev = NULL;
	struct messaging_context *msg = NULL;
	struct g_lock_ctx *g_ctx = NULL;
	NTSTATUS status;

	if (!g_lock_init_all(talloc_tos(), &ev, &msg, &g_ctx)) {
		status = NT_STATUS_ACCESS_DENIED;
		goto done;
	}

	status = g_lock_lock(g_ctx, name, lock_type, timeout);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}
	fn(private_data);
	g_lock_unlock(g_ctx, name);

done:
	TALLOC_FREE(g_ctx);
	TALLOC_FREE(msg);
	TALLOC_FREE(ev);
	return status;
}