/*
 * Unix SMB/CIFS implementation.
 * global locks based on dbwrap and messaging
 * Copyright (C) 2009 by Volker Lendecke
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
21 #include "system/filesys.h"
22 #include "lib/util/server_id.h"
23 #include "dbwrap/dbwrap.h"
24 #include "dbwrap/dbwrap_open.h"
25 #include "dbwrap/dbwrap_watch.h"
28 #include "../lib/util/tevent_ntstatus.h"
/*
 * Per-client g_lock context: the watched lock database plus the
 * messaging context used to identify and wake lock waiters.
 */
struct g_lock_ctx {
	struct db_context *db;
	struct messaging_context *msg;
};
/*
 * The "g_lock.tdb" file contains records, indexed by the 0-terminated
 * lockname. The record contains an array of "struct g_lock_rec"
 * structures.
 */
43 #define G_LOCK_REC_LENGTH (SERVER_ID_BUF_LENGTH+1)
45 static void g_lock_rec_put(uint8_t buf
[G_LOCK_REC_LENGTH
],
46 const struct g_lock_rec rec
)
48 SCVAL(buf
, 0, rec
.lock_type
);
49 server_id_put(buf
+1, rec
.pid
);
52 static void g_lock_rec_get(struct g_lock_rec
*rec
,
53 const uint8_t buf
[G_LOCK_REC_LENGTH
])
55 rec
->lock_type
= CVAL(buf
, 0);
56 server_id_get(&rec
->pid
, buf
+1);
59 static ssize_t
g_lock_put(uint8_t *buf
, size_t buflen
,
60 const struct g_lock_rec
*locks
,
62 const uint8_t *data
, size_t datalen
)
66 if (num_locks
> UINT32_MAX
/G_LOCK_REC_LENGTH
) {
70 len
= num_locks
* G_LOCK_REC_LENGTH
;
72 len
+= sizeof(uint32_t);
73 if (len
< sizeof(uint32_t)) {
87 SIVAL(buf
, ofs
, num_locks
);
88 ofs
+= sizeof(uint32_t);
90 for (i
=0; i
<num_locks
; i
++) {
91 g_lock_rec_put(buf
+ofs
, locks
[i
]);
92 ofs
+= G_LOCK_REC_LENGTH
;
95 if ((data
!= NULL
) && (datalen
!= 0)) {
96 memcpy(buf
+ofs
, data
, datalen
);
102 static ssize_t
g_lock_get(TDB_DATA recval
,
103 struct g_lock_rec
*locks
, size_t num_locks
,
104 uint8_t **data
, size_t *datalen
)
108 if (recval
.dsize
< sizeof(uint32_t)) {
109 /* Fresh or invalid record */
114 found_locks
= IVAL(recval
.dptr
, 0);
115 recval
.dptr
+= sizeof(uint32_t);
116 recval
.dsize
-= sizeof(uint32_t);
118 if (found_locks
> recval
.dsize
/G_LOCK_REC_LENGTH
) {
123 if (found_locks
<= num_locks
) {
126 for (i
=0; i
<found_locks
; i
++) {
127 g_lock_rec_get(&locks
[i
], recval
.dptr
);
128 recval
.dptr
+= G_LOCK_REC_LENGTH
;
129 recval
.dsize
-= G_LOCK_REC_LENGTH
;
133 * Not enough space passed in by the caller, don't
136 recval
.dptr
+= found_locks
* G_LOCK_REC_LENGTH
;
137 recval
.dsize
-= found_locks
* G_LOCK_REC_LENGTH
;
144 if (datalen
!= NULL
) {
145 *datalen
= recval
.dsize
;
150 static NTSTATUS
g_lock_get_talloc(TALLOC_CTX
*mem_ctx
, TDB_DATA recval
,
151 struct g_lock_rec
**plocks
,
153 uint8_t **data
, size_t *datalen
)
155 struct g_lock_rec
*locks
;
158 num_locks
= g_lock_get(recval
, NULL
, 0, NULL
, NULL
);
159 if (num_locks
== -1) {
160 return NT_STATUS_INTERNAL_DB_CORRUPTION
;
162 locks
= talloc_array(mem_ctx
, struct g_lock_rec
, num_locks
);
164 return NT_STATUS_NO_MEMORY
;
166 g_lock_get(recval
, locks
, num_locks
, data
, datalen
);
169 *pnum_locks
= num_locks
;
174 struct g_lock_ctx
*g_lock_ctx_init(TALLOC_CTX
*mem_ctx
,
175 struct messaging_context
*msg
)
177 struct g_lock_ctx
*result
;
178 struct db_context
*backend
;
181 result
= talloc(mem_ctx
, struct g_lock_ctx
);
182 if (result
== NULL
) {
187 db_path
= lock_path("g_lock.tdb");
188 if (db_path
== NULL
) {
193 backend
= db_open(result
, db_path
, 0,
194 TDB_CLEAR_IF_FIRST
|TDB_INCOMPATIBLE_HASH
,
195 O_RDWR
|O_CREAT
, 0600,
198 TALLOC_FREE(db_path
);
199 if (backend
== NULL
) {
200 DEBUG(1, ("g_lock_init: Could not open g_lock.tdb\n"));
205 result
->db
= db_open_watched(result
, backend
, msg
);
206 if (result
->db
== NULL
) {
207 DBG_WARNING("g_lock_init: db_open_watched failed\n");
214 static bool g_lock_conflicts(enum g_lock_type l1
, enum g_lock_type l2
)
217 * Only tested write locks so far. Very likely this routine
218 * needs to be fixed for read locks....
220 if ((l1
== G_LOCK_READ
) && (l2
== G_LOCK_READ
)) {
226 static NTSTATUS
g_lock_record_store(struct db_record
*rec
,
227 const struct g_lock_rec
*locks
,
229 const uint8_t *data
, size_t datalen
)
235 len
= g_lock_put(NULL
, 0, locks
, num_locks
, data
, datalen
);
237 return NT_STATUS_BUFFER_TOO_SMALL
;
240 buf
= talloc_array(rec
, uint8_t, len
);
242 return NT_STATUS_NO_MEMORY
;
245 g_lock_put(buf
, len
, locks
, num_locks
, data
, datalen
);
247 status
= dbwrap_record_store(
248 rec
, (TDB_DATA
) { .dptr
= buf
, .dsize
= len
}, 0);
255 static NTSTATUS
g_lock_trylock(struct db_record
*rec
, struct server_id self
,
256 enum g_lock_type type
,
257 struct server_id
*blocker
)
259 TDB_DATA data
, userdata
;
260 size_t i
, num_locks
, my_lock
;
261 struct g_lock_rec
*locks
, *tmp
;
263 bool modified
= false;
265 data
= dbwrap_record_get_value(rec
);
267 status
= g_lock_get_talloc(talloc_tos(), data
, &locks
, &num_locks
,
268 &userdata
.dptr
, &userdata
.dsize
);
269 if (!NT_STATUS_IS_OK(status
)) {
273 my_lock
= num_locks
; /* doesn't exist yet */
275 if ((type
== G_LOCK_READ
) && (num_locks
> 0)) {
277 * Read locks can stay around forever if the process
278 * dies. Do a heuristic check for process existence:
279 * Check one random process for existence. Hopefully
280 * this will keep runaway read locks under control.
282 i
= generate_random() % num_locks
;
284 if (!serverid_exists(&locks
[i
].pid
)) {
285 locks
[i
] = locks
[num_locks
-1];
291 for (i
=0; i
<num_locks
; i
++) {
292 struct g_lock_rec
*lock
= &locks
[i
];
294 if (serverid_equal(&self
, &lock
->pid
)) {
295 if (lock
->lock_type
== type
) {
296 status
= NT_STATUS_WAS_LOCKED
;
306 while (i
< num_locks
) {
312 if (g_lock_conflicts(type
, locks
[i
].lock_type
)) {
313 struct server_id pid
= locks
[i
].pid
;
316 * As the serverid_exists might recurse into
317 * the g_lock code, we use
318 * SERVERID_UNIQUE_ID_NOT_TO_VERIFY to avoid the loop
320 pid
.unique_id
= SERVERID_UNIQUE_ID_NOT_TO_VERIFY
;
322 if (serverid_exists(&pid
)) {
323 status
= NT_STATUS_LOCK_NOT_GRANTED
;
324 *blocker
= locks
[i
].pid
;
329 * Delete stale conflicting entry
331 locks
[i
] = locks
[num_locks
-1];
339 if (my_lock
>= num_locks
) {
340 tmp
= talloc_realloc(talloc_tos(), locks
, struct g_lock_rec
,
343 status
= NT_STATUS_NO_MEMORY
;
351 locks
[my_lock
] = (struct g_lock_rec
){ .pid
= self
, .lock_type
= type
};
354 status
= NT_STATUS_OK
;
357 NTSTATUS store_status
;
358 store_status
= g_lock_record_store(
359 rec
, locks
, num_locks
, userdata
.dptr
, userdata
.dsize
);
360 if (!NT_STATUS_IS_OK(store_status
)) {
361 DBG_WARNING("g_lock_record_store failed: %s\n",
362 nt_errstr(store_status
));
363 status
= store_status
;
370 struct g_lock_lock_state
{
371 struct tevent_context
*ev
;
372 struct g_lock_ctx
*ctx
;
374 enum g_lock_type type
;
377 static void g_lock_lock_retry(struct tevent_req
*subreq
);
379 struct tevent_req
*g_lock_lock_send(TALLOC_CTX
*mem_ctx
,
380 struct tevent_context
*ev
,
381 struct g_lock_ctx
*ctx
,
383 enum g_lock_type type
)
385 struct tevent_req
*req
, *subreq
;
386 struct g_lock_lock_state
*state
;
387 struct db_record
*rec
;
388 struct server_id self
, blocker
;
391 req
= tevent_req_create(mem_ctx
, &state
, struct g_lock_lock_state
);
400 rec
= dbwrap_fetch_locked(ctx
->db
, talloc_tos(),
401 string_term_tdb_data(state
->name
));
403 DEBUG(10, ("fetch_locked(\"%s\") failed\n", name
));
404 tevent_req_nterror(req
, NT_STATUS_LOCK_NOT_GRANTED
);
405 return tevent_req_post(req
, ev
);
408 self
= messaging_server_id(state
->ctx
->msg
);
410 status
= g_lock_trylock(rec
, self
, state
->type
, &blocker
);
411 if (NT_STATUS_IS_OK(status
)) {
413 tevent_req_done(req
);
414 return tevent_req_post(req
, ev
);
416 if (!NT_STATUS_EQUAL(status
, NT_STATUS_LOCK_NOT_GRANTED
)) {
418 tevent_req_nterror(req
, status
);
419 return tevent_req_post(req
, ev
);
421 subreq
= dbwrap_watched_watch_send(state
, state
->ev
, rec
, blocker
);
423 if (tevent_req_nomem(subreq
, req
)) {
424 return tevent_req_post(req
, ev
);
426 if (!tevent_req_set_endtime(
428 timeval_current_ofs(5 + sys_random() % 5, 0))) {
430 return tevent_req_post(req
, ev
);
432 tevent_req_set_callback(subreq
, g_lock_lock_retry
, req
);
436 static void g_lock_lock_retry(struct tevent_req
*subreq
)
438 struct tevent_req
*req
= tevent_req_callback_data(
439 subreq
, struct tevent_req
);
440 struct g_lock_lock_state
*state
= tevent_req_data(
441 req
, struct g_lock_lock_state
);
442 struct server_id self
= messaging_server_id(state
->ctx
->msg
);
443 struct server_id blocker
;
444 struct db_record
*rec
;
447 status
= dbwrap_watched_watch_recv(subreq
, talloc_tos(), &rec
, NULL
,
451 if (NT_STATUS_EQUAL(status
, NT_STATUS_IO_TIMEOUT
)) {
452 rec
= dbwrap_fetch_locked(
453 state
->ctx
->db
, talloc_tos(),
454 string_term_tdb_data(state
->name
));
456 status
= map_nt_error_from_unix(errno
);
458 status
= NT_STATUS_OK
;
462 if (tevent_req_nterror(req
, status
)) {
465 status
= g_lock_trylock(rec
, self
, state
->type
, &blocker
);
466 if (NT_STATUS_IS_OK(status
)) {
468 tevent_req_done(req
);
471 if (!NT_STATUS_EQUAL(status
, NT_STATUS_LOCK_NOT_GRANTED
)) {
473 tevent_req_nterror(req
, status
);
476 subreq
= dbwrap_watched_watch_send(state
, state
->ev
, rec
, blocker
);
478 if (tevent_req_nomem(subreq
, req
)) {
481 if (!tevent_req_set_endtime(
483 timeval_current_ofs(5 + sys_random() % 5, 0))) {
487 tevent_req_set_callback(subreq
, g_lock_lock_retry
, req
);
492 NTSTATUS
g_lock_lock_recv(struct tevent_req
*req
)
494 return tevent_req_simple_recv_ntstatus(req
);
497 NTSTATUS
g_lock_lock(struct g_lock_ctx
*ctx
, const char *name
,
498 enum g_lock_type type
, struct timeval timeout
)
500 TALLOC_CTX
*frame
= talloc_stackframe();
501 struct tevent_context
*ev
;
502 struct tevent_req
*req
;
504 NTSTATUS status
= NT_STATUS_NO_MEMORY
;
506 ev
= samba_tevent_context_init(frame
);
510 req
= g_lock_lock_send(frame
, ev
, ctx
, name
, type
);
514 end
= timeval_current_ofs(timeout
.tv_sec
, timeout
.tv_usec
);
515 if (!tevent_req_set_endtime(req
, ev
, end
)) {
518 if (!tevent_req_poll_ntstatus(req
, ev
, &status
)) {
521 status
= g_lock_lock_recv(req
);
527 NTSTATUS
g_lock_unlock(struct g_lock_ctx
*ctx
, const char *name
)
529 struct server_id self
= messaging_server_id(ctx
->msg
);
530 struct db_record
*rec
= NULL
;
531 struct g_lock_rec
*locks
= NULL
;
534 TDB_DATA value
, userdata
;
536 rec
= dbwrap_fetch_locked(ctx
->db
, talloc_tos(),
537 string_term_tdb_data(name
));
539 DEBUG(10, ("fetch_locked(\"%s\") failed\n", name
));
540 status
= NT_STATUS_INTERNAL_ERROR
;
544 value
= dbwrap_record_get_value(rec
);
546 status
= g_lock_get_talloc(talloc_tos(), value
, &locks
, &num_locks
,
547 &userdata
.dptr
, &userdata
.dsize
);
548 if (!NT_STATUS_IS_OK(status
)) {
549 DBG_DEBUG("g_lock_get for %s failed: %s\n", name
,
551 status
= NT_STATUS_FILE_INVALID
;
554 for (i
=0; i
<num_locks
; i
++) {
555 if (serverid_equal(&self
, &locks
[i
].pid
)) {
559 if (i
== num_locks
) {
560 DBG_DEBUG("Lock not found, num_locks=%zu\n", num_locks
);
561 status
= NT_STATUS_NOT_FOUND
;
565 locks
[i
] = locks
[num_locks
-1];
568 if ((num_locks
== 0) && (userdata
.dsize
== 0)) {
569 status
= dbwrap_record_delete(rec
);
571 status
= g_lock_record_store(
572 rec
, locks
, num_locks
, userdata
.dptr
, userdata
.dsize
);
574 if (!NT_STATUS_IS_OK(status
)) {
575 DBG_WARNING("Could not store record: %s\n", nt_errstr(status
));
579 status
= NT_STATUS_OK
;
586 NTSTATUS
g_lock_write_data(struct g_lock_ctx
*ctx
, const char *name
,
587 const uint8_t *buf
, size_t buflen
)
589 struct server_id self
= messaging_server_id(ctx
->msg
);
590 struct db_record
*rec
= NULL
;
591 struct g_lock_rec
*locks
= NULL
;
596 rec
= dbwrap_fetch_locked(ctx
->db
, talloc_tos(),
597 string_term_tdb_data(name
));
599 DEBUG(10, ("fetch_locked(\"%s\") failed\n", name
));
600 status
= NT_STATUS_INTERNAL_ERROR
;
604 value
= dbwrap_record_get_value(rec
);
606 status
= g_lock_get_talloc(talloc_tos(), value
, &locks
, &num_locks
,
608 if (!NT_STATUS_IS_OK(status
)) {
609 DBG_DEBUG("g_lock_get for %s failed: %s\n", name
,
611 status
= NT_STATUS_FILE_INVALID
;
615 for (i
=0; i
<num_locks
; i
++) {
616 if (server_id_equal(&self
, &locks
[i
].pid
) &&
617 (locks
[i
].lock_type
== G_LOCK_WRITE
)) {
621 if (i
== num_locks
) {
622 DBG_DEBUG("Not locked by us\n");
623 status
= NT_STATUS_NOT_LOCKED
;
627 status
= g_lock_record_store(rec
, locks
, num_locks
, buf
, buflen
);
/*
 * Traverse helper state: user callback plus its private data.
 */
struct g_lock_locks_state {
	int (*fn)(const char *name, void *private_data);
	void *private_data;
};
640 static int g_lock_locks_fn(struct db_record
*rec
, void *priv
)
643 struct g_lock_locks_state
*state
= (struct g_lock_locks_state
*)priv
;
645 key
= dbwrap_record_get_key(rec
);
646 if ((key
.dsize
== 0) || (key
.dptr
[key
.dsize
-1] != 0)) {
647 DEBUG(1, ("invalid key in g_lock.tdb, ignoring\n"));
650 return state
->fn((char *)key
.dptr
, state
->private_data
);
653 int g_lock_locks(struct g_lock_ctx
*ctx
,
654 int (*fn
)(const char *name
, void *private_data
),
657 struct g_lock_locks_state state
;
662 state
.private_data
= private_data
;
664 status
= dbwrap_traverse_read(ctx
->db
, g_lock_locks_fn
, &state
, &count
);
665 if (!NT_STATUS_IS_OK(status
)) {
671 NTSTATUS
g_lock_dump(struct g_lock_ctx
*ctx
, const char *name
,
672 void (*fn
)(const struct g_lock_rec
*locks
,
681 struct g_lock_rec
*locks
= NULL
;
686 status
= dbwrap_fetch_bystring(ctx
->db
, talloc_tos(), name
, &data
);
687 if (!NT_STATUS_IS_OK(status
)) {
691 if ((data
.dsize
== 0) || (data
.dptr
== NULL
)) {
695 status
= g_lock_get_talloc(talloc_tos(), data
, &locks
, &num_locks
,
696 &userdata
, &userdatalen
);
698 if (!NT_STATUS_IS_OK(status
)) {
699 DBG_DEBUG("g_lock_get for %s failed: %s\n", name
,
701 TALLOC_FREE(data
.dptr
);
702 return NT_STATUS_INTERNAL_ERROR
;
705 fn(locks
, num_locks
, userdata
, userdatalen
, private_data
);
708 TALLOC_FREE(data
.dptr
);
712 static bool g_lock_init_all(TALLOC_CTX
*mem_ctx
,
713 struct tevent_context
**pev
,
714 struct messaging_context
**pmsg
,
715 struct g_lock_ctx
**pg_ctx
)
717 struct tevent_context
*ev
= NULL
;
718 struct messaging_context
*msg
= NULL
;
719 struct g_lock_ctx
*g_ctx
= NULL
;
721 ev
= samba_tevent_context_init(mem_ctx
);
723 d_fprintf(stderr
, "ERROR: could not init event context\n");
726 msg
= messaging_init(mem_ctx
, ev
);
728 d_fprintf(stderr
, "ERROR: could not init messaging context\n");
731 g_ctx
= g_lock_ctx_init(mem_ctx
, msg
);
733 d_fprintf(stderr
, "ERROR: could not init g_lock context\n");
748 NTSTATUS
g_lock_do(const char *name
, enum g_lock_type lock_type
,
749 struct timeval timeout
,
750 void (*fn
)(void *private_data
), void *private_data
)
752 struct tevent_context
*ev
= NULL
;
753 struct messaging_context
*msg
= NULL
;
754 struct g_lock_ctx
*g_ctx
= NULL
;
757 if (!g_lock_init_all(talloc_tos(), &ev
, &msg
, &g_ctx
)) {
758 status
= NT_STATUS_ACCESS_DENIED
;
762 status
= g_lock_lock(g_ctx
, name
, lock_type
, timeout
);
763 if (!NT_STATUS_IS_OK(status
)) {
767 g_lock_unlock(g_ctx
, name
);