s3: Use cli_writeall instead of cli_write
[Samba.git] / source3 / lib / g_lock.c
blob 184da9b4fdec8f7af6d054d794308619f892d722
/*
   Unix SMB/CIFS implementation.
   global locks based on dbwrap and messaging
   Copyright (C) 2009 by Volker Lendecke

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "system/filesys.h"
#include "g_lock.h"
#include "ctdbd_conn.h"
#include "../lib/util/select.h"
#include "system/select.h"
#include "messages.h"

static NTSTATUS g_lock_force_unlock(struct g_lock_ctx *ctx, const char *name,
				    struct server_id pid);

struct g_lock_ctx {
	struct db_context *db;
	struct messaging_context *msg;
};

/*
 * The "g_lock.tdb" file contains records, indexed by the 0-terminated
 * lockname. The record contains an array of "struct g_lock_rec"
 * structures. Waiters have the lock_type with G_LOCK_PENDING or'ed.
 */

struct g_lock_rec {
	enum g_lock_type lock_type;
	struct server_id pid;
};

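/*
 * Open (or create) g_lock.tdb and set up a lock context. Returns NULL
 * on failure; the result is talloc'ed off mem_ctx.
 */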
struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
				   struct messaging_context *msg)
{
	struct g_lock_ctx *result;

	result = talloc(mem_ctx, struct g_lock_ctx);
	if (result == NULL) {
		return NULL;
	}
	result->msg = msg;

	result->db = db_open(result, lock_path("g_lock.tdb"), 0,
			     TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
			     O_RDWR|O_CREAT, 0700);
	if (result->db == NULL) {
		DEBUG(1, ("g_lock_init: Could not open g_lock.tdb\n"));
		TALLOC_FREE(result);
		return NULL;
	}
	return result;
}

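/*
 * Does an existing record conflict with the lock type we want to take?
 * Pending waiters never conflict, and two read locks are compatible.
 */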
static bool g_lock_conflicts(enum g_lock_type lock_type,
			     const struct g_lock_rec *rec)
{
	enum g_lock_type rec_lock = rec->lock_type;

	if ((rec_lock & G_LOCK_PENDING) != 0) {
		return false;
	}

	/*
	 * Only tested write locks so far. Very likely this routine
	 * needs to be fixed for read locks....
	 */
	if ((lock_type == G_LOCK_READ) && (rec_lock == G_LOCK_READ)) {
		return false;
	}
	return true;
}

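/*
 * Parse a g_lock.tdb record into a talloc'ed array of struct
 * g_lock_rec. Non-pending entries whose owning process has died are
 * dropped on the fly; *pnum_locks reflects the surviving entries.
 */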
static bool g_lock_parse(TALLOC_CTX *mem_ctx, TDB_DATA data,
			 int *pnum_locks, struct g_lock_rec **plocks)
{
	int i, num_locks;
	struct g_lock_rec *locks;

	if ((data.dsize % sizeof(struct g_lock_rec)) != 0) {
		DEBUG(1, ("invalid lock record length %d\n", (int)data.dsize));
		return false;
	}

	num_locks = data.dsize / sizeof(struct g_lock_rec);
	locks = talloc_array(mem_ctx, struct g_lock_rec, num_locks);
	if (locks == NULL) {
		DEBUG(1, ("talloc failed\n"));
		return false;
	}

	memcpy(locks, data.dptr, data.dsize);

	DEBUG(10, ("locks:\n"));
	for (i=0; i<num_locks; i++) {
		DEBUGADD(10, ("%s: %s %s\n",
			      procid_str(talloc_tos(), &locks[i].pid),
			      ((locks[i].lock_type & 1) == G_LOCK_READ) ?
			      "read" : "write",
			      (locks[i].lock_type & G_LOCK_PENDING) ?
			      "(pending)" : "(owner)"));

		if (((locks[i].lock_type & G_LOCK_PENDING) == 0)
		    && !process_exists(locks[i].pid)) {

			DEBUGADD(10, ("lock owner %s died -- discarding\n",
				      procid_str(talloc_tos(),
						 &locks[i].pid)));

			if (i < (num_locks-1)) {
				locks[i] = locks[num_locks-1];
			}
			num_locks -= 1;
		}
	}

	*plocks = locks;
	*pnum_locks = num_locks;
	return true;
}

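/*
 * Drop entries belonging to processes that no longer exist. The array
 * is compacted in place and *pnum_locks is updated.
 */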
static void g_lock_cleanup(int *pnum_locks, struct g_lock_rec *locks)
{
	int i, num_locks;

	num_locks = *pnum_locks;

	DEBUG(10, ("g_lock_cleanup: %d locks\n", num_locks));

	for (i=0; i<num_locks; i++) {
		if (process_exists(locks[i].pid)) {
			continue;
		}
		DEBUGADD(10, ("%s does not exist -- discarding\n",
			      procid_str(talloc_tos(), &locks[i].pid)));

		if (i < (num_locks-1)) {
			locks[i] = locks[num_locks-1];
		}
		num_locks -= 1;
	}
	*pnum_locks = num_locks;
	return;
}

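/*
 * Append an entry for "pid" to the lock record array. Returns the
 * (possibly reallocated) array, or NULL on allocation failure.
 */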
static struct g_lock_rec *g_lock_addrec(TALLOC_CTX *mem_ctx,
					struct g_lock_rec *locks,
					int *pnum_locks,
					const struct server_id pid,
					enum g_lock_type lock_type)
{
	struct g_lock_rec *result;
	int num_locks = *pnum_locks;

	result = talloc_realloc(mem_ctx, locks, struct g_lock_rec,
				num_locks+1);
	if (result == NULL) {
		return NULL;
	}

	result[num_locks].pid = pid;
	result[num_locks].lock_type = lock_type;
	*pnum_locks += 1;
	return result;
}

static void g_lock_got_retry(struct messaging_context *msg,
			     void *private_data,
			     uint32_t msg_type,
			     struct server_id server_id,
			     DATA_BLOB *data);

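/*
 * Try to take the lock while holding the record lock returned by
 * fetch_locked. If a live conflicting holder exists, we store
 * ourselves as a pending waiter and return STATUS_PENDING; dead
 * holders are force-unlocked and the attempt is restarted.
 */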
static NTSTATUS g_lock_trylock(struct g_lock_ctx *ctx, const char *name,
			       enum g_lock_type lock_type)
{
	struct db_record *rec = NULL;
	struct g_lock_rec *locks = NULL;
	int i, num_locks;
	struct server_id self;
	int our_index;
	TDB_DATA data;
	NTSTATUS status = NT_STATUS_OK;
	NTSTATUS store_status;

again:
	rec = ctx->db->fetch_locked(ctx->db, talloc_tos(),
				    string_term_tdb_data(name));
	if (rec == NULL) {
		DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
		status = NT_STATUS_LOCK_NOT_GRANTED;
		goto done;
	}

	if (!g_lock_parse(talloc_tos(), rec->value, &num_locks, &locks)) {
		DEBUG(10, ("g_lock_parse for %s failed\n", name));
		status = NT_STATUS_INTERNAL_ERROR;
		goto done;
	}

	self = messaging_server_id(ctx->msg);
	our_index = -1;

	for (i=0; i<num_locks; i++) {
		if (procid_equal(&self, &locks[i].pid)) {
			if (our_index != -1) {
				DEBUG(1, ("g_lock_trylock: Added ourself "
					  "twice!\n"));
				status = NT_STATUS_INTERNAL_ERROR;
				goto done;
			}
			if ((locks[i].lock_type & G_LOCK_PENDING) == 0) {
				DEBUG(1, ("g_lock_trylock: Found ourself not "
					  "pending!\n"));
				status = NT_STATUS_INTERNAL_ERROR;
				goto done;
			}

			our_index = i;

			/* never conflict with ourself */
			continue;
		}
		if (g_lock_conflicts(lock_type, &locks[i])) {
			struct server_id pid = locks[i].pid;

			if (!process_exists(pid)) {
				TALLOC_FREE(locks);
				TALLOC_FREE(rec);
				status = g_lock_force_unlock(ctx, name, pid);
				if (!NT_STATUS_IS_OK(status)) {
					DEBUG(1, ("Could not unlock dead lock "
						  "holder!\n"));
					goto done;
				}
				goto again;
			}
			lock_type |= G_LOCK_PENDING;
		}
	}

	if (our_index == -1) {
		/* First round, add ourself */

		locks = g_lock_addrec(talloc_tos(), locks, &num_locks,
				      self, lock_type);
		if (locks == NULL) {
			DEBUG(10, ("g_lock_addrec failed\n"));
			status = NT_STATUS_NO_MEMORY;
			goto done;
		}
	} else {
		/*
		 * Retry. We were pending last time. Overwrite the
		 * stored lock_type with what we calculated, we might
		 * have acquired the lock this time.
		 */
		locks[our_index].lock_type = lock_type;
	}

	if (NT_STATUS_IS_OK(status) && ((lock_type & G_LOCK_PENDING) == 0)) {
		/*
		 * Walk through the list of locks, search for dead entries
		 */
		g_lock_cleanup(&num_locks, locks);
	}

	data = make_tdb_data((uint8_t *)locks, num_locks * sizeof(*locks));
	store_status = rec->store(rec, data, 0);
	if (!NT_STATUS_IS_OK(store_status)) {
		DEBUG(1, ("rec->store failed: %s\n",
			  nt_errstr(store_status)));
		status = store_status;
	}

done:
	TALLOC_FREE(locks);
	TALLOC_FREE(rec);

	if (NT_STATUS_IS_OK(status) && (lock_type & G_LOCK_PENDING) != 0) {
		return STATUS_PENDING;
	}

	return status;
}

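/*
 * Grab a global lock, waiting up to "timeout" for conflicting holders
 * to go away. While waiting we poll the ctdb connection in the
 * clustered case and wake up at least once a minute to retry; see the
 * HACK ALERT comment below for why this does not use tevent.
 *
 * Minimal usage sketch (the lock name here is made up):
 *
 *	status = g_lock_lock(ctx, "my-lock", G_LOCK_WRITE,
 *			     timeval_set(10, 0));
 *	if (NT_STATUS_IS_OK(status)) {
 *		... critical section ...
 *		g_lock_unlock(ctx, "my-lock");
 *	}
 */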
NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, const char *name,
		     enum g_lock_type lock_type, struct timeval timeout)
{
	struct tevent_timer *te = NULL;
	NTSTATUS status;
	bool retry = false;
	struct timeval timeout_end;
	struct timeval time_now;

	DEBUG(10, ("Trying to acquire lock %d for %s\n", (int)lock_type,
		   name));

	if (lock_type & ~1) {
		DEBUG(1, ("Got invalid lock type %d for %s\n",
			  (int)lock_type, name));
		return NT_STATUS_INVALID_PARAMETER;
	}

#ifdef CLUSTER_SUPPORT
	if (lp_clustering()) {
		status = ctdb_watch_us(messaging_ctdbd_connection());
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(10, ("could not register retry with ctdb: %s\n",
				   nt_errstr(status)));
			goto done;
		}
	}
#endif

	status = messaging_register(ctx->msg, &retry, MSG_DBWRAP_G_LOCK_RETRY,
				    g_lock_got_retry);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(10, ("messaging_register failed: %s\n",
			   nt_errstr(status)));
		return status;
	}

	time_now = timeval_current();
	timeout_end = timeval_sum(&time_now, &timeout);

	while (true) {
		struct pollfd *pollfds;
		int num_pollfds;
		int saved_errno;
		int ret;
		struct timeval timeout_remaining, select_timeout;

		status = g_lock_trylock(ctx, name, lock_type);
		if (NT_STATUS_IS_OK(status)) {
			DEBUG(10, ("Got lock %s\n", name));
			break;
		}
		if (!NT_STATUS_EQUAL(status, STATUS_PENDING)) {
			DEBUG(10, ("g_lock_trylock failed: %s\n",
				   nt_errstr(status)));
			break;
		}

		DEBUG(10, ("g_lock_trylock: Did not get lock, waiting...\n"));

		/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		 * !!! HACK ALERT --- FIX ME !!!
		 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		 *
		 * What we really want to do here is to react to
		 * MSG_DBWRAP_G_LOCK_RETRY messages that are either sent
		 * by a client doing g_lock_unlock or by ourselves when
		 * we receive a CTDB_SRVID_SAMBA_NOTIFY or
		 * CTDB_SRVID_RECONFIGURE message from ctdbd, i.e. when
		 * either a client holding a lock or a complete node
		 * has died.
		 *
		 * Doing this properly involves calling tevent_loop_once(),
		 * but doing this here with the main ctdbd messaging context
		 * creates a nested event loop when g_lock_lock() is called
		 * from the main event loop, e.g. in a tcon_and_X where the
		 * share_info.tdb needs to be initialized and is locked by
		 * another process, or when the remote registry is accessed
		 * for writing and some other process already holds a lock
		 * on the registry.tdb.
		 *
		 * So as a quick fix, we act a little coarsely here: we do
		 * a select on the ctdb connection fd and when it is readable
		 * or we get EINTR, then we retry without actually parsing
		 * any ctdb packets or dispatching messages. This means that
		 * we retry more often than intended by design, but this does
		 * not harm and it is unobtrusive. When we have finished,
		 * the main loop will pick up all the messages and ctdb
		 * packets. The only extra twist is that we cannot use timed
		 * events here but have to handcode a timeout.
		 */

		/*
		 * We allocate 2 entries here. One is needed anyway for
		 * sys_poll and in the clustering case we might have to add
		 * the ctdb fd. This avoids the realloc then.
		 */
		pollfds = TALLOC_ARRAY(talloc_tos(), struct pollfd, 2);
		if (pollfds == NULL) {
			status = NT_STATUS_NO_MEMORY;
			break;
		}
		num_pollfds = 0;

#ifdef CLUSTER_SUPPORT
		if (lp_clustering()) {
			struct ctdbd_connection *conn;
			conn = messaging_ctdbd_connection();

			pollfds[0].fd = ctdbd_conn_get_fd(conn);
			pollfds[0].events = POLLIN|POLLHUP;

			num_pollfds += 1;
		}
#endif

		time_now = timeval_current();
		timeout_remaining = timeval_until(&time_now, &timeout_end);
		select_timeout = timeval_set(60, 0);

		select_timeout = timeval_min(&select_timeout,
					     &timeout_remaining);

		ret = sys_poll(pollfds, num_pollfds,
			       timeval_to_msec(select_timeout));

		/*
		 * We're not *really* interested in the actual flags. We just
		 * need to retry this whole thing.
		 */
		saved_errno = errno;
		TALLOC_FREE(pollfds);
		errno = saved_errno;

		if (ret == -1) {
			if (errno != EINTR) {
				DEBUG(1, ("error calling select: %s\n",
					  strerror(errno)));
				status = NT_STATUS_INTERNAL_ERROR;
				break;
			}
			/*
			 * errno == EINTR:
			 * This means a signal was received.
			 * It might have been a MSG_DBWRAP_G_LOCK_RETRY message.
			 * ==> retry
			 */
		} else if (ret == 0) {
			if (timeval_expired(&timeout_end)) {
				DEBUG(10, ("g_lock_lock timed out\n"));
				status = NT_STATUS_LOCK_NOT_GRANTED;
				break;
			} else {
				DEBUG(10, ("select returned 0 but timeout "
					   "not expired, retrying\n"));
			}
		} else if (ret != 1) {
			DEBUG(1, ("invalid return code of select: %d\n", ret));
			status = NT_STATUS_INTERNAL_ERROR;
			break;
		}
		/*
		 * ret == 1:
		 * This means ctdbd has sent us some data.
		 * Might be a CTDB_SRVID_RECONFIGURE or a
		 * CTDB_SRVID_SAMBA_NOTIFY message.
		 * ==> retry
		 */
	}

#ifdef CLUSTER_SUPPORT
done:
#endif

	if (!NT_STATUS_IS_OK(status)) {
		NTSTATUS unlock_status;

		unlock_status = g_lock_unlock(ctx, name);

		if (!NT_STATUS_IS_OK(unlock_status)) {
			DEBUG(1, ("Could not remove ourself from the locking "
				  "db: %s\n", nt_errstr(status)));
		}
	}

	messaging_deregister(ctx->msg, MSG_DBWRAP_G_LOCK_RETRY, &retry);
	TALLOC_FREE(te);

	return status;
}

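/*
 * Handler for MSG_DBWRAP_G_LOCK_RETRY messages: flag in the registered
 * bool that a retry was requested.
 */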
static void g_lock_got_retry(struct messaging_context *msg,
			     void *private_data,
			     uint32_t msg_type,
			     struct server_id server_id,
			     DATA_BLOB *data)
{
	bool *pretry = (bool *)private_data;

	DEBUG(10, ("Got retry message from pid %s\n",
		   procid_str(talloc_tos(), &server_id)));

	*pretry = true;
}

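/*
 * Remove the entry of "pid" from the lock record of "name". If the
 * removed entry was the lock holder, ping a handful of pending waiters
 * with MSG_DBWRAP_G_LOCK_RETRY (not all of them, to avoid a thundering
 * herd).
 */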
static NTSTATUS g_lock_force_unlock(struct g_lock_ctx *ctx, const char *name,
				    struct server_id pid)
{
	struct db_record *rec = NULL;
	struct g_lock_rec *locks = NULL;
	int i, num_locks;
	enum g_lock_type lock_type;
	NTSTATUS status;

	rec = ctx->db->fetch_locked(ctx->db, talloc_tos(),
				    string_term_tdb_data(name));
	if (rec == NULL) {
		DEBUG(10, ("fetch_locked(\"%s\") failed\n", name));
		status = NT_STATUS_INTERNAL_ERROR;
		goto done;
	}

	if (!g_lock_parse(talloc_tos(), rec->value, &num_locks, &locks)) {
		DEBUG(10, ("g_lock_parse for %s failed\n", name));
		status = NT_STATUS_INTERNAL_ERROR;
		goto done;
	}

	for (i=0; i<num_locks; i++) {
		if (procid_equal(&pid, &locks[i].pid)) {
			break;
		}
	}

	if (i == num_locks) {
		DEBUG(10, ("g_lock_force_unlock: Lock not found\n"));
		status = NT_STATUS_INTERNAL_ERROR;
		goto done;
	}

	lock_type = locks[i].lock_type;

	if (i < (num_locks-1)) {
		locks[i] = locks[num_locks-1];
	}
	num_locks -= 1;

	if (num_locks == 0) {
		status = rec->delete_rec(rec);
	} else {
		TDB_DATA data;
		data = make_tdb_data((uint8_t *)locks,
				     sizeof(struct g_lock_rec) * num_locks);
		status = rec->store(rec, data, 0);
	}

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("g_lock_force_unlock: Could not store record: %s\n",
			  nt_errstr(status)));
		goto done;
	}

	TALLOC_FREE(rec);

	if ((lock_type & G_LOCK_PENDING) == 0) {
		int num_wakeups = 0;

		/*
		 * We've been the lock holder. Tell others to retry, but
		 * don't tell all of them, to avoid a thundering herd. In
		 * case this leads to a complete stall because we miss
		 * some processes, the loop in g_lock_lock tries at least
		 * once a minute.
		 */

		for (i=0; i<num_locks; i++) {
			if ((locks[i].lock_type & G_LOCK_PENDING) == 0) {
				continue;
			}
			if (!process_exists(locks[i].pid)) {
				continue;
			}

			/*
			 * Ping all waiters to retry
			 */
			status = messaging_send(ctx->msg, locks[i].pid,
						MSG_DBWRAP_G_LOCK_RETRY,
						&data_blob_null);
			if (!NT_STATUS_IS_OK(status)) {
				DEBUG(1, ("sending retry to %s failed: %s\n",
					  procid_str(talloc_tos(),
						     &locks[i].pid),
					  nt_errstr(status)));
			} else {
				num_wakeups += 1;
			}
			if (num_wakeups > 5) {
				break;
			}
		}
	}
done:
	/*
	 * For the error path, TALLOC_FREE(rec) as well. In the good
	 * path we have already freed it.
	 */
	TALLOC_FREE(rec);

	TALLOC_FREE(locks);
	return status;
}

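/*
 * Release our own lock (or pending entry) on "name". In the clustered
 * case also drop the ctdb watch registered in g_lock_lock.
 */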
NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, const char *name)
{
	NTSTATUS status;

	status = g_lock_force_unlock(ctx, name, messaging_server_id(ctx->msg));

#ifdef CLUSTER_SUPPORT
	if (lp_clustering()) {
		ctdb_unwatch(messaging_ctdbd_connection());
	}
#endif
	return status;
}

struct g_lock_locks_state {
	int (*fn)(const char *name, void *private_data);
	void *private_data;
};

static int g_lock_locks_fn(struct db_record *rec, void *priv)
{
	struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;

	if ((rec->key.dsize == 0) || (rec->key.dptr[rec->key.dsize-1] != 0)) {
		DEBUG(1, ("invalid key in g_lock.tdb, ignoring\n"));
		return 0;
	}
	return state->fn((char *)rec->key.dptr, state->private_data);
}

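/*
 * Call "fn" once for every lock name currently present in g_lock.tdb.
 */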
int g_lock_locks(struct g_lock_ctx *ctx,
		 int (*fn)(const char *name, void *private_data),
		 void *private_data)
{
	struct g_lock_locks_state state;

	state.fn = fn;
	state.private_data = private_data;

	return ctx->db->traverse_read(ctx->db, g_lock_locks_fn, &state);
}

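/*
 * Walk the individual entries (holders and waiters) of one lock
 * record, calling "fn" for each until it returns non-zero.
 */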
NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, const char *name,
		     int (*fn)(struct server_id pid,
			       enum g_lock_type lock_type,
			       void *private_data),
		     void *private_data)
{
	TDB_DATA data;
	int i, num_locks;
	struct g_lock_rec *locks = NULL;
	bool ret;

	if (ctx->db->fetch(ctx->db, talloc_tos(), string_term_tdb_data(name),
			   &data) != 0) {
		return NT_STATUS_NOT_FOUND;
	}

	if ((data.dsize == 0) || (data.dptr == NULL)) {
		return NT_STATUS_OK;
	}

	ret = g_lock_parse(talloc_tos(), data, &num_locks, &locks);

	TALLOC_FREE(data.dptr);

	if (!ret) {
		DEBUG(10, ("g_lock_parse for %s failed\n", name));
		return NT_STATUS_INTERNAL_ERROR;
	}

	for (i=0; i<num_locks; i++) {
		if (fn(locks[i].pid, locks[i].lock_type, private_data) != 0) {
			break;
		}
	}
	TALLOC_FREE(locks);
	return NT_STATUS_OK;
}

struct g_lock_get_state {
	bool found;
	struct server_id *pid;
};

static int g_lock_get_fn(struct server_id pid, enum g_lock_type lock_type,
			 void *priv)
{
	struct g_lock_get_state *state = (struct g_lock_get_state *)priv;

	if ((lock_type & G_LOCK_PENDING) != 0) {
		return 0;
	}

	state->found = true;
	*state->pid = pid;
	return 1;
}

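/*
 * Fetch the server_id of the current (non-pending) holder of "name".
 * Returns NT_STATUS_NOT_FOUND if nobody holds the lock.
 */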
NTSTATUS g_lock_get(struct g_lock_ctx *ctx, const char *name,
		    struct server_id *pid)
{
	struct g_lock_get_state state;
	NTSTATUS status;

	state.found = false;
	state.pid = pid;

	status = g_lock_dump(ctx, name, g_lock_get_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
	if (!state.found) {
		return NT_STATUS_NOT_FOUND;
	}
	return NT_STATUS_OK;
}

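/*
 * Set up the temporary tevent, messaging and g_lock contexts needed
 * for a one-shot g_lock_do() call. All three are allocated on mem_ctx
 * and freed again by the caller.
 */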
static bool g_lock_init_all(TALLOC_CTX *mem_ctx,
			    struct tevent_context **pev,
			    struct messaging_context **pmsg,
			    const struct server_id self,
			    struct g_lock_ctx **pg_ctx)
{
	struct tevent_context *ev = NULL;
	struct messaging_context *msg = NULL;
	struct g_lock_ctx *g_ctx = NULL;

	ev = tevent_context_init(mem_ctx);
	if (ev == NULL) {
		d_fprintf(stderr, "ERROR: could not init event context\n");
		goto fail;
	}
	msg = messaging_init(mem_ctx, self, ev);
	if (msg == NULL) {
		d_fprintf(stderr, "ERROR: could not init messaging context\n");
		goto fail;
	}
	g_ctx = g_lock_ctx_init(mem_ctx, msg);
	if (g_ctx == NULL) {
		d_fprintf(stderr, "ERROR: could not init g_lock context\n");
		goto fail;
	}

	*pev = ev;
	*pmsg = msg;
	*pg_ctx = g_ctx;
	return true;
fail:
	TALLOC_FREE(g_ctx);
	TALLOC_FREE(msg);
	TALLOC_FREE(ev);
	return false;
}

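/*
 * Convenience wrapper: set up temporary contexts, take the lock, run
 * "fn" while holding it and unlock again.
 *
 * Minimal usage sketch (the lock name and callback are made up):
 *
 *	struct server_id self = ...;	/* this process' server_id */
 *	status = g_lock_do("my-task", G_LOCK_WRITE, timeval_set(5, 0),
 *			   self, run_my_task, NULL);
 */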
NTSTATUS g_lock_do(const char *name, enum g_lock_type lock_type,
		   struct timeval timeout, const struct server_id self,
		   void (*fn)(void *private_data), void *private_data)
{
	struct tevent_context *ev = NULL;
	struct messaging_context *msg = NULL;
	struct g_lock_ctx *g_ctx = NULL;
	NTSTATUS status;

	if (!g_lock_init_all(talloc_tos(), &ev, &msg, self, &g_ctx)) {
		status = NT_STATUS_ACCESS_DENIED;
		goto done;
	}

	status = g_lock_lock(g_ctx, name, lock_type, timeout);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}
	fn(private_data);
	g_lock_unlock(g_ctx, name);

done:
	TALLOC_FREE(g_ctx);
	TALLOC_FREE(msg);
	TALLOC_FREE(ev);
	return status;
}