1949 crash during reguid causes stale config
[unleashed.git] / usr/src/uts/common/fs/zfs/txg.c
blob 55b1f3884bf3caa459db1c8c7629251d15bb8422

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska
 */

#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/callb.h>

/*
 * Pool-wide transaction groups.
 */

static void txg_sync_thread(dsl_pool_t *dp);
static void txg_quiesce_thread(dsl_pool_t *dp);

int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */

/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;
	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = kmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}

/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT(tx->tx_threads == 0);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	kmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}

/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT(tx->tx_threads == 0);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 32<<10, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}

static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}

static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}

static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait(cv, &tx->tx_sync_lock,
		    ddi_get_lbolt() + time);
	else
		cv_wait(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);
	/*
	 * Finish off any work in progress.
	 */
	ASSERT(tx->tx_threads == 2);

	/*
	 * We need to ensure that we've vacated the deferred space_maps.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT(tx->tx_threads == 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}

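/*
 * Open a handle on the currently open txg: bump this CPU's count of
 * active holds and return the txg number.  The per-CPU tc_lock is
 * still held on return; txg_rele_to_quiesce() drops it.  A sketch of
 * the hold protocol, as driven from dmu_tx.c:
 *
 *	txg = txg_hold_open(dp, &th);	-- join the open txg
 *	...dirty data, register commit callbacks...
 *	txg_rele_to_quiesce(&th);	-- let the txg quiesce
 *	...
 *	txg_rele_to_sync(&th);		-- drop the hold
 */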
uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc = &tx->tx_cpu[CPU_SEQID];
	uint64_t txg;

	mutex_enter(&tc->tc_lock);

	txg = tx->tx_open_txg;
	tc->tc_count[txg & TXG_MASK]++;

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}

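/*
 * Release the tc_lock acquired in txg_hold_open() so the quiesce
 * thread can advance tx_open_txg; the hold count itself is not
 * dropped until txg_rele_to_sync().
 */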
void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	mutex_exit(&tc->tc_lock);
}

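/*
 * Move the caller's commit callbacks onto this CPU's list for the
 * handle's txg; they will be dispatched once that txg has synced.
 */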
void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}

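/*
 * Drop this handle's hold on its txg.  The last hold to go away wakes
 * txg_quiesce(), which may be waiting for the count to reach zero.
 */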
void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}

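/*
 * Quiesce 'txg': advance tx_open_txg so that no new holds can join
 * this txg, then wait for all existing holds to be released via
 * txg_rele_to_sync().
 */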
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tx_cpu locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_lock);

	/*
	 * Quiesce the transaction group by waiting for everyone to txg_exit().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}
}

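/*
 * Taskq entry point: run the commit callbacks on 'cb_list', then free
 * the list itself.
 */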
static void
txg_do_callbacks(list_t *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}

/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/* No need to lock tx_cpu_t at this point */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    max_ncpus, minclsyspri, max_ncpus, max_ncpus * 2,
			    TASKQ_PREPOPULATE);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		/* move this txg's callbacks off the per-CPU list */
		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}

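/*
 * The sync thread's main loop: wait for the txg timeout to expire (or
 * for someone to demand a sync), obtain a quiesced txg from the
 * quiesce thread, write it out with spa_sync(), and dispatch its
 * commit callbacks.
 */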
static void
txg_sync_thread(dsl_pool_t *dp)
{
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	uint64_t start, delta;

	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		uint64_t timer, timeout = zfs_txg_timeout * hz;
		uint64_t txg;

		/*
		 * We sync when we're scanning, there's someone waiting
		 * on us, or the quiesce thread has handed off a txg to
		 * us, or we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    tx->tx_quiesced_txg == 0) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);
	}
}

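/*
 * The quiesce thread's main loop: wait until someone needs a new txg,
 * quiesce the currently open txg, and hand it off to the sync thread.
 */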
static void
txg_quiesce_thread(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    tx->tx_quiesced_txg != 0))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiesced_txg = txg;
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}

/*
 * Delay this thread by 'ticks' if we are still in the open transaction
 * group and there is already a waiting txg quiescing or quiesced.  Abort
 * the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
{
	tx_state_t *tx = &dp->dp_tx;
	clock_t timeout = ddi_get_lbolt() + ticks;

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (ddi_get_lbolt() < timeout &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp))
		(void) cv_timedwait(&tx->tx_quiesce_more_cv, &tx->tx_sync_lock,
		    timeout);

	mutex_exit(&tx->tx_sync_lock);
}

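/*
 * Block until 'txg' has synced to disk.  A txg of 0 means the
 * currently open txg plus TXG_DEFER_SIZE, which also ensures the
 * deferred space maps have been vacated (see txg_sync_stop()).
 */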
void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%p\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

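/*
 * Block until 'txg' is the open txg, prodding the quiesce thread as
 * needed.  A txg of 0 means the txg following the currently open one.
 */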
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

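/*
 * Report whether the txg pipeline is stalled: someone is waiting for
 * a later txg to open, but the current one has yet to be quiesced.
 */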
boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}

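/*
 * Report whether the sync thread has work pending: a sync has been
 * requested at or beyond the txg currently syncing, or a quiesced txg
 * is waiting to be handed off.
 */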
boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}

/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}

void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty(tl, t));

	mutex_destroy(&tl->tl_lock);
}

int
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

/*
 * Add an entry to the list.
 * Returns 0 if it's a new entry, 1 if it's already there.
 */
int
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	int already_on_list;

	mutex_enter(&tl->tl_lock);
	already_on_list = tn->tn_member[t];
	if (!already_on_list) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (already_on_list);
}

/*
 * Add an entry to the end of the list (walks list to find end).
 * Returns 0 if it's a new entry, 1 if it's already there.
 */
int
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	int already_on_list;

	mutex_enter(&tl->tl_lock);
	already_on_list = tn->tn_member[t];
	if (!already_on_list) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (already_on_list);
}

/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}

/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}

int
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	return (tn->tn_member[t]);
}

/*
 * Walk a txg list -- only safe if you know it's not changing.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = tl->tl_head[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	tn = tn->tn_next[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}