/* kernel/fs/ufs/lufs_debug.c — UFS logging (lufs) debug support (unleashed.git) */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

#pragma ident	"%Z%%M%	%I%	%E%	SMI"

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/ddi.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/cmn_err.h>
#include <sys/fs/ufs_inode.h>
#include <sys/fs/ufs_filio.h>
#include <sys/fs/ufs_log.h>
45 #ifdef DEBUG
48 * DEBUG ROUTINES
49 * THESE ROUTINES ARE ONLY USED WHEN ASSERTS ARE ENABLED
52 static kmutex_t toptracelock;
53 static int toptraceindex;
54 int toptracemax = 1024; /* global so it can be set */
55 struct toptrace {
56 enum delta_type dtyp;
57 kthread_t *thread;
58 dev_t dev;
59 long arg2;
60 long arg3;
61 long long arg1;
62 } *toptrace;
64 static void
65 top_trace(enum delta_type dtyp, dev_t dev, long long arg1, long arg2, long arg3)
67 if (toptrace == NULL) {
68 toptraceindex = 0;
69 toptrace = kmem_zalloc((size_t)
70 (sizeof (struct toptrace) * toptracemax), KM_SLEEP);
72 mutex_enter(&toptracelock);
73 toptrace[toptraceindex].dtyp = dtyp;
74 toptrace[toptraceindex].thread = curthread;
75 toptrace[toptraceindex].dev = dev;
76 toptrace[toptraceindex].arg1 = arg1;
77 toptrace[toptraceindex].arg2 = arg2;
78 toptrace[toptraceindex].arg3 = arg3;
79 if (++toptraceindex == toptracemax)
80 toptraceindex = 0;
81 else {
82 toptrace[toptraceindex].dtyp = (enum delta_type)-1;
83 toptrace[toptraceindex].thread = (kthread_t *)-1;
84 toptrace[toptraceindex].dev = (dev_t)-1;
85 toptrace[toptraceindex].arg1 = -1;
86 toptrace[toptraceindex].arg2 = -1;
89 mutex_exit(&toptracelock);
93 * add a range into the metadata map
95 void
96 top_mataadd(ufsvfs_t *ufsvfsp, offset_t mof, off_t nb)
98 ml_unit_t *ul = ufsvfsp->vfs_log;
100 ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
101 deltamap_add(ul->un_matamap, mof, nb, 0, 0, 0, NULL);
105 * delete a range from the metadata map
107 void
108 top_matadel(ufsvfs_t *ufsvfsp, offset_t mof, off_t nb)
110 ml_unit_t *ul = ufsvfsp->vfs_log;
112 ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
113 ASSERT(!matamap_overlap(ul->un_deltamap, mof, nb));
114 deltamap_del(ul->un_matamap, mof, nb);
118 * clear the entries from the metadata map
120 void
121 top_mataclr(ufsvfs_t *ufsvfsp)
123 ml_unit_t *ul = ufsvfsp->vfs_log;
125 ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
126 map_free_entries(ul->un_matamap);
127 map_free_entries(ul->un_deltamap);
131 top_begin_debug(ml_unit_t *ul, top_t topid, ulong_t size)
133 threadtrans_t *tp;
135 if (ul->un_debug & MT_TRACE)
136 top_trace(DT_BOT, ul->un_dev,
137 (long long)topid, (long)size, (long)0);
139 ASSERT(curthread->t_flag & T_DONTBLOCK);
141 tp = tsd_get(topkey);
142 if (tp == NULL) {
143 tp = kmem_zalloc(sizeof (threadtrans_t), KM_SLEEP);
144 (void) tsd_set(topkey, tp);
146 tp->topid = topid;
147 tp->esize = size;
148 tp->rsize = 0;
149 tp->dev = ul->un_dev;
150 return (1);
154 top_end_debug(ml_unit_t *ul, mt_map_t *mtm, top_t topid, ulong_t size)
156 threadtrans_t *tp;
158 ASSERT(curthread->t_flag & T_DONTBLOCK);
160 ASSERT((tp = (threadtrans_t *)tsd_get(topkey)) != NULL);
162 ASSERT((tp->dev == ul->un_dev) && (tp->topid == topid) &&
163 (tp->esize == size));
165 ASSERT(((ul->un_debug & MT_SIZE) == 0) || (tp->rsize <= tp->esize));
167 mtm->mtm_tops->mtm_top_num[topid]++;
168 mtm->mtm_tops->mtm_top_size_etot[topid] += tp->esize;
169 mtm->mtm_tops->mtm_top_size_rtot[topid] += tp->rsize;
171 if (tp->rsize > mtm->mtm_tops->mtm_top_size_max[topid])
172 mtm->mtm_tops->mtm_top_size_max[topid] = tp->rsize;
173 if (mtm->mtm_tops->mtm_top_size_min[topid] == 0)
174 mtm->mtm_tops->mtm_top_size_min[topid] =
175 tp->rsize;
176 else
177 if (tp->rsize < mtm->mtm_tops->mtm_top_size_min[topid])
178 mtm->mtm_tops->mtm_top_size_min[topid] =
179 tp->rsize;
181 if (ul->un_debug & MT_TRACE)
182 top_trace(DT_EOT, ul->un_dev, (long long)topid,
183 (long)tp->rsize, (long)0);
185 return (1);
189 top_delta_debug(
190 ml_unit_t *ul,
191 offset_t mof,
192 off_t nb,
193 delta_t dtyp)
195 struct threadtrans *tp;
197 ASSERT(curthread->t_flag & T_DONTBLOCK);
200 * check for delta contained fully within matamap
202 ASSERT((ul->un_matamap == NULL) ||
203 matamap_within(ul->un_matamap, mof, nb));
206 * maintain transaction info
208 if (ul->un_debug & MT_TRANSACT)
209 ul->un_logmap->mtm_tops->mtm_delta_num[dtyp]++;
212 * check transaction stuff
214 if (ul->un_debug & MT_TRANSACT) {
215 tp = (struct threadtrans *)tsd_get(topkey);
216 ASSERT(tp);
217 switch (dtyp) {
218 case DT_CANCEL:
219 case DT_ABZERO:
220 if (!matamap_within(ul->un_deltamap, mof, nb))
221 tp->rsize += sizeof (struct delta);
222 break;
223 default:
224 if (!matamap_within(ul->un_deltamap, mof, nb))
225 tp->rsize += nb + sizeof (struct delta);
226 break;
228 } else
229 return (1);
231 if (ul->un_debug & MT_TRACE)
232 top_trace(dtyp, ul->un_dev, mof, (long)nb, (long)0);
234 return (1);
238 top_roll_debug(ml_unit_t *ul)
240 logmap_roll_dev(ul);
241 return (1);
245 top_init_debug(void)
247 mutex_init(&toptracelock, NULL, MUTEX_DEFAULT, NULL);
248 return (1);
251 struct topstats_link {
252 struct topstats_link *ts_next;
253 dev_t ts_dev;
254 struct topstats ts_stats;
256 struct topstats_link *topstats_anchor = NULL;
259 * DEBUG ROUTINES
260 * from debug portion of *_map.c
263 * scan test support
266 logmap_logscan_debug(mt_map_t *mtm, mapentry_t *age)
268 mapentry_t *me;
269 ml_unit_t *ul;
270 off_t head, trimroll, lof;
273 * remember location of youngest rolled delta
275 mutex_enter(&mtm->mtm_mutex);
276 ul = mtm->mtm_ul;
277 head = ul->un_head_lof;
278 trimroll = mtm->mtm_trimrlof;
279 for (me = age; me; me = me->me_agenext) {
280 lof = me->me_lof;
281 if (trimroll == 0)
282 trimroll = lof;
283 if (lof >= head) {
284 if (trimroll >= head && trimroll <= lof)
285 trimroll = lof;
286 } else {
287 if (trimroll <= lof || trimroll >= head)
288 trimroll = lof;
291 mtm->mtm_trimrlof = trimroll;
292 mutex_exit(&mtm->mtm_mutex);
293 return (1);
297 * scan test support
300 logmap_logscan_commit_debug(off_t lof, mt_map_t *mtm)
302 off_t oldtrimc, newtrimc, trimroll;
304 trimroll = mtm->mtm_trimrlof;
305 oldtrimc = mtm->mtm_trimclof;
306 newtrimc = mtm->mtm_trimclof = dbtob(btod(lof));
309 * can't trim prior to transaction w/rolled delta
311 if (trimroll)
312 if (newtrimc >= oldtrimc) {
313 if (trimroll <= newtrimc && trimroll >= oldtrimc)
314 mtm->mtm_trimalof = newtrimc;
315 } else {
316 if (trimroll >= oldtrimc || trimroll <= newtrimc)
317 mtm->mtm_trimalof = newtrimc;
319 return (1);
323 logmap_logscan_add_debug(struct delta *dp, mt_map_t *mtm)
325 if ((dp->d_typ == DT_AB) || (dp->d_typ == DT_INODE))
326 mtm->mtm_trimalof = mtm->mtm_trimclof;
327 return (1);
331 * log-read after log-write
334 map_check_ldl_write(ml_unit_t *ul, caddr_t va, offset_t vamof, mapentry_t *me)
336 caddr_t bufp;
338 ASSERT(me->me_nb);
339 ASSERT((me->me_flags & ME_AGE) == 0);
341 /* Alloc a buf */
342 bufp = kmem_alloc(me->me_nb, KM_SLEEP);
344 /* Do the read */
345 me->me_agenext = NULL;
346 if (ldl_read(ul, bufp, me->me_mof, me->me_nb, me) == 0) {
347 ASSERT(bcmp(bufp, va + (me->me_mof - vamof), me->me_nb) == 0);
350 kmem_free(bufp, me->me_nb);
351 return (1);
355 * Cleanup a map struct
358 map_put_debug(mt_map_t *mtm)
360 struct topstats_link *tsl, **ptsl;
362 if (mtm->mtm_tops == NULL)
363 return (1);
365 /* Don't free this, cause the next snarf will want it */
366 if ((lufs_debug & MT_TRANSACT) != 0)
367 return (1);
369 ptsl = &topstats_anchor;
370 tsl = topstats_anchor;
371 while (tsl) {
372 if (mtm->mtm_tops == &tsl->ts_stats) {
373 mtm->mtm_tops = NULL;
374 *ptsl = tsl->ts_next;
375 kmem_free(tsl, sizeof (*tsl));
376 return (1);
378 ptsl = &tsl->ts_next;
379 tsl = tsl->ts_next;
382 return (1);
386 map_get_debug(ml_unit_t *ul, mt_map_t *mtm)
388 struct topstats_link *tsl;
390 if ((ul->un_debug & MT_TRANSACT) == 0)
391 return (1);
393 if (mtm->mtm_type != logmaptype)
394 return (1);
396 tsl = topstats_anchor;
397 while (tsl) {
398 if (tsl->ts_dev == ul->un_dev) {
399 mtm->mtm_tops = &(tsl->ts_stats);
400 return (1);
402 tsl = tsl->ts_next;
405 tsl = kmem_zalloc(sizeof (*tsl), KM_SLEEP);
406 tsl->ts_dev = ul->un_dev;
407 tsl->ts_next = topstats_anchor;
408 topstats_anchor = tsl;
409 mtm->mtm_tops = &tsl->ts_stats;
410 return (1);
414 * check a map's list
417 map_check_linkage(mt_map_t *mtm)
419 int i;
420 int hashed;
421 int nexted;
422 int preved;
423 int ncancel;
424 mapentry_t *me;
425 off_t olof;
426 off_t firstlof;
427 int wrapped;
429 mutex_enter(&mtm->mtm_mutex);
431 ASSERT(mtm->mtm_nme >= 0);
434 * verify the entries on the hash
436 hashed = 0;
437 for (i = 0; i < mtm->mtm_nhash; ++i) {
438 for (me = *(mtm->mtm_hash+i); me; me = me->me_hash) {
439 ++hashed;
440 ASSERT(me->me_flags & ME_HASH);
441 ASSERT((me->me_flags & ME_LIST) == 0);
444 ASSERT(hashed >= mtm->mtm_nme);
446 * verify the doubly linked list of all entries
448 nexted = 0;
449 for (me = mtm->mtm_next; me != (mapentry_t *)mtm; me = me->me_next)
450 nexted++;
451 preved = 0;
452 for (me = mtm->mtm_prev; me != (mapentry_t *)mtm; me = me->me_prev)
453 preved++;
454 ASSERT(nexted == preved);
455 ASSERT(nexted == hashed);
458 * verify the cancel list
460 ncancel = 0;
461 for (me = mtm->mtm_cancel; me; me = me->me_cancel) {
462 ++ncancel;
463 ASSERT(me->me_flags & ME_CANCEL);
466 * verify the logmap's log offsets
468 if (mtm->mtm_type == logmaptype) {
469 olof = mtm->mtm_next->me_lof;
470 firstlof = olof;
471 wrapped = 0;
473 * Make sure to skip any mapentries whose me_lof = 0
474 * and me_type == DT_CANCEL, these are mapentries
475 * in place just to mark user block deletions as not
476 * available for allocate within the same moby transaction
477 * in case we crash before it is comitted. Skip these
478 * entries in the checks below as they are not applicable.
480 for (me = mtm->mtm_next->me_next;
481 me != (mapentry_t *)mtm;
482 me = me->me_next) {
484 if (me->me_lof == 0 && me->me_dt == DT_CANCEL)
485 continue;
486 if (firstlof == 0) {
487 olof = me->me_lof;
488 firstlof = olof;
489 if (me->me_next != (mapentry_t *)mtm)
490 me = me->me_next;
491 continue;
493 ASSERT(me->me_lof != olof);
495 if (wrapped) {
496 ASSERT(me->me_lof > olof);
497 ASSERT(me->me_lof < firstlof);
498 olof = me->me_lof;
499 continue;
501 if (me->me_lof < olof) {
502 ASSERT(me->me_lof < firstlof);
503 wrapped = 1;
504 olof = me->me_lof;
505 continue;
507 ASSERT(me->me_lof > firstlof);
508 ASSERT(me->me_lof < mtm->mtm_ul->un_eol_lof);
509 olof = me->me_lof;
513 mutex_exit(&mtm->mtm_mutex);
514 return (1);
518 * check for overlap
521 matamap_overlap(mt_map_t *mtm, offset_t mof, off_t nb)
523 off_t hnb;
524 mapentry_t *me;
525 mapentry_t **mep;
527 for (hnb = 0; nb; nb -= hnb, mof += hnb) {
529 hnb = MAPBLOCKSIZE - (mof & MAPBLOCKOFF);
530 if (hnb > nb)
531 hnb = nb;
533 * search for dup entry
535 mep = MAP_HASH(mof, mtm);
536 mutex_enter(&mtm->mtm_mutex);
537 for (me = *mep; me; me = me->me_hash)
538 if (DATAoverlapME(mof, hnb, me))
539 break;
540 mutex_exit(&mtm->mtm_mutex);
543 * overlap detected
545 if (me)
546 return (1);
548 return (0);
551 * check for within
554 matamap_within(mt_map_t *mtm, offset_t mof, off_t nb)
556 off_t hnb;
557 mapentry_t *me;
558 mapentry_t **mep;
559 int scans = 0;
560 int withins = 0;
562 for (hnb = 0; nb && scans == withins; nb -= hnb, mof += hnb) {
563 scans++;
565 hnb = MAPBLOCKSIZE - (mof & MAPBLOCKOFF);
566 if (hnb > nb)
567 hnb = nb;
569 * search for within entry
571 mep = MAP_HASH(mof, mtm);
572 mutex_enter(&mtm->mtm_mutex);
573 for (me = *mep; me; me = me->me_hash)
574 if (DATAwithinME(mof, hnb, me)) {
575 withins++;
576 break;
578 mutex_exit(&mtm->mtm_mutex);
580 return (scans == withins);
584 ldl_sethead_debug(ml_unit_t *ul)
586 mt_map_t *mtm = ul->un_logmap;
587 off_t trimr = mtm->mtm_trimrlof;
588 off_t head = ul->un_head_lof;
589 off_t tail = ul->un_tail_lof;
591 if (head <= tail) {
592 if (trimr < head || trimr >= tail)
593 mtm->mtm_trimrlof = 0;
594 } else {
595 if (trimr >= tail && trimr < head)
596 mtm->mtm_trimrlof = 0;
598 return (1);
602 lufs_initialize_debug(ml_odunit_t *ud)
604 ud->od_debug = lufs_debug;
605 return (1);
608 #endif /* DEBUG */
611 * lufs_debug controls the debug level for TSufs, and is only used
612 * for a debug kernel. It's referenced by ufs_ioctl() and so is
613 * not under #ifdef DEBUG compilation.
615 uint_t lufs_debug;