/*	$NetBSD: uvm_pdpolicy_clockpro.c,v 1.11 2008/01/13 16:28:41 yamt Exp $	*/

/*-
 * Copyright (c)2005, 2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * CLOCK-Pro replacement policy:
 *	http://www.cs.wm.edu/hpcs/WWW/HTML/publications/abs05-3.html
 *
 * approximation of the list of non-resident pages using hash:
 *	http://linux-mm.org/ClockProApproximation
 */
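
/*
 * rough sketch of the non-resident approximation implemented below:
 * an evicted page is remembered not by a full list entry but by a
 * 32-bit cookie hashed from its identity, stored into a fixed-size
 * hash bucket.  for example, with BUCKETSIZE 14, a bucket remembers
 * at most 14 recently evicted pages; a per-bucket clock hand, driven
 * lazily from the global cycle_target, expires old cookies.  false
 * positives are possible (two pages can hash to the same cookie),
 * but the memory cost is small and fixed.
 */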
/* #define	CLOCKPRO_DEBUG */

#if defined(PDSIM)

#include "pdsim.h"

#else /* defined(PDSIM) */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clockpro.c,v 1.11 2008/01/13 16:28:41 yamt Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/hash.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pdpolicy_impl.h>

#if ((__STDC_VERSION__ - 0) >= 199901L)
#define	DPRINTF(...)	/* nothing */
#define	WARN(...)	printf(__VA_ARGS__)
#else /* ((__STDC_VERSION__ - 0) >= 199901L) */
#define	DPRINTF(a...)	/* nothing */	/* GCC */
#define	WARN(a...)	printf(a)
#endif /* ((__STDC_VERSION__ - 0) >= 199901L) */

#define	dump(a)		/* nothing */

#undef	USEONCE2
#define	LISTQ
#undef	ADAPTIVE

#endif /* defined(PDSIM) */
#if !defined(CLOCKPRO_COLDPCT)
#define	CLOCKPRO_COLDPCT	10
#endif /* !defined(CLOCKPRO_COLDPCT) */

#define	CLOCKPRO_COLDPCTMAX	90

#if !defined(CLOCKPRO_HASHFACTOR)
#define	CLOCKPRO_HASHFACTOR	2
#endif /* !defined(CLOCKPRO_HASHFACTOR) */

#define	CLOCKPRO_NEWQMIN	((1024 * 1024) >> PAGE_SHIFT)	/* XXX */
int clockpro_hashfactor = CLOCKPRO_HASHFACTOR;
PDPOL_EVCNT_DEFINE(nresrecordobj)
PDPOL_EVCNT_DEFINE(nresrecordanon)
PDPOL_EVCNT_DEFINE(nreslookupobj)
PDPOL_EVCNT_DEFINE(nreslookupanon)
PDPOL_EVCNT_DEFINE(nresfoundobj)
PDPOL_EVCNT_DEFINE(nresfoundanon)
PDPOL_EVCNT_DEFINE(nresanonfree)
PDPOL_EVCNT_DEFINE(nresconflict)
PDPOL_EVCNT_DEFINE(nresoverwritten)
PDPOL_EVCNT_DEFINE(nreshandhot)

PDPOL_EVCNT_DEFINE(hhottakeover)
PDPOL_EVCNT_DEFINE(hhotref)
PDPOL_EVCNT_DEFINE(hhotunref)
PDPOL_EVCNT_DEFINE(hhotcold)
PDPOL_EVCNT_DEFINE(hhotcoldtest)

PDPOL_EVCNT_DEFINE(hcoldtakeover)
PDPOL_EVCNT_DEFINE(hcoldref)
PDPOL_EVCNT_DEFINE(hcoldunref)
PDPOL_EVCNT_DEFINE(hcoldreftest)
PDPOL_EVCNT_DEFINE(hcoldunreftest)
PDPOL_EVCNT_DEFINE(hcoldunreftestspeculative)
PDPOL_EVCNT_DEFINE(hcoldhot)

PDPOL_EVCNT_DEFINE(speculativeenqueue)
PDPOL_EVCNT_DEFINE(speculativehit1)
PDPOL_EVCNT_DEFINE(speculativehit2)
PDPOL_EVCNT_DEFINE(speculativemiss)
#define	PQ_REFERENCED	PQ_PRIVATE1
#define	PQ_HOT		PQ_PRIVATE2
#define	PQ_TEST		PQ_PRIVATE3
#define	PQ_INITIALREF	PQ_PRIVATE4
#if PQ_PRIVATE6 != PQ_PRIVATE5 * 2 || PQ_PRIVATE7 != PQ_PRIVATE6 * 2
#error PQ_PRIVATE
#endif
#define	PQ_QMASK	(PQ_PRIVATE5|PQ_PRIVATE6|PQ_PRIVATE7)
#define	PQ_QFACTOR	PQ_PRIVATE5
#define	PQ_SPECULATIVE	PQ_PRIVATE8

#define	CLOCKPRO_NOQUEUE	0
#define	CLOCKPRO_NEWQ		1	/* small queue to clear initial ref. */
#if defined(LISTQ)
#define	CLOCKPRO_COLDQ		2
#define	CLOCKPRO_HOTQ		3
#else /* defined(LISTQ) */
#define	CLOCKPRO_COLDQ		(2 + coldqidx)	/* XXX */
#define	CLOCKPRO_HOTQ		(3 - coldqidx)	/* XXX */
#endif /* defined(LISTQ) */
#define	CLOCKPRO_LISTQ		4
#define	CLOCKPRO_NQUEUE		4
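
/*
 * illustration of the queue-index encoding used by clockpro_setq()
 * and clockpro_getq() below: the index is stored in the three
 * consecutive flag bits PQ_PRIVATE5..PQ_PRIVATE7, so for example
 *
 *	clockpro_setq(pg, CLOCKPRO_HOTQ)
 *
 * stores CLOCKPRO_HOTQ (3 with LISTQ) as 3 * PQ_QFACTOR within
 * PQ_QMASK, and clockpro_getq() recovers it by dividing by
 * PQ_QFACTOR.  the #if above guards the assumption that the three
 * bits really are adjacent.
 */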
static void
clockpro_setq(struct vm_page *pg, int qidx)
{

	KASSERT(qidx >= CLOCKPRO_NOQUEUE);
	KASSERT(qidx <= CLOCKPRO_NQUEUE);

	pg->pqflags = (pg->pqflags & ~PQ_QMASK) | (qidx * PQ_QFACTOR);
}

static int
clockpro_getq(struct vm_page *pg)
{
	int qidx;

	qidx = (pg->pqflags & PQ_QMASK) / PQ_QFACTOR;
	KASSERT(qidx >= CLOCKPRO_NOQUEUE);
	KASSERT(qidx <= CLOCKPRO_NQUEUE);
	return qidx;
}
typedef struct {
	struct pglist q_q;
	int q_len;
} pageq_t;

struct clockpro_state {
	int s_npages;
	int s_coldtarget;
	int s_ncold;
	int s_newqlenmax;
	pageq_t s_q[CLOCKPRO_NQUEUE];

	struct uvm_pctparam s_coldtargetpct;
};
static pageq_t *
clockpro_queue(struct clockpro_state *s, int qidx)
{

	KASSERT(CLOCKPRO_NOQUEUE < qidx);
	KASSERT(qidx <= CLOCKPRO_NQUEUE);

	return &s->s_q[qidx - 1];
}
#if !defined(LISTQ)

static int coldqidx;

static void
clockpro_switchqueue(void)
{

	coldqidx = 1 - coldqidx;
}

#endif /* !defined(LISTQ) */
static struct clockpro_state clockpro;
static struct clockpro_scanstate {
	int ss_nscanned;
} scanstate;

/* ---------------------------------------- */
static void
pageq_init(pageq_t *q)
{

	TAILQ_INIT(&q->q_q);
	q->q_len = 0;
}

static int
pageq_len(const pageq_t *q)
{

	return q->q_len;
}

static struct vm_page *
pageq_first(const pageq_t *q)
{

	return TAILQ_FIRST(&q->q_q);
}
static void
pageq_insert_tail(pageq_t *q, struct vm_page *pg)
{

	TAILQ_INSERT_TAIL(&q->q_q, pg, pageq);
	q->q_len++;
}

static void
pageq_insert_head(pageq_t *q, struct vm_page *pg)
{

	TAILQ_INSERT_HEAD(&q->q_q, pg, pageq);
	q->q_len++;
}
static void
pageq_remove(pageq_t *q, struct vm_page *pg)
{

	KASSERT(clockpro_queue(&clockpro, clockpro_getq(pg)) == q);
	KASSERT(q->q_len > 0);
	TAILQ_REMOVE(&q->q_q, pg, pageq);
	q->q_len--;
}
static struct vm_page *
pageq_remove_head(pageq_t *q)
{
	struct vm_page *pg;

	pg = TAILQ_FIRST(&q->q_q);
	if (pg == NULL) {
		KASSERT(q->q_len == 0);
		return NULL;
	}
	TAILQ_REMOVE(&q->q_q, pg, pageq);
	q->q_len--;
	return pg;
}
/* ---------------------------------------- */
static void
clockpro_insert_tail(struct clockpro_state *s, int qidx, struct vm_page *pg)
{
	pageq_t *q = clockpro_queue(s, qidx);

	clockpro_setq(pg, qidx);
	pageq_insert_tail(q, pg);
}

static void
clockpro_insert_head(struct clockpro_state *s, int qidx, struct vm_page *pg)
{
	pageq_t *q = clockpro_queue(s, qidx);

	clockpro_setq(pg, qidx);
	pageq_insert_head(q, pg);
}
/* ---------------------------------------- */

typedef uint32_t nonres_cookie_t;
#define	NONRES_COOKIE_INVAL	0

typedef uintptr_t objid_t;
/*
 * XXX maybe these hash functions need reconsideration,
 * given that hash distribution is critical here.
 */

static uint32_t
pageidentityhash1(objid_t obj, off_t idx)
{
	uint32_t hash = HASH32_BUF_INIT;

#if 1
	hash = hash32_buf(&idx, sizeof(idx), hash);
	hash = hash32_buf(&obj, sizeof(obj), hash);
#else
	hash = hash32_buf(&obj, sizeof(obj), hash);
	hash = hash32_buf(&idx, sizeof(idx), hash);
#endif
	return hash;
}

static uint32_t
pageidentityhash2(objid_t obj, off_t idx)
{
	uint32_t hash = HASH32_BUF_INIT;

	hash = hash32_buf(&obj, sizeof(obj), hash);
	hash = hash32_buf(&idx, sizeof(idx), hash);
	return hash;
}
static nonres_cookie_t
calccookie(objid_t obj, off_t idx)
{
	uint32_t hash = pageidentityhash2(obj, idx);
	nonres_cookie_t cookie = hash;

	if (__predict_false(cookie == NONRES_COOKIE_INVAL)) {
		cookie++; /* XXX */
	}
	return cookie;
}
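
/*
 * example: for an object page, calccookie(pageobj(pg), pageidx(pg))
 * compresses the (uobject pointer, page index) pair into 32 bits.
 * distinct pages can therefore yield the same cookie, and a later
 * lookup can mistake an unrelated page for a recently evicted one
 * (the nresconflict event counts such collisions seen at record
 * time).  NONRES_COOKIE_INVAL is reserved to mean "empty slot",
 * hence the remapping above.
 */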
#define	BUCKETSIZE	14
struct bucket {
	int cycle;
	int cur;
	nonres_cookie_t pages[BUCKETSIZE];
};
static int cycle_target;
static int cycle_target_frac;

static struct bucket static_bucket;
static struct bucket *buckets = &static_bucket;
static size_t hashsize = 1;

static int coldadj;
#define	COLDTARGET_ADJ(d)	coldadj += (d)
#if defined(PDSIM)

static void *
clockpro_hashalloc(int n)
{
	size_t allocsz = sizeof(*buckets) * n;

	return malloc(allocsz);
}

static void
clockpro_hashfree(void *p, int n)
{

	free(p);
}

#else /* defined(PDSIM) */

static void *
clockpro_hashalloc(int n)
{
	size_t allocsz = round_page(sizeof(*buckets) * n);

	return (void *)uvm_km_alloc(kernel_map, allocsz, 0, UVM_KMF_WIRED);
}

static void
clockpro_hashfree(void *p, int n)
{
	size_t allocsz = round_page(sizeof(*buckets) * n);

	uvm_km_free(kernel_map, (vaddr_t)p, allocsz, UVM_KMF_WIRED);
}

#endif /* defined(PDSIM) */
static void
clockpro_hashinit(uint64_t n)
{
	struct bucket *newbuckets;
	struct bucket *oldbuckets;
	size_t sz;
	size_t oldsz;
	int i;

	sz = howmany(n, BUCKETSIZE);
	sz *= clockpro_hashfactor;
	newbuckets = clockpro_hashalloc(sz);
	if (newbuckets == NULL) {
		panic("%s: allocation failure", __func__);
	}
	for (i = 0; i < sz; i++) {
		struct bucket *b = &newbuckets[i];
		int j;

		b->cycle = cycle_target;
		b->cur = 0;
		for (j = 0; j < BUCKETSIZE; j++) {
			b->pages[j] = NONRES_COOKIE_INVAL;
		}
	}
	/* XXX lock */
	oldbuckets = buckets;
	oldsz = hashsize;
	buckets = newbuckets;
	hashsize = sz;
	/* XXX unlock */
	if (oldbuckets != &static_bucket) {
		clockpro_hashfree(oldbuckets, oldsz);
	}
}
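
/*
 * sizing example: for n = 100000 resident pages, BUCKETSIZE 14 and
 * the default clockpro_hashfactor of 2, clockpro_hashinit() allocates
 * howmany(100000, 14) * 2 = 14286 buckets, i.e. roughly two cookie
 * slots per resident page.
 */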
static struct bucket *
nonresident_getbucket(objid_t obj, off_t idx)
{
	uint32_t hash;

	hash = pageidentityhash1(obj, idx);
	return &buckets[hash % hashsize];
}
static void
nonresident_rotate(struct bucket *b)
{
	int cycle = b->cycle;
	int cur = b->cur;

	while (cycle - cycle_target < 0) {
		if (b->pages[cur] != NONRES_COOKIE_INVAL) {
			PDPOL_EVCNT_INCR(nreshandhot);
			COLDTARGET_ADJ(-1);
		}
		b->pages[cur] = NONRES_COOKIE_INVAL;
		cur++;
		if (cur == BUCKETSIZE) {
			cur = 0;
		}
		cycle++;
	}
	b->cycle = cycle;
	b->cur = cur;
}
static bool
nonresident_lookupremove(objid_t obj, off_t idx)
{
	struct bucket *b = nonresident_getbucket(obj, idx);
	nonres_cookie_t cookie = calccookie(obj, idx);
	int i;

	nonresident_rotate(b);
	for (i = 0; i < BUCKETSIZE; i++) {
		if (b->pages[i] == cookie) {
			b->pages[i] = NONRES_COOKIE_INVAL;
			return true;
		}
	}
	return false;
}
static objid_t
pageobj(struct vm_page *pg)
{
	const void *obj;

	/*
	 * XXX object pointer is often freed and reused for unrelated object.
	 * for vnodes, it would be better to use something like
	 * a hash of fsid/fileid/generation.
	 */

	obj = pg->uobject;
	if (obj == NULL) {
		obj = pg->uanon;
		KASSERT(obj != NULL);
		KASSERT(pg->offset == 0);
	}
	return (objid_t)obj;
}
static off_t
pageidx(struct vm_page *pg)
{

	KASSERT((pg->offset & PAGE_MASK) == 0);
	return pg->offset >> PAGE_SHIFT;
}
static bool
nonresident_pagelookupremove(struct vm_page *pg)
{
	bool found = nonresident_lookupremove(pageobj(pg), pageidx(pg));

	if (pg->uobject != NULL) {
		PDPOL_EVCNT_INCR(nreslookupobj);
	} else {
		PDPOL_EVCNT_INCR(nreslookupanon);
	}
	if (found) {
		if (pg->uobject != NULL) {
			PDPOL_EVCNT_INCR(nresfoundobj);
		} else {
			PDPOL_EVCNT_INCR(nresfoundanon);
		}
	}
	return found;
}
static void
nonresident_pagerecord(struct vm_page *pg)
{
	objid_t obj = pageobj(pg);
	off_t idx = pageidx(pg);
	struct bucket *b = nonresident_getbucket(obj, idx);
	nonres_cookie_t cookie = calccookie(obj, idx);

#if defined(DEBUG)
	int i;

	for (i = 0; i < BUCKETSIZE; i++) {
		if (b->pages[i] == cookie) {
			PDPOL_EVCNT_INCR(nresconflict);
		}
	}
#endif /* defined(DEBUG) */

	if (pg->uobject != NULL) {
		PDPOL_EVCNT_INCR(nresrecordobj);
	} else {
		PDPOL_EVCNT_INCR(nresrecordanon);
	}
	nonresident_rotate(b);
	if (b->pages[b->cur] != NONRES_COOKIE_INVAL) {
		PDPOL_EVCNT_INCR(nresoverwritten);
		COLDTARGET_ADJ(-1);
	}
	b->pages[b->cur] = cookie;
	b->cur = (b->cur + 1) % BUCKETSIZE;
}
/* ---------------------------------------- */

#if defined(CLOCKPRO_DEBUG)
static void
check_sanity(void)
{
}
#else /* defined(CLOCKPRO_DEBUG) */
#define	check_sanity()	/* nothing */
#endif /* defined(CLOCKPRO_DEBUG) */
static void
clockpro_reinit(void)
{

	clockpro_hashinit(uvmexp.npages);
}

static void
clockpro_init(void)
{
	struct clockpro_state *s = &clockpro;
	int i;

	for (i = 0; i < CLOCKPRO_NQUEUE; i++) {
		pageq_init(&s->s_q[i]);
	}
	uvm_pctparam_init(&s->s_coldtargetpct, CLOCKPRO_COLDPCT, NULL);
}
static void
clockpro_tune(void)
{
	struct clockpro_state *s = &clockpro;
	int coldtarget;

#if defined(ADAPTIVE)
	int coldmax = s->s_npages * CLOCKPRO_COLDPCTMAX / 100;
	int coldmin = 1;

	coldtarget = s->s_coldtarget;
	if (coldtarget + coldadj < coldmin) {
		coldadj = coldmin - coldtarget;
	} else if (coldtarget + coldadj > coldmax) {
		coldadj = coldmax - coldtarget;
	}
	coldtarget += coldadj;
#else /* defined(ADAPTIVE) */
	coldtarget = UVM_PCTPARAM_APPLY(&s->s_coldtargetpct, s->s_npages);
	if (coldtarget < 1) {
		coldtarget = 1;
	}
#endif /* defined(ADAPTIVE) */

	s->s_coldtarget = coldtarget;
	s->s_newqlenmax = coldtarget / 4;
	if (s->s_newqlenmax < CLOCKPRO_NEWQMIN) {
		s->s_newqlenmax = CLOCKPRO_NEWQMIN;
	}
}
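
/*
 * tuning example (non-ADAPTIVE case): with s_npages = 100000 and the
 * default CLOCKPRO_COLDPCT of 10, coldtarget becomes 10000 pages and
 * s_newqlenmax becomes 2500.  with 4KB pages, CLOCKPRO_NEWQMIN is
 * (1024 * 1024) >> 12 = 256, so the floor does not apply here.
 */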
static void
clockpro_movereferencebit(struct vm_page *pg)
{
	bool referenced;

	referenced = pmap_clear_reference(pg);
	if (referenced) {
		pg->pqflags |= PQ_REFERENCED;
	}
}

static void
clockpro_clearreferencebit(struct vm_page *pg)
{

	clockpro_movereferencebit(pg);
	pg->pqflags &= ~PQ_REFERENCED;
}
static void
clockpro___newqrotate(int len)
{
	struct clockpro_state * const s = &clockpro;
	pageq_t * const newq = clockpro_queue(s, CLOCKPRO_NEWQ);
	struct vm_page *pg;

	while (pageq_len(newq) > len) {
		pg = pageq_remove_head(newq);
		KASSERT(pg != NULL);
		KASSERT(clockpro_getq(pg) == CLOCKPRO_NEWQ);
		if ((pg->pqflags & PQ_INITIALREF) != 0) {
			clockpro_clearreferencebit(pg);
			pg->pqflags &= ~PQ_INITIALREF;
		}
		/* place at the list head */
		clockpro_insert_tail(s, CLOCKPRO_COLDQ, pg);
	}
}
static void
clockpro_newqrotate(void)
{
	struct clockpro_state * const s = &clockpro;

	clockpro___newqrotate(s->s_newqlenmax);
}

static void
clockpro_newqflush(int n)
{

	clockpro___newqrotate(n);
}

static void
clockpro_newqflushone(void)
{
	struct clockpro_state * const s = &clockpro;

	clockpro_newqflush(
	    MAX(pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)) - 1, 0));
}
/*
 * our "tail" is called "list-head" in the paper.
 */

static void
clockpro___enqueuetail(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;

	KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);

#if !defined(USEONCE2)
	clockpro_insert_tail(s, CLOCKPRO_NEWQ, pg);
	clockpro_newqrotate();
#else /* !defined(USEONCE2) */
#if defined(LISTQ)
	KASSERT((pg->pqflags & PQ_REFERENCED) == 0);
#endif /* defined(LISTQ) */
	clockpro_insert_tail(s, CLOCKPRO_COLDQ, pg);
#endif /* !defined(USEONCE2) */
}
static void
clockpro_pageenqueue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	bool hot;
	bool speculative = (pg->pqflags & PQ_SPECULATIVE) != 0; /* XXX */

	KASSERT((~pg->pqflags & (PQ_INITIALREF|PQ_SPECULATIVE)) != 0);
	KASSERT(mutex_owned(&uvm_pageqlock));
	check_sanity();
	KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);
	s->s_npages++;
	pg->pqflags &= ~(PQ_HOT|PQ_TEST);
	if (speculative) {
		hot = false;
		PDPOL_EVCNT_INCR(speculativeenqueue);
	} else {
		hot = nonresident_pagelookupremove(pg);
		if (hot) {
			COLDTARGET_ADJ(1);
		}
	}

	/*
	 * consider mmap'ed file:
	 *
	 * - read-ahead enqueues a page.
	 *
	 * - on the following read-ahead hit, the fault handler activates it.
	 *
	 * - finally, the userland code which caused the above fault
	 *   actually accesses the page.  it makes its reference bit set.
	 *
	 * we want to count the above as a single access, rather than
	 * three accesses with short reuse distances.
	 */
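
	/*
	 * concretely: the page is enqueued with PQ_INITIALREF set, and
	 * both that flag and the pmap reference bit are cleared when the
	 * page later falls off the new queue (clockpro___newqrotate()),
	 * so the enqueue/activate/access sequence above leaves only one
	 * recorded reference.
	 */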
#if defined(USEONCE2)
	pg->pqflags &= ~PQ_INITIALREF;
	if (hot) {
		pg->pqflags |= PQ_TEST;
	}
	s->s_ncold++;
	clockpro_clearreferencebit(pg);
	clockpro___enqueuetail(pg);
#else /* defined(USEONCE2) */
	if (speculative) {
		s->s_ncold++;
	} else if (hot) {
		pg->pqflags |= PQ_HOT;
	} else {
		pg->pqflags |= PQ_TEST;
		s->s_ncold++;
	}
	clockpro___enqueuetail(pg);
#endif /* defined(USEONCE2) */
	KASSERT(s->s_ncold <= s->s_npages);
}
static pageq_t *
clockpro_pagequeue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	int qidx;

	qidx = clockpro_getq(pg);
	KASSERT(qidx != CLOCKPRO_NOQUEUE);

	return clockpro_queue(s, qidx);
}
static void
clockpro_pagedequeue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	pageq_t *q;

	KASSERT(s->s_npages > 0);
	check_sanity();
	q = clockpro_pagequeue(pg);
	pageq_remove(q, pg);
	check_sanity();
	clockpro_setq(pg, CLOCKPRO_NOQUEUE);
	if ((pg->pqflags & PQ_HOT) == 0) {
		KASSERT(s->s_ncold > 0);
		s->s_ncold--;
	}
	KASSERT(s->s_npages > 0);
	s->s_npages--;
	check_sanity();
}
static void
clockpro_pagerequeue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	int qidx;

	qidx = clockpro_getq(pg);
	KASSERT(qidx == CLOCKPRO_HOTQ || qidx == CLOCKPRO_COLDQ);
	pageq_remove(clockpro_queue(s, qidx), pg);
	check_sanity();
	clockpro_setq(pg, CLOCKPRO_NOQUEUE);

	clockpro___enqueuetail(pg);
}
static void
handhot_endtest(struct vm_page *pg)
{

	KASSERT((pg->pqflags & PQ_HOT) == 0);
	if ((pg->pqflags & PQ_TEST) != 0) {
		PDPOL_EVCNT_INCR(hhotcoldtest);
		COLDTARGET_ADJ(-1);
		pg->pqflags &= ~PQ_TEST;
	} else {
		PDPOL_EVCNT_INCR(hhotcold);
	}
}
static void
handhot_advance(void)
{
	struct clockpro_state * const s = &clockpro;
	struct vm_page *pg;
	pageq_t *hotq;
	int hotqlen;

	clockpro_tune();

	if (s->s_ncold >= s->s_coldtarget) {
		return;
	}
	hotq = clockpro_queue(s, CLOCKPRO_HOTQ);
again:
	pg = pageq_first(hotq);
	if (pg == NULL) {
		DPRINTF("%s: HHOT TAKEOVER\n", __func__);
		dump("hhottakeover");
		PDPOL_EVCNT_INCR(hhottakeover);
#if defined(LISTQ)
		while (/* CONSTCOND */ 1) {
			pageq_t *coldq = clockpro_queue(s, CLOCKPRO_COLDQ);

			pg = pageq_first(coldq);
			if (pg == NULL) {
				clockpro_newqflushone();
				pg = pageq_first(coldq);
				if (pg == NULL) {
					WARN("hhot: no page?\n");
					return;
				}
			}
			KASSERT(clockpro_pagequeue(pg) == coldq);
			pageq_remove(coldq, pg);
			if ((pg->pqflags & PQ_HOT) == 0) {
				handhot_endtest(pg);
				clockpro_insert_tail(s, CLOCKPRO_LISTQ, pg);
			} else {
				clockpro_insert_head(s, CLOCKPRO_HOTQ, pg);
				break;
			}
		}
#else /* defined(LISTQ) */
		clockpro_newqflush(0); /* XXX XXX */
		clockpro_switchqueue();
		hotq = clockpro_queue(s, CLOCKPRO_HOTQ);
		goto again;
#endif /* defined(LISTQ) */
	}

	KASSERT(clockpro_pagequeue(pg) == hotq);

	/*
	 * terminate test period of nonresident pages by cycling them.
	 */

	cycle_target_frac += BUCKETSIZE;
	hotqlen = pageq_len(hotq);
	while (cycle_target_frac >= hotqlen) {
		cycle_target++;
		cycle_target_frac -= hotqlen;
	}
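
	/*
	 * worked example: with BUCKETSIZE 14 and a hot queue of 700
	 * pages, each hot-hand step adds 14/700 of a tick, so
	 * cycle_target advances by 1 every 50 steps; one full rotation
	 * of the hot queue advances it by BUCKETSIZE, which is exactly
	 * one full pass of a hash bucket's hand in nonresident_rotate().
	 */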
#if defined(LISTQ)
	if ((pg->pqflags & PQ_HOT) == 0) {
		panic("cold page in hotq: %p", pg);
	}
#else /* defined(LISTQ) */
	if ((pg->pqflags & PQ_HOT) == 0) {
		handhot_endtest(pg);
		goto next;
	}
#endif /* defined(LISTQ) */
	KASSERT((pg->pqflags & PQ_TEST) == 0);
	KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
	KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);

	/*
	 * once we met our target,
	 * stop at a hot page so that no cold pages in test period
	 * have larger recency than any hot pages.
	 */

	if (s->s_ncold >= s->s_coldtarget) {
		dump("hot done");
		return;
	}
	clockpro_movereferencebit(pg);
	if ((pg->pqflags & PQ_REFERENCED) == 0) {
		PDPOL_EVCNT_INCR(hhotunref);
		uvmexp.pddeact++;
		pg->pqflags &= ~PQ_HOT;
		s->s_ncold++;
		KASSERT(s->s_ncold <= s->s_npages);
	} else {
		PDPOL_EVCNT_INCR(hhotref);
	}
	pg->pqflags &= ~PQ_REFERENCED;
#if !defined(LISTQ)
next:
#endif /* !defined(LISTQ) */
	clockpro_pagerequeue(pg);
	dump("hot");
	goto again;
}
static struct vm_page *
handcold_advance(void)
{
	struct clockpro_state * const s = &clockpro;
	struct vm_page *pg;

	for (;;) {
#if defined(LISTQ)
		pageq_t *listq = clockpro_queue(s, CLOCKPRO_LISTQ);
#endif /* defined(LISTQ) */
		pageq_t *coldq;

		clockpro_newqrotate();
		handhot_advance();
#if defined(LISTQ)
		pg = pageq_first(listq);
		if (pg != NULL) {
			KASSERT(clockpro_getq(pg) == CLOCKPRO_LISTQ);
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			KASSERT((pg->pqflags & PQ_HOT) == 0);
			KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
			pageq_remove(listq, pg);
			clockpro_insert_head(s, CLOCKPRO_COLDQ, pg); /* XXX */
			goto gotcold;
		}
#endif /* defined(LISTQ) */
		coldq = clockpro_queue(s, CLOCKPRO_COLDQ);
		pg = pageq_first(coldq);
		if (pg == NULL) {
			clockpro_newqflushone();
			pg = pageq_first(coldq);
		}
		if (pg == NULL) {
			DPRINTF("%s: HCOLD TAKEOVER\n", __func__);
			dump("hcoldtakeover");
			PDPOL_EVCNT_INCR(hcoldtakeover);
			KASSERT(
			    pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)) == 0);
#if defined(LISTQ)
			KASSERT(
			    pageq_len(clockpro_queue(s, CLOCKPRO_HOTQ)) == 0);
#else /* defined(LISTQ) */
			clockpro_switchqueue();
			coldq = clockpro_queue(s, CLOCKPRO_COLDQ);
			pg = pageq_first(coldq);
#endif /* defined(LISTQ) */
		}
		if (pg == NULL) {
			WARN("hcold: no page?\n");
			return NULL;
		}
		KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
		if ((pg->pqflags & PQ_HOT) != 0) {
			PDPOL_EVCNT_INCR(hcoldhot);
			pageq_remove(coldq, pg);
			clockpro_insert_tail(s, CLOCKPRO_HOTQ, pg);
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			goto next;
		}
#if defined(LISTQ)
gotcold:
#endif /* defined(LISTQ) */
		KASSERT((pg->pqflags & PQ_HOT) == 0);
		clockpro_movereferencebit(pg);
		if ((pg->pqflags & PQ_SPECULATIVE) != 0) {
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			if ((pg->pqflags & PQ_REFERENCED) != 0) {
				PDPOL_EVCNT_INCR(speculativehit2);
				pg->pqflags &= ~(PQ_SPECULATIVE|PQ_REFERENCED);
				clockpro_pagedequeue(pg);
				clockpro_pageenqueue(pg);
				goto next;
			}
			PDPOL_EVCNT_INCR(speculativemiss);
		}
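
		/*
		 * the switch below is the CLOCK-Pro cold-hand decision
		 * table over (PQ_REFERENCED, PQ_TEST):
		 *
		 *	unreferenced, in test	-> test period expires;
		 *				   remember as non-resident
		 *				   and evict.
		 *	unreferenced, no test	-> evict.
		 *	referenced, in test	-> promote to hot.
		 *	referenced, no test	-> start test period.
		 */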
		switch (pg->pqflags & (PQ_REFERENCED|PQ_TEST)) {
		case PQ_TEST:
			PDPOL_EVCNT_INCR(hcoldunreftest);
			nonresident_pagerecord(pg);
			goto gotit;
		case 0:
			PDPOL_EVCNT_INCR(hcoldunref);
gotit:
			KASSERT(s->s_ncold > 0);
			clockpro_pagerequeue(pg); /* XXX */
			dump("cold done");
			/* XXX "pg" is still in queue */
			handhot_advance();
			goto done;

		case PQ_REFERENCED|PQ_TEST:
			PDPOL_EVCNT_INCR(hcoldreftest);
			s->s_ncold--;
			COLDTARGET_ADJ(1);
			pg->pqflags |= PQ_HOT;
			pg->pqflags &= ~PQ_TEST;
			break;

		case PQ_REFERENCED:
			PDPOL_EVCNT_INCR(hcoldref);
			pg->pqflags |= PQ_TEST;
			break;
		}
		pg->pqflags &= ~PQ_REFERENCED;
		/* move to the list head */
		clockpro_pagerequeue(pg);
next:;
	}
done:;
	return pg;
}
void
uvmpdpol_pageactivate(struct vm_page *pg)
{

	if (!uvmpdpol_pageisqueued_p(pg)) {
		KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);
		pg->pqflags |= PQ_INITIALREF;
		clockpro_pageenqueue(pg);
	} else if ((pg->pqflags & PQ_SPECULATIVE)) {
		PDPOL_EVCNT_INCR(speculativehit1);
		pg->pqflags &= ~PQ_SPECULATIVE;
		pg->pqflags |= PQ_INITIALREF;
		clockpro_pagedequeue(pg);
		clockpro_pageenqueue(pg);
	}
	pg->pqflags |= PQ_REFERENCED;
}
void
uvmpdpol_pagedeactivate(struct vm_page *pg)
{

	clockpro_clearreferencebit(pg);
}
void
uvmpdpol_pagedequeue(struct vm_page *pg)
{

	if (!uvmpdpol_pageisqueued_p(pg)) {
		return;
	}
	clockpro_pagedequeue(pg);
	pg->pqflags &= ~(PQ_INITIALREF|PQ_SPECULATIVE);
}
void
uvmpdpol_pageenqueue(struct vm_page *pg)
{

#if 1
	if (uvmpdpol_pageisqueued_p(pg)) {
		return;
	}
	clockpro_clearreferencebit(pg);
	pg->pqflags |= PQ_SPECULATIVE;
	clockpro_pageenqueue(pg);
#else
	uvmpdpol_pageactivate(pg);
#endif
}
void
uvmpdpol_anfree(struct vm_anon *an)
{

	KASSERT(an->an_page == NULL);
	if (nonresident_lookupremove((objid_t)an, 0)) {
		PDPOL_EVCNT_INCR(nresanonfree);
	}
}
void
uvmpdpol_reinit(void)
{

	clockpro_reinit();
}
void
uvmpdpol_estimatepageable(int *active, int *inactive)
{
	struct clockpro_state * const s = &clockpro;

	if (active) {
		*active = s->s_npages - s->s_ncold;
	}
	if (inactive) {
		*inactive = s->s_ncold;
	}
}
bool
uvmpdpol_pageisqueued_p(struct vm_page *pg)
{

	return clockpro_getq(pg) != CLOCKPRO_NOQUEUE;
}
void
uvmpdpol_scaninit(void)
{
	struct clockpro_scanstate * const ss = &scanstate;

	ss->ss_nscanned = 0;
}
struct vm_page *
uvmpdpol_selectvictim(void)
{
	struct clockpro_state * const s = &clockpro;
	struct clockpro_scanstate * const ss = &scanstate;
	struct vm_page *pg;

	if (ss->ss_nscanned > s->s_npages) {
		DPRINTF("scan too much\n");
		return NULL;
	}
	pg = handcold_advance();
	ss->ss_nscanned++;
	return pg;
}
static void
clockpro_dropswap(pageq_t *q, int *todo)
{
	struct vm_page *pg;

	TAILQ_FOREACH_REVERSE(pg, &q->q_q, pglist, pageq) {
		if (*todo <= 0) {
			break;
		}
		if ((pg->pqflags & PQ_HOT) == 0) {
			continue;
		}
		if ((pg->pqflags & PQ_SWAPBACKED) == 0) {
			continue;
		}
		if (uvmpd_trydropswap(pg)) {
			(*todo)--;
		}
	}
}
void
uvmpdpol_balancequeue(int swap_shortage)
{
	struct clockpro_state * const s = &clockpro;
	int todo = swap_shortage;

	if (todo == 0) {
		return;
	}

	/*
	 * reclaim swap slots from hot pages
	 */

	DPRINTF("%s: swap_shortage=%d\n", __func__, swap_shortage);

	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_NEWQ), &todo);
	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_COLDQ), &todo);
	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_HOTQ), &todo);

	DPRINTF("%s: done=%d\n", __func__, swap_shortage - todo);
}
bool
uvmpdpol_needsscan_p(void)
{
	struct clockpro_state * const s = &clockpro;

	if (s->s_ncold < s->s_coldtarget) {
		return true;
	}
	return false;
}
#if !defined(PDSIM)

#include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */

void
uvmpdpol_sysctlsetup(void)
{
#if !defined(ADAPTIVE)
	struct clockpro_state * const s = &clockpro;

	uvm_pctparam_createsysctlnode(&s->s_coldtargetpct, "coldtargetpct",
	    SYSCTL_DESCR("Percentage cold target queue of the entire queue"));
#endif /* !defined(ADAPTIVE) */
}

#endif /* !defined(PDSIM) */
#if defined(DDB)

void clockpro_dump(void);

void
clockpro_dump(void)
{
	struct clockpro_state * const s = &clockpro;
	struct vm_page *pg;
	int ncold, nhot, ntest, nspeculative, ninitialref, nref;
	int newqlen, coldqlen, hotqlen, listqlen;

	newqlen = coldqlen = hotqlen = listqlen = 0;
	printf("npages=%d, ncold=%d, coldtarget=%d, newqlenmax=%d\n",
	    s->s_npages, s->s_ncold, s->s_coldtarget, s->s_newqlenmax);

#define	INITCOUNT()	\
	ncold = nhot = ntest = nspeculative = ninitialref = nref = 0

#define	COUNT(pg)	\
	if ((pg->pqflags & PQ_HOT) != 0) { \
		nhot++; \
	} else { \
		ncold++; \
		if ((pg->pqflags & PQ_TEST) != 0) { \
			ntest++; \
		} \
		if ((pg->pqflags & PQ_SPECULATIVE) != 0) { \
			nspeculative++; \
		} \
		if ((pg->pqflags & PQ_INITIALREF) != 0) { \
			ninitialref++; \
		} else if ((pg->pqflags & PQ_REFERENCED) != 0 || \
		    pmap_is_referenced(pg)) { \
			nref++; \
		} \
	}

#define	PRINTCOUNT(name)	\
	printf("%s hot=%d, cold=%d, test=%d, speculative=%d, initialref=%d, " \
	    "nref=%d\n", \
	    (name), nhot, ncold, ntest, nspeculative, ninitialref, nref)

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_NEWQ)->q_q, pageq) {
		if (clockpro_getq(pg) != CLOCKPRO_NEWQ) {
			printf("newq corrupt %p\n", pg);
		}
		COUNT(pg)
		newqlen++;
	}
	PRINTCOUNT("newq");

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_COLDQ)->q_q, pageq) {
		if (clockpro_getq(pg) != CLOCKPRO_COLDQ) {
			printf("coldq corrupt %p\n", pg);
		}
		COUNT(pg)
		coldqlen++;
	}
	PRINTCOUNT("coldq");

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_HOTQ)->q_q, pageq) {
		if (clockpro_getq(pg) != CLOCKPRO_HOTQ) {
			printf("hotq corrupt %p\n", pg);
		}
#if defined(LISTQ)
		if ((pg->pqflags & PQ_HOT) == 0) {
			printf("cold page in hotq: %p\n", pg);
		}
#endif /* defined(LISTQ) */
		COUNT(pg)
		hotqlen++;
	}
	PRINTCOUNT("hotq");

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_LISTQ)->q_q, pageq) {
#if !defined(LISTQ)
		printf("listq %p\n", pg);
#endif /* !defined(LISTQ) */
		if (clockpro_getq(pg) != CLOCKPRO_LISTQ) {
			printf("listq corrupt %p\n", pg);
		}
		COUNT(pg)
		listqlen++;
	}
	PRINTCOUNT("listq");

	printf("newqlen=%d/%d, coldqlen=%d/%d, hotqlen=%d/%d, listqlen=%d/%d\n",
	    newqlen, pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)),
	    coldqlen, pageq_len(clockpro_queue(s, CLOCKPRO_COLDQ)),
	    hotqlen, pageq_len(clockpro_queue(s, CLOCKPRO_HOTQ)),
	    listqlen, pageq_len(clockpro_queue(s, CLOCKPRO_LISTQ)));
}

#endif /* defined(DDB) */
#if defined(PDSIM)
#if defined(DEBUG)
static void
pdsim_dumpq(int qidx)
{
	struct clockpro_state * const s = &clockpro;
	pageq_t *q = clockpro_queue(s, qidx);
	struct vm_page *pg;

	TAILQ_FOREACH(pg, &q->q_q, pageq) {
		DPRINTF(" %" PRIu64 "%s%s%s%s%s%s",
		    pg->offset >> PAGE_SHIFT,
		    (pg->pqflags & PQ_HOT) ? "H" : "",
		    (pg->pqflags & PQ_TEST) ? "T" : "",
		    (pg->pqflags & PQ_REFERENCED) ? "R" : "",
		    pmap_is_referenced(pg) ? "r" : "",
		    (pg->pqflags & PQ_INITIALREF) ? "I" : "",
		    (pg->pqflags & PQ_SPECULATIVE) ? "S" : ""
		    );
	}
}
#endif /* defined(DEBUG) */
static void
pdsim_dump(const char *id)
{
#if defined(DEBUG)
	struct clockpro_state * const s = &clockpro;

	DPRINTF("  %s L(", id);
	pdsim_dumpq(CLOCKPRO_LISTQ);
	DPRINTF(" ) H(");
	pdsim_dumpq(CLOCKPRO_HOTQ);
	DPRINTF(" ) C(");
	pdsim_dumpq(CLOCKPRO_COLDQ);
	DPRINTF(" ) N(");
	pdsim_dumpq(CLOCKPRO_NEWQ);
	DPRINTF(" ) ncold=%d/%d, coldadj=%d\n",
	    s->s_ncold, s->s_coldtarget, coldadj);
#endif /* defined(DEBUG) */
}

#endif /* defined(PDSIM) */