4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 #pragma ident "%Z%%M% %I% %E% SMI"
28 #include <sys/types.h>
29 #include <sys/systm.h>
30 #include <sys/sysmacros.h>
31 #include <sys/archsystm.h>
32 #include <sys/vmsystm.h>
33 #include <sys/machparam.h>
34 #include <sys/machsystm.h>
35 #include <vm/vm_dep.h>
36 #include <vm/hat_sfmmu.h>
37 #include <vm/seg_kmem.h>
38 #include <sys/cmn_err.h>
39 #include <sys/debug.h>
40 #include <sys/cpu_module.h>
41 #include <sys/sysmacros.h>
42 #include <sys/panic.h>
45 * pan_disable_ism_large_pages and pan_disable_large_pages are the Panther-
46 * specific versions of disable_ism_large_pages and disable_large_pages,
47 * and feed back into those two hat variables at hat initialization time,
48 * for Panther-only systems.
50 * chpjag_disable_large_pages is the Ch/Jaguar-specific version of
51 * disable_large_pages. Ditto for pan_disable_large_pages.
52 * Note that the Panther and Ch/Jaguar ITLB do not support 32M/256M pages.
54 static int panther_only
= 0;
56 static uint_t pan_disable_large_pages
= (1 << TTE256M
);
57 static uint_t chjag_disable_large_pages
= ((1 << TTE32M
) | (1 << TTE256M
));
59 static uint_t mmu_disable_ism_large_pages
= ((1 << TTE64K
) |
60 (1 << TTE512K
) | (1 << TTE32M
) | (1 << TTE256M
));
61 static uint_t mmu_disable_auto_data_large_pages
= ((1 << TTE64K
) |
62 (1 << TTE512K
) | (1 << TTE32M
) | (1 << TTE256M
));
63 static uint_t mmu_disable_auto_text_large_pages
= ((1 << TTE64K
) |
64 (1 << TTE512K
) | (1 << TTE32M
) | (1 << TTE256M
));
67 * The function returns the USIII+(i)-IV+ mmu-specific values for the
68 * hat's disable_large_pages and disable_ism_large_pages variables.
69 * Currently the hat's disable_large_pages and disable_ism_large_pages
70 * already contain the generic sparc 4 page size info, and the return
71 * values are or'd with those values.
74 mmu_large_pages_disabled(uint_t flag
)
76 uint_t pages_disable
= 0;
77 extern int use_text_pgsz64K
;
78 extern int use_text_pgsz512K
;
80 if (flag
== HAT_LOAD
) {
82 pages_disable
= pan_disable_large_pages
;
84 pages_disable
= chjag_disable_large_pages
;
86 } else if (flag
== HAT_LOAD_SHARE
) {
87 pages_disable
= mmu_disable_ism_large_pages
;
88 } else if (flag
== HAT_AUTO_DATA
) {
89 pages_disable
= mmu_disable_auto_data_large_pages
;
90 } else if (flag
== HAT_AUTO_TEXT
) {
91 pages_disable
= mmu_disable_auto_text_large_pages
;
92 if (use_text_pgsz512K
) {
93 pages_disable
&= ~(1 << TTE512K
);
95 if (use_text_pgsz64K
) {
96 pages_disable
&= ~(1 << TTE64K
);
99 return (pages_disable
);
102 #if defined(CPU_IMP_DUAL_PAGESIZE)
/*
 * If a platform is running with only Ch+ or Jaguar, and then someone DR's
 * in a Panther board, the Panther mmu will not like it if one of the already
 * running threads is context switched to the Panther and tries to program
 * a 512K or 4M page into the T512_1. So make these platforms pay the price
 * and follow the Panther DTLB restrictions by default. :)
 * The mmu_init_mmu_page_sizes code below takes care of heterogeneous
 * platforms that don't support DR, like daktari.
 *
 * The effect of these restrictions is to limit the allowable values in
 * sfmmu_pgsz[0] and sfmmu_pgsz[1], since these hat variables are used in
 * mmu_set_ctx_page_sizes to set up the values in the sfmmu_cext that
 * are used at context switch time. The value in sfmmu_pgsz[0] is used in
 * P_pgsz0 and sfmmu_pgsz[1] is used in P_pgsz1, as per Figure F-1-1
 * IMMU and DMMU Primary Context Register in the Panther Implementation
 * Supplement and Table 15-21 DMMU Primary Context Register in the
 * Cheetah+ Delta PRM.
 */
#ifdef MIXEDCPU_DR_SUPPORTED
int panther_dtlb_restrictions = 1;
#else
int panther_dtlb_restrictions = 0;
#endif /* MIXEDCPU_DR_SUPPORTED */
/*
 * init_mmu_page_sizes is set to one after the bootup time initialization
 * via mmu_init_mmu_page_sizes, to indicate that mmu_page_sizes has a
 * valid value.
 */
int init_mmu_page_sizes = 0;
135 * mmu_init_large_pages is called with the desired ism_pagesize parameter,
136 * for Panther-only systems. It may be called from set_platform_defaults,
137 * if some value other than 4M is desired, for Panther-only systems.
138 * mmu_ism_pagesize is the tunable. If it has a bad value, then only warn,
139 * since it would be bad form to panic due
142 * The function re-initializes the disable_ism_large_pages and
143 * pan_disable_large_pages variables, which are closely related.
144 * Aka, if 32M is the desired [D]ISM page sizes, then 256M cannot be allowed
145 * for non-ISM large page usage, or DTLB conflict will occur. Please see the
146 * Panther PRM for additional DTLB technical info.
149 mmu_init_large_pages(size_t ism_pagesize
)
151 if (cpu_impl_dual_pgsz
== 0) { /* disable_dual_pgsz flag */
152 pan_disable_large_pages
= ((1 << TTE32M
) | (1 << TTE256M
));
153 mmu_disable_ism_large_pages
= ((1 << TTE64K
) |
154 (1 << TTE512K
) | (1 << TTE32M
) | (1 << TTE256M
));
155 mmu_disable_auto_data_large_pages
= ((1 << TTE64K
) |
156 (1 << TTE512K
) | (1 << TTE32M
) | (1 << TTE256M
));
160 switch (ism_pagesize
) {
162 pan_disable_large_pages
= (1 << TTE256M
);
163 mmu_disable_ism_large_pages
= ((1 << TTE64K
) |
164 (1 << TTE512K
) | (1 << TTE32M
) | (1 << TTE256M
));
165 mmu_disable_auto_data_large_pages
= ((1 << TTE64K
) |
166 (1 << TTE512K
) | (1 << TTE32M
) | (1 << TTE256M
));
168 case MMU_PAGESIZE32M
:
169 pan_disable_large_pages
= (1 << TTE256M
);
170 mmu_disable_ism_large_pages
= ((1 << TTE64K
) |
171 (1 << TTE512K
) | (1 << TTE256M
));
172 mmu_disable_auto_data_large_pages
= ((1 << TTE64K
) |
173 (1 << TTE512K
) | (1 << TTE4M
) | (1 << TTE256M
));
174 adjust_data_maxlpsize(ism_pagesize
);
176 case MMU_PAGESIZE256M
:
177 pan_disable_large_pages
= (1 << TTE32M
);
178 mmu_disable_ism_large_pages
= ((1 << TTE64K
) |
179 (1 << TTE512K
) | (1 << TTE32M
));
180 mmu_disable_auto_data_large_pages
= ((1 << TTE64K
) |
181 (1 << TTE512K
) | (1 << TTE4M
) | (1 << TTE32M
));
182 adjust_data_maxlpsize(ism_pagesize
);
185 cmn_err(CE_WARN
, "Unrecognized mmu_ism_pagesize value 0x%lx",
192 * Re-initialize mmu_page_sizes and friends, for Panther mmu support.
193 * Called during very early bootup from check_cpus_set().
194 * Can be called to verify that mmu_page_sizes are set up correctly.
195 * Note that ncpus is not initialized at this point in the bootup sequence.
198 mmu_init_mmu_page_sizes(int cinfo
)
200 int npanther
= cinfo
;
202 if (!init_mmu_page_sizes
) {
203 if (npanther
== ncpunode
) {
204 mmu_page_sizes
= MMU_PAGE_SIZES
;
205 mmu_hashcnt
= MAX_HASHCNT
;
206 mmu_ism_pagesize
= DEFAULT_ISM_PAGESIZE
;
207 mmu_exported_pagesize_mask
= (1 << TTE8K
) |
208 (1 << TTE64K
) | (1 << TTE512K
) | (1 << TTE4M
) |
209 (1 << TTE32M
) | (1 << TTE256M
);
210 panther_dtlb_restrictions
= 1;
212 } else if (npanther
> 0) {
213 panther_dtlb_restrictions
= 1;
215 init_mmu_page_sizes
= 1;
222 /* Cheetah+ and later worst case DTLB parameters */
223 #ifndef LOCKED_DTLB_ENTRIES
224 #define LOCKED_DTLB_ENTRIES 5 /* 2 user TSBs, 2 nucleus, + OBP */
226 #define TOTAL_DTLB_ENTRIES 16
227 #define AVAIL_32M_ENTRIES 0
228 #define AVAIL_256M_ENTRIES 0
229 #define AVAIL_DTLB_ENTRIES (TOTAL_DTLB_ENTRIES - LOCKED_DTLB_ENTRIES)
230 static uint64_t ttecnt_threshold
[MMU_PAGE_SIZES
] = {
231 AVAIL_DTLB_ENTRIES
, AVAIL_DTLB_ENTRIES
,
232 AVAIL_DTLB_ENTRIES
, AVAIL_DTLB_ENTRIES
,
233 AVAIL_32M_ENTRIES
, AVAIL_256M_ENTRIES
};
236 * The purpose of this code is to indirectly reorganize the sfmmu_pgsz array
237 * in order to handle the Panther mmu DTLB requirements. Panther only supports
238 * the 32M/256M pages in the T512_1 and not in the T16, so the Panther cpu
239 * can only support one of the two largest page sizes at a time (efficiently).
240 * Panther only supports 512K and 4M pages in the T512_0, and 32M/256M pages
241 * in the T512_1. So check the sfmmu flags and ttecnt before enabling
242 * the T512_1 for 32M or 256M page sizes, and make sure that 512K and 4M
243 * requests go to the T512_0.
245 * The tmp_pgsz array comes into this routine in sorted order, as it is
246 * sorted from largest to smallest #pages per pagesize in use by the hat code,
247 * and leaves with the Panther mmu DTLB requirements satisfied. Note that
248 * when the array leaves this function it may not contain all of the page
249 * size codes that it had coming into the function.
251 * Note that for DISM the flag can be set but the ttecnt can be 0, if we
252 * didn't fault any pages in. This allows the t512_1 to be reprogrammed,
253 * because the T16 does not support the two giant page sizes. ouch.
256 mmu_fixup_large_pages(struct hat
*hat
, uint64_t *ttecnt
, uint8_t *tmp_pgsz
)
258 uint_t pgsz0
= tmp_pgsz
[0];
259 uint_t pgsz1
= tmp_pgsz
[1];
263 * Don't program 2nd dtlb for kernel and ism hat
265 ASSERT(hat
->sfmmu_ismhat
== 0);
266 ASSERT(hat
!= ksfmmup
);
267 ASSERT(cpu_impl_dual_pgsz
== 1);
269 ASSERT(!SFMMU_TTEFLAGS_ISSET(hat
, HAT_32M_FLAG
) ||
270 !SFMMU_TTEFLAGS_ISSET(hat
, HAT_256M_FLAG
));
271 ASSERT(!SFMMU_TTEFLAGS_ISSET(hat
, HAT_256M_FLAG
) ||
272 !SFMMU_TTEFLAGS_ISSET(hat
, HAT_32M_FLAG
));
273 ASSERT(!SFMMU_FLAGS_ISSET(hat
, HAT_32M_ISM
) ||
274 !SFMMU_FLAGS_ISSET(hat
, HAT_256M_ISM
));
275 ASSERT(!SFMMU_FLAGS_ISSET(hat
, HAT_256M_ISM
) ||
276 !SFMMU_FLAGS_ISSET(hat
, HAT_32M_ISM
));
278 if (SFMMU_TTEFLAGS_ISSET(hat
, HAT_32M_FLAG
) ||
279 (ttecnt
[TTE32M
] != 0) ||
280 SFMMU_FLAGS_ISSET(hat
, HAT_32M_ISM
)) {
287 } else if (SFMMU_TTEFLAGS_ISSET(hat
, HAT_256M_FLAG
) ||
288 (ttecnt
[TTE256M
] != 0) ||
289 SFMMU_FLAGS_ISSET(hat
, HAT_256M_ISM
)) {
293 if (pgsz0
== TTE256M
)
296 } else if ((pgsz1
== TTE512K
) || (pgsz1
== TTE4M
)) {
297 if ((pgsz0
!= TTE512K
) && (pgsz0
!= TTE4M
)) {
302 pgsz1
= page_szc(MMU_PAGESIZE
);
306 * This implements PAGESIZE programming of the T8s
307 * if large TTE counts don't exceed the thresholds.
309 if (ttecnt
[pgsz0
] < ttecnt_threshold
[pgsz0
])
310 pgsz0
= page_szc(MMU_PAGESIZE
);
311 if (ttecnt
[pgsz1
] < ttecnt_threshold
[pgsz1
])
312 pgsz1
= page_szc(MMU_PAGESIZE
);
318 * Function to set up the page size values used to reprogram the DTLBs,
319 * when page sizes used by a process change significantly.
322 mmu_setup_page_sizes(struct hat
*hat
, uint64_t *ttecnt
, uint8_t *tmp_pgsz
)
327 * Don't program 2nd dtlb for kernel and ism hat
329 ASSERT(hat
->sfmmu_ismhat
== NULL
);
330 ASSERT(hat
!= ksfmmup
);
332 if (cpu_impl_dual_pgsz
== 0) /* disable_dual_pgsz flag */
336 * hat->sfmmu_pgsz[] is an array whose elements
337 * contain a sorted order of page sizes. Element
338 * 0 is the most commonly used page size, followed
339 * by element 1, and so on.
341 * ttecnt[] is an array of per-page-size page counts
342 * mapped into the process.
344 * If the HAT's choice for page sizes is unsuitable,
345 * we can override it here. The new values written
346 * to the array will be handed back to us later to
347 * do the actual programming of the TLB hardware.
349 * The policy we use for programming the dual T8s on
350 * Cheetah+ and beyond is as follows:
352 * We have two programmable TLBs, so we look at
353 * the two most common page sizes in the array, which
354 * have already been computed for us by the HAT.
355 * If the TTE count of either of a preferred page size
356 * exceeds the number of unlocked T16 entries,
357 * we reprogram one of the T8s to that page size
358 * to avoid thrashing in the T16. Else we program
359 * that T8 to the base page size. Note that we do
360 * not force either T8 to be the base page size if a
361 * process is using more than two page sizes. Policy
362 * decisions about which page sizes are best to use are
363 * left to the upper layers.
365 * Note that for Panther, 4M and 512K pages need to be
366 * programmed into T512_0, and 32M and 256M into T512_1,
367 * so we don't want to go through the MIN/MAX code.
368 * For partial-Panther systems, we still want to make sure
369 * that 4M and 512K page sizes NEVER get into the T512_1.
370 * Since the DTLB flags are not set up on a per-cpu basis,
371 * Panther rules must be applied for mixed Panther/Cheetah+/
372 * Jaguar configurations.
374 if (panther_dtlb_restrictions
) {
375 if ((tmp_pgsz
[1] == TTE512K
) || (tmp_pgsz
[1] == TTE4M
)) {
376 if ((tmp_pgsz
[0] != TTE512K
) &&
377 (tmp_pgsz
[0] != TTE4M
)) {
382 pgsz1
= page_szc(MMU_PAGESIZE
);
389 pgsz0
= MIN(tmp_pgsz
[0], tmp_pgsz
[1]);
390 pgsz1
= MAX(tmp_pgsz
[0], tmp_pgsz
[1]);
394 * This implements PAGESIZE programming of the T8s
395 * if large TTE counts don't exceed the thresholds.
397 if (ttecnt
[pgsz0
] < ttecnt_threshold
[pgsz0
])
398 pgsz0
= page_szc(MMU_PAGESIZE
);
399 if (ttecnt
[pgsz1
] < ttecnt_threshold
[pgsz1
])
400 pgsz1
= page_szc(MMU_PAGESIZE
);
406 * The HAT calls this function when an MMU context is allocated so that we
407 * can reprogram the large TLBs appropriately for the new process using
410 * The caller must hold the HAT lock.
413 mmu_set_ctx_page_sizes(struct hat
*hat
)
418 ASSERT(sfmmu_hat_lock_held(hat
));
419 ASSERT(hat
!= ksfmmup
);
421 if (cpu_impl_dual_pgsz
== 0) /* disable_dual_pgsz flag */
425 * If supported, reprogram the TLBs to a larger pagesize.
427 pgsz0
= hat
->sfmmu_pgsz
[0];
428 pgsz1
= hat
->sfmmu_pgsz
[1];
429 ASSERT(pgsz0
< mmu_page_sizes
);
430 ASSERT(pgsz1
< mmu_page_sizes
);
432 if (panther_dtlb_restrictions
) {
433 ASSERT(pgsz1
!= TTE512K
);
434 ASSERT(pgsz1
!= TTE4M
);
437 ASSERT(pgsz0
!= TTE32M
);
438 ASSERT(pgsz0
!= TTE256M
);
441 new_cext
= TAGACCEXT_MKSZPAIR(pgsz1
, pgsz0
);
442 if (hat
->sfmmu_cext
!= new_cext
) {
446 * assert cnum should be invalid, this is because pagesize
447 * can only be changed after a proc's ctxs are invalidated.
449 for (i
= 0; i
< max_mmu_ctxdoms
; i
++) {
450 ASSERT(hat
->sfmmu_ctxs
[i
].cnum
== INVALID_CONTEXT
);
453 hat
->sfmmu_cext
= new_cext
;
457 * sfmmu_setctx_sec() will take care of the
458 * rest of the chores reprogramming the hat->sfmmu_cext
459 * page size values into the DTLBs.
464 * This function assumes that there are either four or six supported page
465 * sizes and at most two programmable TLBs, so we need to decide which
466 * page sizes are most important and then adjust the TLB page sizes
467 * accordingly (if supported).
469 * If these assumptions change, this function will need to be
470 * updated to support whatever the new limits are.
473 mmu_check_page_sizes(sfmmu_t
*sfmmup
, uint64_t *ttecnt
)
475 uint64_t sortcnt
[MMU_PAGE_SIZES
];
476 uint8_t tmp_pgsz
[MMU_PAGE_SIZES
];
478 uint16_t oldval
, newval
;
481 * We only consider reprogramming the TLBs if one or more of
482 * the two most used page sizes changes and we're using
483 * large pages in this process, except for Panther 32M/256M pages,
484 * which the Panther T16 does not support.
486 if (SFMMU_LGPGS_INUSE(sfmmup
)) {
487 /* Sort page sizes. */
488 for (i
= 0; i
< mmu_page_sizes
; i
++) {
489 sortcnt
[i
] = ttecnt
[i
];
491 for (j
= 0; j
< mmu_page_sizes
; j
++) {
492 for (i
= mmu_page_sizes
- 1, max
= 0; i
> 0; i
--) {
493 if (sortcnt
[i
] > sortcnt
[max
])
501 * Handle Panther page dtlb calcs separately. The check
502 * for actual or potential 32M/256M pages must occur
503 * every time due to lack of T16 support for them.
504 * The sort works fine for Ch+/Jag, but Panther has
505 * pagesize restrictions for both DTLBs.
507 oldval
= sfmmup
->sfmmu_pgsz
[0] << 8 | sfmmup
->sfmmu_pgsz
[1];
510 mmu_fixup_large_pages(sfmmup
, ttecnt
, tmp_pgsz
);
512 /* Check 2 largest values after the sort. */
513 mmu_setup_page_sizes(sfmmup
, ttecnt
, tmp_pgsz
);
515 newval
= tmp_pgsz
[0] << 8 | tmp_pgsz
[1];
516 if (newval
!= oldval
) {
517 sfmmu_reprog_pgsz_arr(sfmmup
, tmp_pgsz
);
522 #endif /* CPU_IMP_DUAL_PAGESIZE */
524 struct heap_lp_page_size
{
530 struct heap_lp_page_size heap_lp_pgsz
[] = {
532 {CHEETAH_IMPL
, TTE8K
, 0}, /* default */
533 {CHEETAH_IMPL
, TTE64K
, 0},
534 {CHEETAH_IMPL
, TTE4M
, 0},
536 { CHEETAH_PLUS_IMPL
, TTE4M
, 1 }, /* default */
537 { CHEETAH_PLUS_IMPL
, TTE4M
, 0 },
538 { CHEETAH_PLUS_IMPL
, TTE64K
, 1 },
539 { CHEETAH_PLUS_IMPL
, TTE64K
, 0 },
540 { CHEETAH_PLUS_IMPL
, TTE8K
, 0 },
542 { JALAPENO_IMPL
, TTE4M
, 1 }, /* default */
543 { JALAPENO_IMPL
, TTE4M
, 0 },
544 { JALAPENO_IMPL
, TTE64K
, 1 },
545 { JALAPENO_IMPL
, TTE64K
, 0 },
546 { JALAPENO_IMPL
, TTE8K
, 0 },
548 { JAGUAR_IMPL
, TTE4M
, 1 }, /* default */
549 { JAGUAR_IMPL
, TTE4M
, 0 },
550 { JAGUAR_IMPL
, TTE64K
, 1 },
551 { JAGUAR_IMPL
, TTE64K
, 0 },
552 { JAGUAR_IMPL
, TTE8K
, 0 },
554 { SERRANO_IMPL
, TTE4M
, 1 }, /* default */
555 { SERRANO_IMPL
, TTE4M
, 0 },
556 { SERRANO_IMPL
, TTE64K
, 1 },
557 { SERRANO_IMPL
, TTE64K
, 0 },
558 { SERRANO_IMPL
, TTE8K
, 0 },
560 { PANTHER_IMPL
, TTE4M
, 1 }, /* default */
561 { PANTHER_IMPL
, TTE4M
, 0 },
562 { PANTHER_IMPL
, TTE64K
, 1 },
563 { PANTHER_IMPL
, TTE64K
, 0 },
564 { PANTHER_IMPL
, TTE8K
, 0 }
/*
 * Tunable: -1 means auto-select from heap_lp_pgsz[]; 0 disables dt512
 * reprogramming for the kernel heap (see mmu_init_kernel_pgsz).
 */
int heaplp_use_dt512 = -1;
570 mmu_init_kernel_pgsz(struct hat
*hat
)
572 uint_t tte
= page_szc(segkmem_lpsize
);
573 uchar_t new_cext_primary
, new_cext_nucleus
;
575 if (heaplp_use_dt512
== 0 || tte
> TTE4M
) {
576 /* do not reprogram dt512 tlb */
580 new_cext_nucleus
= TAGACCEXT_MKSZPAIR(tte
, TTE8K
);
581 new_cext_primary
= TAGACCEXT_MKSZPAIR(TTE8K
, tte
);
583 hat
->sfmmu_cext
= new_cext_primary
;
584 kcontextreg
= ((uint64_t)new_cext_nucleus
<< CTXREG_NEXT_SHIFT
) |
585 ((uint64_t)new_cext_primary
<< CTXREG_EXT_SHIFT
);
589 mmu_get_kernel_lpsize(size_t lpsize
)
591 struct heap_lp_page_size
*p_lpgsz
, *pend_lpgsz
;
592 int impl
= cpunodes
[getprocessorid()].implementation
;
595 if (cpu_impl_dual_pgsz
== 0) {
596 heaplp_use_dt512
= 0;
597 return (MMU_PAGESIZE
);
600 pend_lpgsz
= (struct heap_lp_page_size
*)
601 ((char *)heap_lp_pgsz
+ sizeof (heap_lp_pgsz
));
603 /* search for a valid segkmem_lpsize */
604 for (p_lpgsz
= heap_lp_pgsz
; p_lpgsz
< pend_lpgsz
; p_lpgsz
++) {
605 if (impl
!= p_lpgsz
->impl
)
610 * no setting for segkmem_lpsize in /etc/system
611 * use default from the table
614 heaplp_use_dt512
= p_lpgsz
->use_dt512
;
618 if (lpsize
== TTEBYTES(p_lpgsz
->tte
) &&
619 (heaplp_use_dt512
== -1 ||
620 heaplp_use_dt512
== p_lpgsz
->use_dt512
)) {
623 heaplp_use_dt512
= p_lpgsz
->use_dt512
;
630 if (p_lpgsz
== pend_lpgsz
) {
631 /* nothing found: disable large page kernel heap */
633 heaplp_use_dt512
= 0;
636 lpsize
= TTEBYTES(tte
);