/* $Id: init.c,v 1.135 1999/09/06 22:55:10 ecd Exp $
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/config.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/malloc.h>
#include <linux/blk.h>
#include <linux/swap.h>
#include <linux/swapctl.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/vaddrs.h>
/* Turn this off if you suspect that a page inside a physical memory
 * hole might end up in the page tables (in which case something would
 * already be very badly broken).
 */
#define FREE_UNUSED_MEM_MAP
extern void show_net_buffers(void);
extern unsigned long device_scan(unsigned long);
struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];

unsigned long *sparc64_valid_addr_bitmap;

/* Ugly, but necessary... -DaveM */
unsigned long phys_base;
/* get_new_mmu_context() uses "cache + 1". */
spinlock_t ctx_alloc_lock = SPIN_LOCK_UNLOCKED;
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define CTX_BMAP_SLOTS (1UL << (CTX_VERSION_SHIFT - 6))
unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
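
/* One bit per hardware context: there are (1 << CTX_VERSION_SHIFT)
 * contexts per version and 64 context bits per unsigned long, hence
 * the "- 6" in CTX_BMAP_SLOTS above.
 */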
/* References to section boundaries */
extern char __init_begin, __init_end, etext, __bss_start;
int do_check_pgt_cache(int low, int high)
{
        int freed = 0;

        if (pgtable_cache_size > high) {
                do {
                        free_pgd_slow(get_pgd_fast()), freed++;
                        free_pte_slow(get_pte_fast()), freed++;
                } while (pgtable_cache_size > low);
        }
        if (pgd_cache_size > high / 4) {
                struct page *page, *page2;

                for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
                        if ((unsigned long)page->pprev_hash == 3) {
                                if (page2)
                                        page2->next_hash = page->next_hash;
                                else
                                        (struct page *)pgd_quicklist = page->next_hash;
                                page->next_hash = NULL;
                                page->pprev_hash = NULL;
                                pgd_cache_size -= 2;
                                __free_page(page);
                                freed++;
                                if (page2)
                                        page = page2->next_hash;
                                else
                                        page = (struct page *)pgd_quicklist;
                                if (pgd_cache_size <= low / 4)
                                        break;
                                continue;
                        }
                        page2 = page;
                        page = page->next_hash;
                }
        }
        return freed;
}
/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unlocked.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t __bad_page(void)
{
        memset((void *) &empty_bad_page, 0, PAGE_SIZE);
        return pte_mkdirty(mk_pte((((unsigned long) &empty_bad_page)
                                   - ((unsigned long)&empty_zero_page) + phys_base + PAGE_OFFSET),
                                  PAGE_SHARED));
}
void show_mem(void)
{
        int free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0;
        struct page *page, *end;

        printk("\nMem-info:\n");
        show_free_areas();
        printk("Free swap:       %6dkB\n", nr_swap_pages << (PAGE_SHIFT-10));
        for (page = mem_map, end = mem_map + max_mapnr;
             page < end; page++) {
                if (PageSkip(page)) {
                        if (page->next_hash < page)
                                break;
                        page = page->next_hash;
                }
                total++;
                if (PageReserved(page))
                        reserved++;
                else if (PageSwapCache(page))
                        cached++;
                else if (!atomic_read(&page->count))
                        free++;
                else
                        shared += atomic_read(&page->count) - 1;
        }
        printk("%d pages of RAM\n", total);
        printk("%d free pages\n", free);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
        printk("%d pages in page table cache\n", pgtable_cache_size);
        printk("%d entries in page dir cache\n", pgd_cache_size);
}
/* IOMMU support: the ideas are right, but the code could still use some cleanup... */

/* This keeps track of pages used in sparc_alloc_dvma() invocations. */
/* NOTE: All of these are initialized to 0 in .bss, so we don't need to
 * make the data segment any bigger.
 */
#define DVMAIO_SIZE 0x2000000
static unsigned long dvma_map_pages[DVMAIO_SIZE >> 16];
static unsigned long dvma_pages_current_offset;
static int dvma_pages_current_index;
static unsigned long dvmaiobase = 0;
static unsigned long dvmaiosz __initdata = 0;
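
/* Sizing note: DVMAIO_SIZE is 32MB and each dvma_map_pages[] slot tracks
 * one 64K IOMMU page, so the array holds 0x2000000 >> 16 == 512 entries.
 */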
void __init dvmaio_init(void)
{
        long i;

        for (i = 0; sp_banks[i].num_bytes != 0; i++)
                if (sp_banks[i].base_addr + sp_banks[i].num_bytes > dvmaiobase)
                        dvmaiobase = sp_banks[i].base_addr + sp_banks[i].num_bytes;

        /* We map phys_base through phys_base+(4GB-DVMAIO_SIZE) directly. */
        dvmaiobase -= phys_base;

        dvmaiobase = (dvmaiobase + DVMAIO_SIZE + 0x400000 - 1) & ~(0x400000 - 1);
        for (i = 0; i < 6; i++)
                if (dvmaiobase <= ((1024L * 64 * 1024) << i))
                        break;
        dvmaiobase = ((1024L * 64 * 1024) << i) - DVMAIO_SIZE;
        dvmaiosz = i;
}
void __init iommu_init(int iommu_node, struct linux_sbus *sbus)
{
        extern int this_is_starfire;
        extern void *starfire_hookup(int);
        struct iommu_struct *iommu;
        struct sysio_regs *sregs;
        struct linux_prom64_registers rprop;
        unsigned long impl, vers;
        unsigned long control, tsbbase;
        unsigned long tsbbases[32];
        unsigned long *iopte;
        int err, i, j;
        err = prom_getproperty(iommu_node, "reg", (char *)&rprop,
                               sizeof(rprop));
        if (err == -1) {
                prom_printf("iommu_init: Cannot map SYSIO control registers.\n");
                prom_halt();
        }
        sregs = (struct sysio_regs *) __va(rprop.phys_addr);

        if (!sregs) {
                prom_printf("iommu_init: Fatal error, sysio regs not mapped\n");
                prom_halt();
        }

        iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
        if (!iommu) {
                prom_printf("iommu_init: Fatal error, kmalloc(iommu) failed\n");
                prom_halt();
        }

        spin_lock_init(&iommu->iommu_lock);
        iommu->sysio_regs = sregs;
        sbus->iommu = iommu;

        control = sregs->iommu_control;
        impl = (control & IOMMU_CTRL_IMPL) >> 60;
        vers = (control & IOMMU_CTRL_VERS) >> 56;
        printk("IOMMU(SBUS): IMPL[%x] VERS[%x] SYSIO mapped at %016lx\n",
               (unsigned int) impl, (unsigned int) vers, (unsigned long) sregs);
        /* The streaming buffer is unreliable on VERS 0 of SYSIO;
         * although such parts were never shipped in production
         * Sun hardware, I check just to be robust. --DaveM
         */
        vers = ((sregs->control & SYSIO_CONTROL_VER) >> 56);
        if (vers == 0)
                iommu->strbuf_enabled = 0;
        else
                iommu->strbuf_enabled = 1;

        control &= ~(IOMMU_CTRL_TSBSZ);
        control |= ((IOMMU_TSBSZ_2K * dvmaiosz) | IOMMU_CTRL_TBWSZ | IOMMU_CTRL_ENAB);
        /* Use only 64k pages; things are laid out in the 32-bit SBUS
         * address space like this:
         *
         * 0x00000000      ----------------------------------------
         *                 | Direct physical mappings for most    |
         *                 | DVMA to paddr's within this range    |
         * dvmaiobase      ----------------------------------------
         *                 | For mappings requested via           |
         *                 | sparc_alloc_dvma()                   |
         * dvmaiobase+32M  ----------------------------------------
         *
         * NOTE: we need two contiguous order-5 allocations; that's the
         *       largest chunk page_alloc will give us. -JJ */
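
        /* With 8K kernel pages an order-5 allocation is 256KB, i.e. room
         * for 32768 eight-byte ioptes; two physically adjacent order-5
         * blocks give the full 512KB table (64K entries, each mapping a
         * 64K page, covering 4GB of SBUS DVMA space).
         */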
        tsbbase = 0;
        memset(tsbbases, 0, sizeof(tsbbases));
        for (i = 0; i < 32; i++) {
                tsbbases[i] = __get_free_pages(GFP_DMA, 5);
                for (j = 0; j < i; j++)
                        if (tsbbases[j] == tsbbases[i] + 32768*sizeof(iopte_t)) {
                                tsbbase = tsbbases[i];
                                break;
                        } else if (tsbbases[i] == tsbbases[j] + 32768*sizeof(iopte_t)) {
                                tsbbase = tsbbases[j];
                                break;
                        }
                if (tsbbase)
                        break;
        }
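
        /* Strategy: keep allocating order-5 blocks until two of them turn
         * out to be physically adjacent; the lower block of the pair
         * becomes the TSB base.  Blocks that never paired up are freed
         * again below.
         */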
        for (i = 0; i < 32; i++)
                if (tsbbases[i] && tsbbases[i] != tsbbase &&
                    tsbbases[i] != tsbbase + 32768*sizeof(iopte_t))
                        free_pages(tsbbases[i], 5);
        if (!tsbbase) {
                tsbbase = __get_free_pages(GFP_DMA, dvmaiosz);
                if (!tsbbase) {
                        prom_printf("Strange. Could not allocate 512K of contiguous RAM.\n");
                        prom_halt();
                }
        }
        iommu->page_table = (iopte_t *) tsbbase;
        iopte = (unsigned long *) tsbbase;
        /* Setup aliased mappings... */
        for (i = 0; i < (dvmaiobase >> 16); i++) {
                unsigned long val = ((((unsigned long)i) << 16UL) + phys_base);

                val |= IOPTE_VALID | IOPTE_64K | IOPTE_WRITE;
                if (iommu->strbuf_enabled)
                        val |= IOPTE_STBUF;
                *iopte++ = val;
        }
        /* Clear all sparc_alloc_dvma() maps. */
        for ( ; i < ((dvmaiobase + DVMAIO_SIZE) >> 16); i++)
                *iopte++ = 0;
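
        /* At this point everything below dvmaiobase identity-maps physical
         * memory (offset by phys_base), while the 32MB sparc_alloc_dvma()
         * window above it stays invalid until mmu_map_dma_area() fills it.
         */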
        sregs->iommu_tsbbase = __pa(tsbbase);
        sregs->iommu_control = control;
        /* Get the streaming buffer going. */
        control = sregs->sbuf_control;
        impl = (control & SYSIO_SBUFCTRL_IMPL) >> 60;
        vers = (control & SYSIO_SBUFCTRL_REV) >> 56;
        printk("IOMMU: Streaming Buffer IMPL[%x] REV[%x] ... ",
               (unsigned int) impl, (unsigned int) vers);
        iommu->flushflag = 0;
        if (iommu->strbuf_enabled != 0) {
                sregs->sbuf_control = (control | SYSIO_SBUFCTRL_SB_EN);
                printk("ENABLED\n");
        } else {
                sregs->sbuf_control = (control & ~(SYSIO_SBUFCTRL_SB_EN));
                printk("DISABLED\n");
        }

        /* Finally enable DVMA arbitration for all devices, just in case. */
        sregs->sbus_control |= SYSIO_SBCNTRL_AEN;
        /* If necessary, hook us up for starfire IRQ translations. */
        sbus->upaid = prom_getintdefault(sbus->prom_node, "upa-portid", -1);
        if (this_is_starfire)
                sbus->starfire_cookie = starfire_hookup(sbus->upaid);
        else
                sbus->starfire_cookie = NULL;
}
void mmu_map_dma_area(unsigned long addr, int len, __u32 *dvma_addr,
                      struct linux_sbus *sbus)
{
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep, pte;
        int i;

        /* Find out if we need to grab some pages. */
        if (!dvma_map_pages[dvma_pages_current_index] ||
            ((dvma_pages_current_offset + len) > (1 << 16))) {
                struct linux_sbus *sbus;
                unsigned long *iopte;
                unsigned long newpages = __get_free_pages(GFP_KERNEL, 3);

                if (!newpages)
                        panic("AIEEE cannot get DVMA pages.");

                memset((char *)newpages, 0, (1 << 16));

                if (!dvma_map_pages[dvma_pages_current_index]) {
                        dvma_map_pages[dvma_pages_current_index] = newpages;
                        i = dvma_pages_current_index;
                } else {
                        dvma_map_pages[dvma_pages_current_index + 1] = newpages;
                        i = dvma_pages_current_index + 1;
                }
                /* Stick it in the IOMMU. */
                i = (dvmaiobase >> 16) + i;
                for_each_sbus(sbus) {
                        struct iommu_struct *iommu = sbus->iommu;
                        unsigned long flags;

                        spin_lock_irqsave(&iommu->iommu_lock, flags);
                        iopte = (unsigned long *)(iommu->page_table + i);
                        *iopte = (IOPTE_VALID | IOPTE_64K | IOPTE_CACHE | IOPTE_WRITE);
                        *iopte |= __pa(newpages);
                        spin_unlock_irqrestore(&iommu->iommu_lock, flags);
                }
        }
        /* Get this out of the way. */
        *dvma_addr = (__u32) ((dvmaiobase) +
                              (dvma_pages_current_index << 16) +
                              (dvma_pages_current_offset));
        while ((len > 0) && (dvma_pages_current_offset < (1 << 16))) {
                unsigned long the_page =
                        dvma_map_pages[dvma_pages_current_index] +
                        dvma_pages_current_offset;

                /* Map the CPU's view. */
                pgdp = pgd_offset(&init_mm, addr);
                pmdp = pmd_alloc_kernel(pgdp, addr);
                ptep = pte_alloc_kernel(pmdp, addr);
                pte = mk_pte(the_page, PAGE_KERNEL);
                set_pte(ptep, pte);

                dvma_pages_current_offset += PAGE_SIZE;
                addr += PAGE_SIZE;
                len -= PAGE_SIZE;
        }

        if (dvma_pages_current_offset >= (1 << 16)) {
                dvma_pages_current_index++;
                dvma_pages_current_offset = 0;
        }
}
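
/* Note the double bookkeeping above: each 64K chunk is entered into every
 * SBUS IOMMU's page table, and each 8K piece of it is also mapped into the
 * kernel page tables, so the CPU and device views of the buffer stay
 * coherent.
 */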
__u32 mmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
{
        struct iommu_struct *iommu = sbus->iommu;
        struct sysio_regs *sregs = iommu->sysio_regs;
        unsigned long start = (unsigned long) vaddr;
        unsigned long end = PAGE_ALIGN(start + len);
        unsigned long flags, tmp;
        volatile u64 *sbctrl = (volatile u64 *) &sregs->sbus_control;
        if (end > MAX_DMA_ADDRESS) {
                printk("mmu_get_scsi_one: Bogus DMA buffer address [%016lx:%d]\n",
                       (unsigned long) vaddr, (int) len);
                panic("DMA address too large, tell DaveM");
        }
        if (iommu->strbuf_enabled) {
                volatile u64 *sbuf_pflush = (volatile u64 *) &sregs->sbuf_pflush;

                spin_lock_irqsave(&iommu->iommu_lock, flags);
                iommu->flushflag = 0;

                while (start < end) {
                        *sbuf_pflush = start;
                        start += PAGE_SIZE;
                }
                sregs->sbuf_fsync = __pa(&(iommu->flushflag));
                tmp = *sbctrl;
                while (iommu->flushflag == 0)
                        membar("#LoadLoad");
                spin_unlock_irqrestore(&iommu->iommu_lock, flags);
        }

        return sbus_dvma_addr(vaddr);
}
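
/* No IOMMU entry is created here: buffers below dvmaiobase are already
 * covered by the direct mappings set up in iommu_init(), so
 * sbus_dvma_addr() can compute the DVMA address arithmetically; only the
 * streaming buffer needs flushing.
 */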
void mmu_release_scsi_one(u32 vaddr, unsigned long len, struct linux_sbus *sbus)
{
        struct iommu_struct *iommu = sbus->iommu;
        struct sysio_regs *sregs = iommu->sysio_regs;
        unsigned long start = (unsigned long) vaddr;
        unsigned long end = PAGE_ALIGN(start + len);
        unsigned long flags, tmp;
        volatile u64 *sbctrl = (volatile u64 *) &sregs->sbus_control;
        if (iommu->strbuf_enabled) {
                volatile u64 *sbuf_pflush = (volatile u64 *) &sregs->sbuf_pflush;

                spin_lock_irqsave(&iommu->iommu_lock, flags);

                /* 1) Clear the flush flag word. */
                iommu->flushflag = 0;

                /* 2) Tell the streaming buffer which entries
                 *    we want flushed.
                 */
                while (start < end) {
                        *sbuf_pflush = start;
                        start += PAGE_SIZE;
                }

                /* 3) Initiate flush sequence. */
                sregs->sbuf_fsync = __pa(&(iommu->flushflag));

                /* 4) Guarantee completion of all previous writes
                 *    by reading SYSIO's SBUS control register.
                 */
                tmp = *sbctrl;

                /* 5) Wait for flush flag to get set. */
                while (iommu->flushflag == 0)
                        membar("#LoadLoad");

                spin_unlock_irqrestore(&iommu->iommu_lock, flags);
        }
}
void mmu_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
{
        struct iommu_struct *iommu = sbus->iommu;
        struct sysio_regs *sregs = iommu->sysio_regs;
        unsigned long flags, tmp;
        volatile u64 *sbctrl = (volatile u64 *) &sregs->sbus_control;
->strbuf_enabled
) {
493 volatile u64
*sbuf_pflush
= (volatile u64
*) &sregs
->sbuf_pflush
;
495 spin_lock_irqsave(&iommu
->iommu_lock
, flags
);
496 iommu
->flushflag
= 0;
499 unsigned long start
= (unsigned long)sg
[sz
].addr
;
500 unsigned long end
= PAGE_ALIGN(start
+ sg
[sz
].len
);
502 if (end
> MAX_DMA_ADDRESS
) {
503 printk("mmu_get_scsi_sgl: Bogus DMA buffer address "
504 "[%016lx:%d]\n", start
, (int) sg
[sz
].len
);
505 panic("DMA address too large, tell DaveM");
508 sg
[sz
--].dvma_addr
= sbus_dvma_addr(start
);
511 *sbuf_pflush
= start
;
516 sregs
->sbuf_fsync
= __pa(&(iommu
->flushflag
));
518 while(iommu
->flushflag
== 0)
520 spin_unlock_irqrestore(&iommu
->iommu_lock
, flags
);
        } else {
                /* Just verify the addresses and fill in the
                 * dvma_addr fields in this case.
                 */
                while (sz >= 0) {
                        unsigned long start = (unsigned long)sg[sz].addr;
                        unsigned long end = PAGE_ALIGN(start + sg[sz].len);

                        if (end > MAX_DMA_ADDRESS) {
                                printk("mmu_get_scsi_sgl: Bogus DMA buffer address "
                                       "[%016lx:%d]\n", start, (int) sg[sz].len);
                                panic("DMA address too large, tell DaveM");
                        }
                        sg[sz--].dvma_addr = sbus_dvma_addr(start);
                }
        }
}
void mmu_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
{
        struct iommu_struct *iommu = sbus->iommu;
        struct sysio_regs *sregs = iommu->sysio_regs;
        volatile u64 *sbctrl = (volatile u64 *) &sregs->sbus_control;
        unsigned long flags, tmp;
        if (iommu->strbuf_enabled) {
                volatile u64 *sbuf_pflush = (volatile u64 *) &sregs->sbuf_pflush;

                spin_lock_irqsave(&iommu->iommu_lock, flags);

                /* 1) Clear the flush flag word. */
                iommu->flushflag = 0;

                /* 2) Tell the streaming buffer which entries
                 *    we want flushed.
                 */
                while (sz >= 0) {
                        unsigned long start = sg[sz].dvma_addr;
                        unsigned long end = PAGE_ALIGN(start + sg[sz].len);

                        while (start < end) {
                                *sbuf_pflush = start;
                                start += PAGE_SIZE;
                        }
                        sz--;
                }

                /* 3) Initiate flush sequence. */
                sregs->sbuf_fsync = __pa(&(iommu->flushflag));

                /* 4) Guarantee completion of previous writes
                 *    by reading SYSIO's SBUS control register.
                 */
                tmp = *sbctrl;

                /* 5) Wait for flush flag to get set. */
                while (iommu->flushflag == 0)
                        membar("#LoadLoad");

                spin_unlock_irqrestore(&iommu->iommu_lock, flags);
        }
}
void mmu_set_sbus64(struct linux_sbus_device *sdev, int bursts)
{
        struct linux_sbus *sbus = sdev->my_bus;
        struct sysio_regs *sregs = sbus->iommu->sysio_regs;
        int slot = sdev->slot;
        volatile u64 *cfg;
        u64 tmp;
        switch (slot) {
        case 0:
                cfg = &sregs->sbus_s0cfg;
                break;
        case 1:
                cfg = &sregs->sbus_s1cfg;
                break;
        case 2:
                cfg = &sregs->sbus_s2cfg;
                break;
        case 3:
                cfg = &sregs->sbus_s3cfg;
                break;
        case 4:
                cfg = &sregs->sbus_s4cfg;
                break;
        case 5:
                cfg = &sregs->sbus_s5cfg;
                break;
        case 6:
                cfg = &sregs->sbus_s6cfg;
                break;
        default:
                return;
        }
        /* ETM already enabled?  If so, we're done. */
        tmp = *cfg;
        if ((tmp & SYSIO_SBSCFG_ETM) != 0)
                return;

        /* Set burst bits. */
        if (bursts & DMA_BURST8)
                tmp |= SYSIO_SBSCFG_BA8;
        if (bursts & DMA_BURST16)
                tmp |= SYSIO_SBSCFG_BA16;
        if (bursts & DMA_BURST32)
                tmp |= SYSIO_SBSCFG_BA32;
        if (bursts & DMA_BURST64)
                tmp |= SYSIO_SBSCFG_BA64;

        /* Finally turn on ETM and set register. */
        *cfg = (tmp | SYSIO_SBSCFG_ETM);
}
int mmu_info(char *buf)
{
        /* We'll do the rest later to make it nice... -DaveM */
        return sprintf(buf, "MMU Type\t: Spitfire\n");
}
static unsigned long mempool;

struct linux_prom_translation {
        unsigned long virt;
        unsigned long size;
        unsigned long data;
};
static inline void inherit_prom_mappings(void)
{
        struct linux_prom_translation *trans;
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;
        int node, n, i;

        node = prom_finddevice("/virtual-memory");
        n = prom_getproplen(node, "translations");
        if (n == 0 || n == -1) {
                prom_printf("Couldn't get translation property\n");
                prom_halt();
        }

        for (i = 1; i < n; i <<= 1) /* empty */;
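        /* The loop above rounded n up to the next power of two, so the
         * allocation below is large enough for the whole "translations"
         * property.
         */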
        trans = sparc_init_alloc(&mempool, i);

        if (prom_getproperty(node, "translations", (char *)trans, i) == -1) {
                prom_printf("Couldn't get translation property\n");
                prom_halt();
        }
        n = n / sizeof(*trans);
        for (i = 0; i < n; i++) {
                unsigned long vaddr;

                if (trans[i].virt >= 0xf0000000 && trans[i].virt < 0x100000000) {
                        for (vaddr = trans[i].virt;
                             vaddr < trans[i].virt + trans[i].size;
                             vaddr += PAGE_SIZE) {
                                pgdp = pgd_offset(&init_mm, vaddr);
                                if (pgd_none(*pgdp)) {
                                        pmdp = sparc_init_alloc(&mempool,
                                                                PAGE_SIZE);
                                        memset(pmdp, 0, PAGE_SIZE);
                                        pgd_set(pgdp, pmdp);
                                }
                                pmdp = pmd_offset(pgdp, vaddr);
                                if (pmd_none(*pmdp)) {
                                        ptep = sparc_init_alloc(&mempool,
                                                                PAGE_SIZE);
                                        pmd_set(pmdp, ptep);
                                }
                                ptep = pte_offset(pmdp, vaddr);
                                set_pte(ptep, __pte(trans[i].data | _PAGE_MODIFIED));
                                trans[i].data += PAGE_SIZE;
                        }
                }
        }
}
/* The OBP specifications for sun4u mark 0xfffffffc00000000 and
 * upwards as reserved for use by the firmware (I wonder if this
 * will be the same on Cheetah...).  We use this virtual address
 * range for the VPTE table mappings of the nucleus, so we need
 * to zap them when we enter the PROM. -DaveM
 */
static void __flush_nucleus_vptes(void)
{
        unsigned long prom_reserved_base = 0xfffffffc00000000UL;
        int i;
        /* Only the DTLB must be checked for VPTE entries. */
        for (i = 0; i < 63; i++) {
                unsigned long tag = spitfire_get_dtlb_tag(i);

                if (((tag & ~(PAGE_MASK)) == 0) &&
                    ((tag & (PAGE_MASK)) >= prom_reserved_base)) {
                        __asm__ __volatile__("stxa %%g0, [%0] %1"
                                             : /* no outputs */
                                             : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
                        membar("#Sync");
                        spitfire_put_dtlb_data(i, 0x0UL);
                        membar("#Sync");
                }
        }
}
static int prom_ditlb_set = 0;
struct prom_tlb_entry {
        int             tlb_ent;
        unsigned long   tlb_tag;
        unsigned long   tlb_data;
};
struct prom_tlb_entry prom_itlb[8], prom_dtlb[8];
void prom_world(int enter)
{
        unsigned long pstate;
        int i;

        if (!enter)
                set_fs(current->thread.current_ds);

        if (!prom_ditlb_set)
                return;
        /* Make sure the following runs atomically. */
        __asm__ __volatile__("flushw\n\t"
                             "rdpr %%pstate, %0\n\t"
                             "wrpr %0, %1, %%pstate"
                             : "=r" (pstate)
                             : "i" (PSTATE_IE));

        if (enter) {
                /* Kick out nucleus VPTEs. */
                __flush_nucleus_vptes();
                /* Install PROM world. */
                for (i = 0; i < 8; i++) {
                        if (prom_dtlb[i].tlb_ent != -1) {
                                __asm__ __volatile__("stxa %0, [%1] %2"
                                                     : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
                                                         "i" (ASI_DMMU));
                                membar("#Sync");
                                spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
                                                       prom_dtlb[i].tlb_data);
                                membar("#Sync");
                        }

                        if (prom_itlb[i].tlb_ent != -1) {
                                __asm__ __volatile__("stxa %0, [%1] %2"
                                                     : : "r" (prom_itlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
                                                         "i" (ASI_IMMU));
                                membar("#Sync");
                                spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
                                                       prom_itlb[i].tlb_data);
                                membar("#Sync");
                        }
                }
        } else {
                for (i = 0; i < 8; i++) {
                        if (prom_dtlb[i].tlb_ent != -1) {
                                __asm__ __volatile__("stxa %%g0, [%0] %1"
                                                     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
                                membar("#Sync");
                                spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
                                membar("#Sync");
                        }

                        if (prom_itlb[i].tlb_ent != -1) {
                                __asm__ __volatile__("stxa %%g0, [%0] %1"
                                                     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
                                membar("#Sync");
                                spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
                                membar("#Sync");
                        }
                }
        }
        __asm__ __volatile__("wrpr %0, 0, %%pstate"
                             : : "r" (pstate));
}
void inherit_locked_prom_mappings(int save_p)
{
        int i;
        int dtlb_seen = 0;
        int itlb_seen = 0;
        /* Fucking losing PROM has more mappings in the TLB, but
         * it (conveniently) fails to mention any of these in the
         * translations property.  The only ones that matter are
         * the locked PROM tlb entries, so we impose the following
         * irrevocable rule on the PROM: it is allowed 8 locked
         * entries in the ITLB and 8 in the DTLB.
         *
         * Supposedly the upper 16GB of the address space is
         * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
         * SOMEWHERE!!!!!!!!!!!!!!!!!  Furthermore the entire interface
         * used between the client program and the firmware on sun5
         * systems to coordinate mmu mappings is also COMPLETELY
         * UNDOCUMENTED!!!!!!  Thanks S(t)un!
         */
        for (i = 0; i < 8; i++) {
                prom_dtlb[i].tlb_ent = -1;
                prom_itlb[i].tlb_ent = -1;
        }
        for (i = 0; i < 63; i++) {
                unsigned long data;

                data = spitfire_get_dtlb_data(i);
                if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
                        unsigned long tag = spitfire_get_dtlb_tag(i);

                        if (save_p) {
                                prom_dtlb[dtlb_seen].tlb_ent = i;
                                prom_dtlb[dtlb_seen].tlb_tag = tag;
                                prom_dtlb[dtlb_seen].tlb_data = data;
                        }
                        __asm__ __volatile__("stxa %%g0, [%0] %1"
                                             : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
                        membar("#Sync");
                        spitfire_put_dtlb_data(i, 0x0UL);
                        membar("#Sync");

                        dtlb_seen++;
                        if (dtlb_seen > 7)
                                break;
                }
        }
        for (i = 0; i < 63; i++) {
                unsigned long data;

                data = spitfire_get_itlb_data(i);
                if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
                        unsigned long tag = spitfire_get_itlb_tag(i);

                        if (save_p) {
                                prom_itlb[itlb_seen].tlb_ent = i;
                                prom_itlb[itlb_seen].tlb_tag = tag;
                                prom_itlb[itlb_seen].tlb_data = data;
                        }
                        __asm__ __volatile__("stxa %%g0, [%0] %1"
                                             : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
                        membar("#Sync");
                        spitfire_put_itlb_data(i, 0x0UL);
                        membar("#Sync");

                        itlb_seen++;
                        if (itlb_seen > 7)
                                break;
                }
        }

        if (save_p)
                prom_ditlb_set = 1;
}
/* Give the PROM back its world; done during reboots... */
void prom_reload_locked(void)
{
        int i;

        for (i = 0; i < 8; i++) {
                if (prom_dtlb[i].tlb_ent != -1) {
                        __asm__ __volatile__("stxa %0, [%1] %2"
                                             : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
                                                 "i" (ASI_DMMU));
                        membar("#Sync");
                        spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
                                               prom_dtlb[i].tlb_data);
                        membar("#Sync");
                }

                if (prom_itlb[i].tlb_ent != -1) {
                        __asm__ __volatile__("stxa %0, [%1] %2"
                                             : : "r" (prom_itlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
                                                 "i" (ASI_IMMU));
                        membar("#Sync");
                        spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
                                               prom_itlb[i].tlb_data);
                        membar("#Sync");
                }
        }
}
void __flush_dcache_range(unsigned long start, unsigned long end)
{
        unsigned long va;

        for (va = start; va < end; va += 32) {
                spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
        }
}
void __flush_cache_all(void)
{
        unsigned long va;

        for (va = 0; va < (PAGE_SIZE << 1); va += 32)
                spitfire_put_icache_tag(va, 0x0);
}
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
        unsigned long pstate;
        int i;

        __asm__ __volatile__("flushw\n\t"
                             "rdpr %%pstate, %0\n\t"
                             "wrpr %0, %1, %%pstate"
                             : "=r" (pstate)
                             : "i" (PSTATE_IE));
        for (i = 0; i < 64; i++) {
                if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
                        __asm__ __volatile__("stxa %%g0, [%0] %1"
                                             : /* no outputs */
                                             : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
                        membar("#Sync");
                        spitfire_put_dtlb_data(i, 0x0UL);
                        membar("#Sync");
                }
                if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
                        __asm__ __volatile__("stxa %%g0, [%0] %1"
                                             : /* no outputs */
                                             : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
                        membar("#Sync");
                        spitfire_put_itlb_data(i, 0x0UL);
                        membar("#Sync");
                }
        }
        __asm__ __volatile__("wrpr %0, 0, %%pstate"
                             : : "r" (pstate));
}
/* Caller does TLB context flushing on local CPU if necessary.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus), and so that we never use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 */
void get_new_mmu_context(struct mm_struct *mm)
{
        unsigned long ctx, new_ctx;
        int i;

        spin_lock(&ctx_alloc_lock);
        ctx = CTX_HWBITS(tlb_context_cache + 1);
        if (CTX_VALID(mm->context)) {
                unsigned long nr = CTX_HWBITS(mm->context);
                mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));
        }
        new_ctx = find_next_zero_bit(mmu_context_bmap, 1UL << CTX_VERSION_SHIFT, ctx);
        if (new_ctx >= (1UL << CTX_VERSION_SHIFT)) {
                new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
                if (new_ctx >= ctx) {
                        new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
                                CTX_FIRST_VERSION;
                        if (new_ctx == 1)
                                new_ctx = CTX_FIRST_VERSION;
                        /* Don't call memset, for 16 entries that's just
                         * plain silly...
                         */
                        mmu_context_bmap[0] = 3;
                        mmu_context_bmap[1] = 0;
                        mmu_context_bmap[2] = 0;
                        mmu_context_bmap[3] = 0;
                        for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
                                mmu_context_bmap[i + 0] = 0;
                                mmu_context_bmap[i + 1] = 0;
                                mmu_context_bmap[i + 2] = 0;
                                mmu_context_bmap[i + 3] = 0;
                        }
                        goto out;
                }
        }
        mmu_context_bmap[new_ctx >> 6] |= (1UL << (new_ctx & 63));
        new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
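        /* The low CTX_VERSION_SHIFT bits of a context index the bitmap;
         * the high bits carry the generation ("version"), so contexts
         * handed out before a wrap fail the CTX_VALID() test above.
         */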
out:
        tlb_context_cache = new_ctx;
        spin_unlock(&ctx_alloc_lock);

        mm->context = new_ctx;
}
struct pgtable_cache_struct pgt_quicklists;
pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset)
{
        pmd_t *pmd;

        pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
        if (pmd) {
                memset(pmd, 0, PAGE_SIZE);
                pgd_set(pgd, pmd);
                return pmd + offset;
        }
        return NULL;
}
pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
        pte_t *pte;

        pte = (pte_t *) __get_free_page(GFP_KERNEL);
        if (pte) {
                memset(pte, 0, PAGE_SIZE);
                pmd_set(pmd, pte);
                return pte + offset;
        }
        return NULL;
}
static void __init allocate_ptable_skeleton(unsigned long start, unsigned long end)
{
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;

        while (start < end) {
                pgdp = pgd_offset(&init_mm, start);
                if (pgd_none(*pgdp)) {
                        pmdp = sparc_init_alloc(&mempool, PAGE_SIZE);
                        memset(pmdp, 0, PAGE_SIZE);
                        pgd_set(pgdp, pmdp);
                }
                pmdp = pmd_offset(pgdp, start);
                if (pmd_none(*pmdp)) {
                        ptep = sparc_init_alloc(&mempool, PAGE_SIZE);
                        memset(ptep, 0, PAGE_SIZE);
                        pmd_set(pmdp, ptep);
                }
                start = (start + PMD_SIZE) & PMD_MASK;
        }
}
/* Create a mapping for an I/O register.  Have to make sure the
 * side-effect bit is set correctly.
 */
void sparc_ultra_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
                           int bus, int rdonly)
{
        pgd_t *pgdp = pgd_offset(&init_mm, virt_addr);
        pmd_t *pmdp = pmd_offset(pgdp, virt_addr);
        pte_t *ptep = pte_offset(pmdp, virt_addr);
        pte_t pte;

        physaddr &= PAGE_MASK;

        if (rdonly)
                pte = mk_pte_phys(physaddr, __pgprot(pg_iobits | __PRIV_BITS));
        else
                pte = mk_pte_phys(physaddr, __pgprot(pg_iobits | __DIRTY_BITS | __PRIV_BITS));

        set_pte(ptep, pte);
}
/* XXX no longer used, remove me... -DaveM */
void sparc_ultra_unmapioaddr(unsigned long virt_addr)
{
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;

        pgdp = pgd_offset(&init_mm, virt_addr);
        pmdp = pmd_offset(pgdp, virt_addr);
        ptep = pte_offset(pmdp, virt_addr);

        /* No need to flush uncacheable page. */
        pte_clear(ptep);
}
void sparc_ultra_dump_itlb(void)
{
        int slot;

        printk("Contents of itlb: ");
        for (slot = 0; slot < 14; slot++)
                printk(" ");
        printk("%2x:%016lx,%016lx\n", 0, spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
        for (slot = 1; slot < 64; slot += 3) {
                printk("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
                       slot, spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
                       slot+1, spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
                       slot+2, spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
        }
}
void sparc_ultra_dump_dtlb(void)
{
        int slot;

        printk("Contents of dtlb: ");
        for (slot = 0; slot < 14; slot++)
                printk(" ");
        printk("%2x:%016lx,%016lx\n", 0, spitfire_get_dtlb_tag(0),
               spitfire_get_dtlb_data(0));
        for (slot = 1; slot < 64; slot += 3) {
                printk("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
                       slot, spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
                       slot+1, spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
                       slot+2, spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
        }
}
/* paging_init() sets up the page tables. */

extern unsigned long free_area_init(unsigned long, unsigned long);
extern unsigned long sun_serial_setup(unsigned long);
unsigned long __init
paging_init(unsigned long start_mem, unsigned long end_mem)
{
        extern pmd_t swapper_pmd_dir[1024];
        extern unsigned int sparc64_vpte_patchme1[1];
        extern unsigned int sparc64_vpte_patchme2[1];
        unsigned long alias_base = phys_base + PAGE_OFFSET;
        unsigned long second_alias_page = 0;
        unsigned long pt;
        unsigned long flags;
        unsigned long shift = alias_base - ((unsigned long)&empty_zero_page);

        set_bit(0, mmu_context_bmap);
        /* We assume physical memory starts at some 4MB multiple;
         * if this were not true we wouldn't boot up to this point
         * anyways.
         */
        pt  = phys_base | _PAGE_VALID | _PAGE_SZ4MB;
        pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
        __save_and_cli(flags);
        __asm__ __volatile__("stxa      %1, [%0] %3\n\t"
                             "stxa      %2, [%5] %4\n\t"
                             "membar    #Sync\n\t"
                             "flush     %%g6"
                             : /* no outputs */
                             : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
                               "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
                             : "memory");
        if (start_mem >= KERNBASE + 0x340000) {
                second_alias_page = alias_base + 0x400000;
                __asm__ __volatile__("stxa      %1, [%0] %3\n\t"
                                     "stxa      %2, [%5] %4\n\t"
                                     "membar    #Sync\n\t"
                                     "flush     %%g6"
                                     : /* no outputs */
                                     : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
                                       "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
                                     : "memory");
        }
        __restore_flags(flags);
        /* Now set kernel pgd to upper alias so physical page computations
         * work.
         */
        init_mm.pgd += ((shift) / (sizeof(pgd_t)));

        memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));

        /* Now we can initialize the kernel/bad page tables. */
        pgd_set(&swapper_pg_dir[0], swapper_pmd_dir + (shift / sizeof(pgd_t)));

        sparc64_vpte_patchme1[0] |= (init_mm.pgd[0] >> 10);
        sparc64_vpte_patchme2[0] |= (init_mm.pgd[0] & 0x3ff);
        flushi((long)&sparc64_vpte_patchme1[0]);
        /* We use mempool to create page tables, therefore adjust it up
         * such that __pa() macros etc. work.
         */
        mempool = PAGE_ALIGN(start_mem) + shift;

#ifdef CONFIG_SUN_SERIAL
        /* This does not logically belong here, but it is the first place
         * we can initialize it at, so that we work in the PAGE_OFFSET+
         * aliased address space.
         */
        mempool = sun_serial_setup(mempool);
#endif
        /* Allocate 64M for dynamic DVMA mapping area. */
        allocate_ptable_skeleton(DVMA_VADDR, DVMA_VADDR + 0x4000000);
        inherit_prom_mappings();
        /* Ok, we can use our TLB miss and window trap handlers safely.
         * We need to do a quick peek here to see if we are on StarFire
         * or not, so setup_tba can setup the IRQ globals correctly (it
         * needs to get the hard smp processor id correctly).
         */
        {
                extern void setup_tba(int);
                int is_starfire = prom_finddevice("/ssp-serial");
                if (is_starfire != 0 && is_starfire != -1)
                        is_starfire = 1;
                else
                        is_starfire = 0;
                setup_tba(is_starfire);
        }
        /* Really paranoid. */
        flushi((long)&empty_zero_page);

        /* Cleanup the extra locked TLB entry we created since we have the
         * nice TLB miss handlers of ours installed now.
         */
        /* We only created DTLB mapping of this stuff. */
        spitfire_flush_dtlb_nucleus_page(alias_base);
        if (second_alias_page)
                spitfire_flush_dtlb_nucleus_page(second_alias_page);

        flushi((long)&empty_zero_page);

        inherit_locked_prom_mappings(1);
        start_mem = free_area_init(PAGE_ALIGN(mempool), end_mem);

        return device_scan(PAGE_ALIGN(start_mem));
}
static void __init taint_real_pages(unsigned long start_mem, unsigned long end_mem)
{
        unsigned long tmp = 0, paddr, endaddr;
        unsigned long end = __pa(end_mem);

        for (paddr = __pa(start_mem); paddr < end; ) {
                for (; sp_banks[tmp].num_bytes != 0; tmp++)
                        if (sp_banks[tmp].base_addr + sp_banks[tmp].num_bytes > paddr)
                                break;
                if (!sp_banks[tmp].num_bytes) {
                        mem_map[paddr>>PAGE_SHIFT].flags |= (1<<PG_skip);
                        mem_map[paddr>>PAGE_SHIFT].next_hash = mem_map + (phys_base >> PAGE_SHIFT);
                        mem_map[(paddr>>PAGE_SHIFT)+1UL].flags |= (1<<PG_skip);
                        mem_map[(paddr>>PAGE_SHIFT)+1UL].next_hash = mem_map + (phys_base >> PAGE_SHIFT);
                        return;
                }
                if (sp_banks[tmp].base_addr > paddr) {
                        /* Making one- or two-page PG_skip holes
                         * is not necessary.  We add one more because
                         * we must set the PG_skip flag on the first
                         * two mem_map[] entries for the hole.  Go and
                         * see the mm/filemap.c:shrink_mmap() loop for
                         * details.
                         */
                        if (sp_banks[tmp].base_addr - paddr > 3 * PAGE_SIZE) {
                                mem_map[paddr>>PAGE_SHIFT].flags |= (1<<PG_skip);
                                mem_map[paddr>>PAGE_SHIFT].next_hash = mem_map + (sp_banks[tmp].base_addr >> PAGE_SHIFT);
                                mem_map[(paddr>>PAGE_SHIFT)+1UL].flags |= (1<<PG_skip);
                                mem_map[(paddr>>PAGE_SHIFT)+1UL].next_hash = mem_map + (sp_banks[tmp].base_addr >> PAGE_SHIFT);
                        }
                        paddr = sp_banks[tmp].base_addr;
                }
                endaddr = sp_banks[tmp].base_addr + sp_banks[tmp].num_bytes;
                while (paddr < endaddr) {
                        mem_map[paddr>>PAGE_SHIFT].flags &= ~(1<<PG_reserved);
                        set_bit(paddr >> 22, sparc64_valid_addr_bitmap);
                        if (paddr >= (MAX_DMA_ADDRESS - PAGE_OFFSET))
                                mem_map[paddr>>PAGE_SHIFT].flags &= ~(1<<PG_DMA);
                        paddr += PAGE_SIZE;
                }
        }
}
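
/* Granularity note: each bit set in sparc64_valid_addr_bitmap above covers
 * a 4MB (1 << 22) chunk of physical address space containing real RAM.
 */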
void __init mem_init(unsigned long start_mem, unsigned long end_mem)
{
        int codepages = 0;
        int datapages = 0;
        int initpages = 0;
        unsigned long addr;
        unsigned long alias_base = phys_base + PAGE_OFFSET - (long)(&empty_zero_page);
        struct page *page, *end;
        int i;
        end_mem &= PAGE_MASK;
        max_mapnr = MAP_NR(end_mem);
        high_memory = (void *) end_mem;

        start_mem = ((start_mem + 7UL) & ~7UL);
        sparc64_valid_addr_bitmap = (unsigned long *)start_mem;
        i = max_mapnr >> ((22 - PAGE_SHIFT) + 6);
        i += 1;
        memset(sparc64_valid_addr_bitmap, 0, i << 3);
        start_mem += i << 3;
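        /* Bitmap sizing: one bit per 4MB gives (22 - PAGE_SHIFT) fewer bits
         * than there are pages, and the extra 6 accounts for 64 bits per
         * unsigned long; "i << 3" then converts longs to bytes.
         */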
        start_mem = PAGE_ALIGN(start_mem);
        mem_map[0].flags |= (1<<PG_skip) | (1<<PG_reserved);
        mem_map[0].next_hash = mem_map + (phys_base >> PAGE_SHIFT);
        mem_map[1].flags |= (1<<PG_skip) | (1<<PG_reserved);
        mem_map[1].next_hash = mem_map + (phys_base >> PAGE_SHIFT);
        addr = PAGE_OFFSET + phys_base;
        while (addr < start_mem) {
#ifdef CONFIG_BLK_DEV_INITRD
                if (initrd_below_start_ok && addr >= initrd_start && addr < initrd_end)
                        mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
                else
#endif
                        mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);
                set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
                addr += PAGE_SIZE;
        }
        taint_real_pages(start_mem, end_mem);
#ifdef FREE_UNUSED_MEM_MAP
        end = mem_map + max_mapnr;
        for (page = mem_map; page < end; page++) {
                if (PageSkip(page)) {
                        unsigned long low, high;

                        /* See taint_real_pages() for why this is done. -DaveM */
                        page++;

                        low = PAGE_ALIGN((unsigned long)(page+1));
                        if (page->next_hash < page)
                                high = ((unsigned long)end) & PAGE_MASK;
                        else
                                high = ((unsigned long)page->next_hash) & PAGE_MASK;
                        while (low < high) {
                                mem_map[MAP_NR(low)].flags &= ~(1<<PG_reserved);
                                low += PAGE_SIZE;
                        }
                }
        }
#endif
        for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
                if (PageSkip(mem_map + MAP_NR(addr))) {
                        unsigned long next = mem_map[MAP_NR(addr)].next_hash - mem_map;

                        next = (next << PAGE_SHIFT) + PAGE_OFFSET;
                        if (next < addr || next >= end_mem)
                                break;
                        addr = next;
                }
                if (PageReserved(mem_map + MAP_NR(addr))) {
                        if ((addr < ((unsigned long) &etext) + alias_base) && (addr >= alias_base))
                                codepages++;
                        else if ((addr >= ((unsigned long)&__init_begin) + alias_base)
                                 && (addr < ((unsigned long)&__init_end) + alias_base))
                                initpages++;
                        else if ((addr < start_mem) && (addr >= alias_base))
                                datapages++;
                        continue;
                }
                atomic_set(&mem_map[MAP_NR(addr)].count, 1);
#ifdef CONFIG_BLK_DEV_INITRD
                if (!initrd_start ||
                    (addr < initrd_start || addr >= initrd_end))
#endif
                        free_page(addr);
        }
        {
                /* Put empty_pg_dir on pgd_quicklist */
                extern pgd_t empty_pg_dir[1024];
                unsigned long addr = (unsigned long)empty_pg_dir;

                memset(empty_pg_dir, 0, sizeof(empty_pg_dir));
                mem_map[MAP_NR(addr)].pprev_hash = 0;
                free_pgd_fast((pgd_t *)addr);
        }
1420 printk("Memory: %uk available (%dk kernel code, %dk data, %dk init) [%016lx,%016lx]\n",
1421 nr_free_pages
<< (PAGE_SHIFT
-10),
1422 codepages
<< (PAGE_SHIFT
-10),
1423 datapages
<< (PAGE_SHIFT
-10),
1424 initpages
<< (PAGE_SHIFT
-10),
1425 PAGE_OFFSET
, end_mem
);
        /* NOTE NOTE NOTE NOTE
         * Please keep track of things and make sure this
         * always matches the code in mm/page_alloc.c -DaveM
         */
        i = nr_free_pages >> 7;
        if (i < 48)
                i = 48;
        freepages.min = i;
        freepages.low = i << 1;
        freepages.high = freepages.low + i;
}
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                unsigned long page = addr + (long)__va(phys_base)
                                     - (long)(&empty_zero_page);

                mem_map[MAP_NR(page)].flags &= ~(1 << PG_reserved);
                atomic_set(&mem_map[MAP_NR(page)].count, 1);
                free_page(page);
        }
}
void si_meminfo(struct sysinfo *val)
{
        struct page *page, *end;

        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = ((unsigned long)nr_free_pages) << PAGE_SHIFT;
        val->bufferram = atomic_read(&buffermem);
        for (page = mem_map, end = mem_map + max_mapnr;
             page < end; page++) {
                if (PageSkip(page)) {
                        if (page->next_hash < page)
                                break;
                        page = page->next_hash;
                }
                if (PageReserved(page))
                        continue;
                val->totalram++;
                if (!atomic_read(&page->count))
                        continue;
                val->sharedram += atomic_read(&page->count) - 1;
        }
        val->totalram <<= PAGE_SHIFT;
        val->sharedram <<= PAGE_SHIFT;
}