/*  $Id: init.c,v 1.29 1997/05/27 06:28:13 davem Exp $
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/string.h>
#include <linux/init.h>
#include <linux/blk.h>
#include <linux/swap.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/vaddrs.h>

extern void show_net_buffers(void);
extern unsigned long device_scan(unsigned long);

struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];

/* Ugly, but necessary... -DaveM */
unsigned long phys_base, null_pmd_table, null_pte_table;

extern unsigned long empty_null_pmd_table;
extern unsigned long empty_null_pte_table;

unsigned long tlb_context_cache = CTX_FIRST_VERSION;

/* References to section boundaries */
extern char __init_begin, __init_end, etext, __p1275_loc, __bss_start;

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */

pmd_t *__bad_pmd(void)
{
	pmd_t *pmdp = (pmd_t *) &empty_bad_pmd_table;

	__bfill64((void *)pmdp, null_pte_table);
	return (pmd_t *) (((unsigned long)pmdp) + phys_base);
}

pte_t *__bad_pte(void)
{
	memset((void *) &empty_bad_pte_table, 0, PAGE_SIZE);
	return (pte_t *) (((unsigned long)&empty_bad_pte_table) + phys_base);
}

pte_t __bad_page(void)
{
	memset((void *) &empty_bad_page, 0, PAGE_SIZE);
	return pte_mkdirty(mk_pte((((unsigned long) &empty_bad_page)+phys_base),
				  PAGE_SHARED));
}

void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0;

	printk("\nMem-info:\n");
	show_free_areas();
	printk("Free swap:       %6dkB\n", nr_swap_pages << (PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map + i))
			reserved++;
		else if (!atomic_read(&mem_map[i].count))
			free++;
		else
			shared += atomic_read(&mem_map[i].count) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	show_buffers();
#ifdef CONFIG_NET
	show_net_buffers();
#endif
}

/* IOMMU support, the ideas are right, the code should be cleaned a bit still... */

/* XXX Also, play with the streaming buffers at some point;
 * XXX both Fusion and Sunfire apparently have them. -DaveM
 */

/* This keeps track of pages used in sparc_alloc_dvma() invocations. */
static unsigned long dvma_map_pages[0x10000000 >> 16] = { 0, };
static unsigned long dvma_pages_current_offset = 0;
static int dvma_pages_current_index = 0;
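
/* Bookkeeping for the 0xf0000000-0xffffffff DVMA window, managed in
 * 64K slots: dvma_map_pages[] remembers the CPU pages backing each
 * slot, dvma_pages_current_index selects the slot being filled, and
 * dvma_pages_current_offset is the fill point within that slot.
 */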

__initfunc(unsigned long iommu_init(int iommu_node, unsigned long memory_start,
				    unsigned long memory_end, struct linux_sbus *sbus))
{
	struct iommu_struct *iommu;
	struct sysio_regs *sregs;
	struct linux_prom_registers rprop[2];
	unsigned long impl, vers;
	unsigned long control, tsbbase;
	unsigned long *iopte;
	int err, i;

	err = prom_getproperty(iommu_node, "reg", (char *)rprop,
			       sizeof(rprop));
	if(err == -1) {
		prom_printf("iommu_init: Cannot map SYSIO control registers.\n");
		prom_halt();
	}
	sregs = (struct sysio_regs *) sparc_alloc_io(rprop[0].phys_addr,
						     (void *)0,
						     sizeof(struct sysio_regs),
						     "SYSIO Regs",
						     rprop[0].which_io, 0x0);

	memory_start = (memory_start + 7) & ~7;
	iommu = (struct iommu_struct *) memory_start;
	memory_start += sizeof(struct iommu_struct);
	iommu->sysio_regs = sregs;
	sbus->iommu = iommu;

	control = sregs->iommu_control;
	impl = (control & IOMMU_CTRL_IMPL) >> 60;
	vers = (control & IOMMU_CTRL_VERS) >> 56;
	printk("IOMMU: IMPL[%x] VERS[%x] SYSIO mapped at %016lx\n",
	       (unsigned int) impl, (unsigned int) vers, (unsigned long) sregs);

	control &= ~(IOMMU_CTRL_TSBSZ);
	control |= (IOMMU_TSBSZ_64K | IOMMU_CTRL_TBWSZ | IOMMU_CTRL_ENAB);

	/* Use only 64k pages; things are laid out in the 32-bit SBUS
	 * address space like this:
	 *
	 * 0x00000000	----------------------------------------
	 *		| Direct physical mappings for most    |
	 *		| DVMA to paddr's within this range     |
	 * 0xf0000000	----------------------------------------
	 *		| For mappings requested via            |
	 *		| sparc_alloc_dvma()                    |
	 * 0xffffffff	----------------------------------------
	 */
	tsbbase = PAGE_ALIGN(memory_start);
	memory_start = (tsbbase + ((64 * 1024) * 8));
	iommu->page_table = (iopte_t *) tsbbase;
	iopte = (unsigned long *) tsbbase;
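
	/* The TSB reserved above is 64K IOPTEs of 8 bytes each; with
	 * 64K IOMMU pages a single table spans the entire 4GB SBUS
	 * address space.
	 */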

	/* Setup aliased mappings... */
	for(i = 0; i < (65536 - 4096); i++) {
		*iopte  = (IOPTE_VALID | IOPTE_64K |
			   IOPTE_CACHE | IOPTE_WRITE);
		*iopte |= (i << 16);
		iopte++;
	}
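
	/* Each of the first 61440 IOPTEs identity-maps its own 64K
	 * slot (SBUS address i << 16 onto physical i << 16), which
	 * implements the direct window below 0xf0000000 shown in the
	 * diagram above.
	 */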

	/* Clear all sparc_alloc_dvma() maps. */
	for( ; i < 65536; i++)
		*iopte++ = 0;

	sregs->iommu_tsbbase = __pa(tsbbase);
	sregs->iommu_control = control;

	return memory_start;
}

void mmu_map_dma_area(unsigned long addr, int len, __u32 *dvma_addr)
{
	struct iommu_struct *iommu = SBus_chain->iommu; /* GROSS ME OUT! */
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	int i;

	/* Find out if we need to grab some pages. */
	if(!dvma_map_pages[dvma_pages_current_index] ||
	   ((dvma_pages_current_offset + len) > (1 << 16))) {
		unsigned long *iopte;
		unsigned long newpages = __get_free_pages(GFP_KERNEL, 3, 0);

		if(!newpages)
			panic("AIEEE cannot get DVMA pages.");

		memset((char *)newpages, 0, (1 << 16));

		if(!dvma_map_pages[dvma_pages_current_index]) {
			dvma_map_pages[dvma_pages_current_index] = newpages;
			i = dvma_pages_current_index;
		} else {
			dvma_map_pages[dvma_pages_current_index + 1] = newpages;
			i = dvma_pages_current_index + 1;
		}

		/* Stick it in the IOMMU. */
		i = (65536 - 4096) + i;
		iopte = (unsigned long *)(iommu->page_table + i);
		*iopte = (IOPTE_VALID | IOPTE_64K | IOPTE_CACHE | IOPTE_WRITE);
		*iopte |= __pa(newpages);
	}
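
	/* Note: the order-3 __get_free_pages() above returns 8
	 * contiguous 8K pages, i.e. exactly one 64K chunk, matching
	 * the 64K IOMMU page size used for the DVMA window.
	 */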

	/* Get this out of the way. */
	*dvma_addr = (__u32) ((0xf0000000) +
			      (dvma_pages_current_index << 16) +
			      (dvma_pages_current_offset));

	while((len > 0) && (dvma_pages_current_offset < (1 << 16))) {
		unsigned long the_page =
			dvma_map_pages[dvma_pages_current_index] +
			dvma_pages_current_offset;

		/* Map the CPU's view. */
		pgdp = pgd_offset(init_task.mm, addr);
		pmdp = pmd_alloc_kernel(pgdp, addr);
		ptep = pte_alloc_kernel(pmdp, addr);
		pte = mk_pte(the_page, PAGE_KERNEL);
		set_pte(ptep, pte);

		dvma_pages_current_offset += PAGE_SIZE;
		addr += PAGE_SIZE;
		len -= PAGE_SIZE;
		if(dvma_pages_current_offset == (1 << 16)) {
			dvma_pages_current_index++;
			dvma_pages_current_offset = 0;
		}
	}
}
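
/* The mmu_get_scsi_*() helpers below rely on the identity window set
 * up in iommu_init(): any physical address below 0xf0000000 already
 * is a valid SBUS DVMA address, so no IOMMU entry has to be built.
 */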
__u32 mmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
{
	__u32 sbus_addr = (__u32) __pa(vaddr);

	if((sbus_addr < 0xf0000000) &&
	   ((sbus_addr + len) < 0xf0000000))
		return sbus_addr;

	/* "can't happen"... GFP_DMA assures this. */
	panic("Very high scsi_one mappings should never happen.");
	return (__u32)0;
}

void mmu_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
{
	while(sz >= 0) {
		__u32 page = (__u32) __pa(((unsigned long) sg[sz].addr));

		if((page < 0xf0000000) &&
		   (page + sg[sz].len) < 0xf0000000) {
			sg[sz].dvma_addr = page;
		} else {
			/* "can't happen"... GFP_DMA assures this. */
			panic("scsi_sgl high mappings should never happen.");
		}
		sz--;
	}
}

static char sfmmuinfo[512];

char *mmu_info(void)
{
	/* We'll do the rest later to make it nice... -DaveM */
	sprintf(sfmmuinfo, "MMU Type\t: Spitfire\n");

	return sfmmuinfo;
}

static unsigned long mempool;

struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

#define MAX_TRANSLATIONS 64
static void inherit_prom_mappings(void)
{
	struct linux_prom_translation transl[MAX_TRANSLATIONS];
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long vaddr;
	int node, n, i;

	node = prom_finddevice("/virtual-memory");
	if ((n = prom_getproperty(node, "translations", (char *) transl,
				  sizeof(transl))) == -1) {
		prom_printf("Couldn't get translation property\n");
		prom_halt();
	}
	n = n / sizeof(transl[0]);

	for (i = 0; i < n; i++) {
		/* Convert each PROM mapping to pgd/pmd/pte form, but
		 * only for the 0xf0000000-0xffffffff range.
		 */
		if (transl[i].virt >= 0xf0000000 && transl[i].virt < 0x100000000) {
			for (vaddr = transl[i].virt;
			     vaddr < transl[i].virt + transl[i].size;
			     vaddr += PAGE_SIZE) {
				pgdp = pgd_offset(init_task.mm, vaddr);
				if (pgd_none(*pgdp)) {
					pmdp = sparc_init_alloc(&mempool,
								PAGE_SIZE);
					__bfill64((void *)pmdp, null_pte_table);
					pgd_set(pgdp, pmdp);
				}
				pmdp = pmd_offset(pgdp, vaddr);
				if (pmd_none(*pmdp)) {
					ptep = sparc_init_alloc(&mempool,
								PAGE_SIZE);
					pmd_set(pmdp, ptep);
				}
				ptep = pte_offset(pmdp, vaddr);
				set_pte(ptep, __pte(transl[i].data | _PAGE_MODIFIED));
				transl[i].data += PAGE_SIZE;
			}
		}
	}
}

static void inherit_locked_prom_mappings(void)
{
	int i;
	int dtlb_seen = 0;
	int itlb_seen = 0;

	/* Fucking losing PROM has more mappings in the TLB, but
	 * it (conveniently) fails to mention any of these in the
	 * translations property.  The only ones that matter are
	 * the locked PROM tlb entries, so we impose the following
	 * irrecoverable rule on the PROM: it is allowed exactly one
	 * locked entry in the ITLB and one in the DTLB.  We move
	 * those (if necessary) up into tlb entry 62.
	 *
	 * Supposedly the upper 16GB of the address space is
	 * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
	 * SOMEWHERE!  Furthermore, the entire interface used
	 * between the client program and the firmware on sun5
	 * systems to coordinate mmu mappings is also COMPLETELY
	 * UNDOCUMENTED!  Thanks S(t)un!
	 */
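
	/* On Spitfire the TLB_TAG_ACCESS register holds the tag that
	 * the next TLB data store writes along with the data, so each
	 * spitfire_put_*tlb_data() below is preceded by an stxa that
	 * loads (or clears) the tag first.
	 */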
	for(i = 0; i < 62; i++) {
		unsigned long data;

		data = spitfire_get_dtlb_data(i);
		if(!dtlb_seen && (data & _PAGE_L)) {
			unsigned long tag = spitfire_get_dtlb_tag(i);

			__asm__ __volatile__("stxa %%g0, [%0] %1"
					     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
			membar("#Sync");
			spitfire_put_dtlb_data(i, 0x0UL);
			membar("#Sync");

			/* Re-install it at entry 62. */
			__asm__ __volatile__("stxa %0, [%1] %2"
					     : : "r" (tag), "r" (TLB_TAG_ACCESS),
						 "i" (ASI_DMMU));
			membar("#Sync");
			spitfire_put_dtlb_data(62, data);
			membar("#Sync");
			dtlb_seen = 1;
			if(itlb_seen)
				break;
		}
		data = spitfire_get_itlb_data(i);
		if(!itlb_seen && (data & _PAGE_L)) {
			unsigned long tag = spitfire_get_itlb_tag(i);

			__asm__ __volatile__("stxa %%g0, [%0] %1"
					     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
			membar("#Sync");
			spitfire_put_itlb_data(i, 0x0UL);
			membar("#Sync");

			/* Re-install it at entry 62. */
			__asm__ __volatile__("stxa %0, [%1] %2"
					     : : "r" (tag), "r" (TLB_TAG_ACCESS),
						 "i" (ASI_IMMU));
			membar("#Sync");
			spitfire_put_itlb_data(62, data);
			membar("#Sync");
			itlb_seen = 1;
			if(dtlb_seen)
				break;
		}
	}
}

__initfunc(static void
allocate_ptable_skeleton(unsigned long start, unsigned long end))
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset(init_task.mm, start);
		if (pgd_none(*pgdp)) {
			pmdp = sparc_init_alloc(&mempool,
						PAGE_SIZE);
			__bfill64((void *)pmdp, null_pte_table);
			pgd_set(pgdp, pmdp);
		}
		pmdp = pmd_offset(pgdp, start);
		if (pmd_none(*pmdp)) {
			ptep = sparc_init_alloc(&mempool,
						PAGE_SIZE);
			pmd_set(pmdp, ptep);
		}
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/* Create a mapping for an I/O register.  Have to make sure the side-effect
 * bit is set.
 */
void sparc_ultra_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
			   int bus, int rdonly)
{
	pgd_t *pgdp = pgd_offset(init_task.mm, virt_addr);
	pmd_t *pmdp = pmd_offset(pgdp, virt_addr);
	pte_t *ptep = pte_offset(pmdp, virt_addr);
	pte_t pte;

	physaddr &= PAGE_MASK;

	if(rdonly)
		pte = mk_pte_phys(physaddr, __pgprot(pg_iobits));
	else
		pte = mk_pte_phys(physaddr, __pgprot(pg_iobits | __DIRTY_BITS));

	set_pte(ptep, pte);
}
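
/* For read-write register mappings the pte is created with the dirty
 * and writable bits (__DIRTY_BITS) already set, so no modified-bit
 * fault is ever taken on the first store to the register page.
 */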

void sparc_ultra_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset(init_task.mm, virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	pte_clear(ptep);
}

void sparc_ultra_dump_itlb(void)
{
	int slot;

	printk ("Contents of itlb: ");
	for (slot = 0; slot < 14; slot++) printk (" ");
	printk ("%2x:%016lx,%016lx\n", 0, spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
	for (slot = 1; slot < 64; slot+=3) {
		printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
			slot, spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
			slot+1, spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
			slot+2, spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
	}
}

void sparc_ultra_dump_dtlb(void)
{
	int slot;

	printk ("Contents of dtlb: ");
	for (slot = 0; slot < 14; slot++) printk (" ");
	printk ("%2x:%016lx,%016lx\n", 0, spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
	for (slot = 1; slot < 64; slot+=3) {
		printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
			slot, spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
			slot+1, spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
			slot+2, spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
	}
}

/* paging_init() sets up the page tables */

extern unsigned long free_area_init(unsigned long, unsigned long);

__initfunc(unsigned long
paging_init(unsigned long start_mem, unsigned long end_mem))
{
	extern unsigned long phys_base;
	extern void setup_tba(unsigned long kpgdir);
	extern void __bfill64(void *, unsigned long);
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	int i;

	/* Must create a 2nd locked DTLB entry if physical ram starts at
	 * 4MB absolute or higher.  The kernel image has been placed in the
	 * right place at PAGE_OFFSET, but references to start_mem and pages
	 * will be to the perfect alias mapping, so set it up now.
	 */
	if(phys_base >= (4 * 1024 * 1024)) {
		unsigned long alias_base = phys_base + PAGE_OFFSET;
		unsigned long pte;
		unsigned long flags;

		/* We assume physical memory starts at some 4mb multiple;
		 * if this were not true we wouldn't boot up to this point.
		 */
		pte  = phys_base | _PAGE_VALID | _PAGE_SZ4MB;
		pte |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
		save_flags(flags); cli();
		__asm__ __volatile__("
		stxa	%1, [%0] %3
		membar	#Sync
		stxa	%2, [%5] %4
		membar	#Sync"
		: /* no outputs */
		: "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pte),
		  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
		: "memory");
		restore_flags(flags);
	}
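
	/* Entry 61 is used because entry 62 is where
	 * inherit_locked_prom_mappings() later parks the PROM's own
	 * locked TLB entries.
	 */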

	/* Now set kernel pgd to the upper alias so physical page
	 * computations work out right.
	 */
	init_mm.pgd += (phys_base / (sizeof(pgd_t *)));

	null_pmd_table = __pa(((unsigned long)&empty_null_pmd_table) + phys_base);
	null_pte_table = __pa(((unsigned long)&empty_null_pte_table) + phys_base);

	pmdp = (pmd_t *) &empty_null_pmd_table;
	for(i = 0; i < 1024; i++)
		pmd_val(pmdp[i]) = null_pte_table;

	memset((void *) &empty_null_pte_table, 0, PAGE_SIZE);

	/* Now can init the kernel/bad page tables. */
	__bfill64((void *)swapper_pg_dir, null_pmd_table);
	__bfill64((void *)&empty_bad_pmd_table, null_pte_table);
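
	/* The null tables give unmapped addresses a safe walk target:
	 * every swapper_pg_dir slot points at null_pmd_table, whose
	 * 1024 entries all point at the all-zeroes null_pte_table, so
	 * a lookup of an unmapped address terminates in an invalid
	 * pte instead of a stray pointer.
	 */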

	/* We use mempool to create page tables, therefore adjust it up
	 * such that __pa() macros etc. work.
	 */
	mempool = PAGE_ALIGN(start_mem) + phys_base;

	/* FIXME: This should be done much nicer.
	 * Just now we allocate 64M for each.
	 */
	allocate_ptable_skeleton(IOBASE_VADDR, IOBASE_VADDR + 0x4000000);
	allocate_ptable_skeleton(DVMA_VADDR, DVMA_VADDR + 0x4000000);
	inherit_prom_mappings();
	allocate_ptable_skeleton(0, 0x8000 + PAGE_SIZE);

	/* Map the prom interface page. */
	pgdp = pgd_offset(init_task.mm, 0x8000);
	pmdp = pmd_offset(pgdp, 0x8000);
	ptep = pte_offset(pmdp, 0x8000);
	pte = mk_pte(((unsigned long)&__p1275_loc)+phys_base, PAGE_KERNEL);
	set_pte(ptep, pte);

	/* Ok, we can use our TLB miss and window trap handlers safely. */
	setup_tba((unsigned long)init_mm.pgd);

	/* Kill the locked PROM interface page mapping; it will re-enter
	 * on the next PROM interface call via our TLB miss handlers.
	 */
	spitfire_flush_dtlb_primary_page(0x8000);
	membar("#Sync");
	spitfire_flush_itlb_primary_page(0x8000);
	membar("#Sync");

	/* Really paranoid. */
	__asm__ __volatile__("flush	%0" : : "r" (PAGE_OFFSET));

	/* Cleanup the extra locked TLB entry we created, since we now
	 * have our own nice TLB miss handlers installed.
	 */
	if(phys_base >= (4 * 1024 * 1024)) {
		/* We only created a DTLB mapping of this stuff. */
		spitfire_flush_dtlb_nucleus_page(phys_base + PAGE_OFFSET);
		membar("#Sync");
	}

	inherit_locked_prom_mappings();

	start_mem = free_area_init(PAGE_ALIGN(mempool), end_mem);

	return device_scan(PAGE_ALIGN(start_mem));
}

extern int min_free_pages;
extern int free_pages_low;
extern int free_pages_high;
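
/* Walk sp_banks[] and clear PG_reserved on every page that is
 * actually backed by physical RAM; pages that fall in holes between
 * banks stay reserved and are never handed to the allocator.
 */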
__initfunc(static void taint_real_pages(unsigned long start_mem, unsigned long end_mem))
{
	unsigned long addr, tmp2 = 0;

	for(addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
		if(addr >= PAGE_OFFSET && addr < start_mem)
			continue;
		for(tmp2 = 0; sp_banks[tmp2].num_bytes != 0; tmp2++) {
			unsigned long phys_addr = __pa(addr);
			unsigned long base = sp_banks[tmp2].base_addr;
			unsigned long limit = base + sp_banks[tmp2].num_bytes;

			if((phys_addr >= base) && (phys_addr < limit) &&
			   ((phys_addr + PAGE_SIZE) < limit))
				mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
		}
	}
}

__initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
{
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
	int prompages = 0;
	unsigned long tmp2, addr;
	unsigned long data_end;

	end_mem &= PAGE_MASK;
	max_mapnr = MAP_NR(end_mem);
	high_memory = (void *) end_mem;

	start_mem = PAGE_ALIGN(start_mem);
	num_physpages = (start_mem - PAGE_OFFSET) >> PAGE_SHIFT;

	addr = PAGE_OFFSET;
	while(addr < start_mem) {
#ifdef CONFIG_BLK_DEV_INITRD
		if (initrd_below_start_ok && addr >= initrd_start && addr < initrd_end)
			mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
		else
#endif
			mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);
		addr += PAGE_SIZE;
	}

	taint_real_pages(start_mem, end_mem);
	data_end = start_mem - phys_base;
	for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
		if(PageReserved(mem_map + MAP_NR(addr))) {
			if ((addr < (unsigned long) &etext) && (addr >= PAGE_OFFSET))
				codepages++;
			else if((addr >= (unsigned long)&__init_begin && addr < (unsigned long)&__init_end))
				initpages++;
			else if((addr >= (unsigned long)&__p1275_loc && addr < (unsigned long)&__bss_start))
				prompages++;
			else if((addr < data_end) && (addr >= PAGE_OFFSET))
				datapages++;
			continue;
		}
		atomic_set(&mem_map[MAP_NR(addr)].count, 1);
#ifdef CONFIG_BLK_DEV_INITRD
		if (!initrd_start ||
		    (addr < initrd_start || addr >= initrd_end))
#endif
			free_page(addr);
	}

	tmp2 = nr_free_pages << PAGE_SHIFT;
692 printk("Memory: %luk available (%dk kernel code, %dk data, %dk init, %dk prom) [%016lx,%016lx]\n",
694 codepages
<< (PAGE_SHIFT
-10),
695 datapages
<< (PAGE_SHIFT
-10),
696 initpages
<< (PAGE_SHIFT
-10),
697 prompages
<< (PAGE_SHIFT
-10),
698 PAGE_OFFSET
, end_mem
);

	min_free_pages = nr_free_pages >> 7;
	if(min_free_pages < 16)
		min_free_pages = 16;
	free_pages_low = min_free_pages + (min_free_pages >> 1);
	free_pages_high = min_free_pages + min_free_pages;
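
	/* Deliberate store to an unmapped address: a quick bring-up
	 * check that the freshly installed TLB miss and fault handlers
	 * catch it.
	 */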
	printk("Testing fault handling...\n");
	*(char *)0x00000deadbef0000UL = 0;
}

void free_initmem (void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		unsigned long page = addr;

		if(page < ((unsigned long)__va(phys_base)))
			page += phys_base;

		mem_map[MAP_NR(page)].flags &= ~(1 << PG_reserved);
		atomic_set(&mem_map[MAP_NR(page)].count, 1);
		free_page(page);
	}
}

void si_meminfo(struct sysinfo *val)
{
	int i;

	i = MAP_NR(high_memory);
	val->totalram = 0;
	val->sharedram = 0;
	val->freeram = nr_free_pages << PAGE_SHIFT;
	val->bufferram = buffermem;
	while (i-- > 0) {
		if (PageReserved(mem_map + i))
			continue;
		val->totalram++;
		if (!atomic_read(&mem_map[i].count))
			continue;
		val->sharedram += atomic_read(&mem_map[i].count) - 1;
	}
	val->totalram <<= PAGE_SHIFT;
	val->sharedram <<= PAGE_SHIFT;
}