/* $Id: generic.c,v 1.14 2001/12/21 04:56:15 davem Exp $
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
static inline void forget_pte(pte_t page)
{
#if 0 /* old 2.4 code */
	if (pte_none(page))
		return;
	if (pte_present(page)) {
		unsigned long pfn = pte_pfn(page);
		struct page *ptpage;

		if (!pfn_valid(pfn))
			return;
		ptpage = pfn_to_page(pfn);
		if (PageReserved(ptpage))
			return;
		/* Drop the reference the old mapping held on the page. */
		page_cache_release(ptpage);
		return;
	}
	swap_free(pte_to_swp_entry(page));
#else
	/* An IO remap must never overwrite an existing mapping. */
	if (!pte_none(page)) {
		printk("forget_pte: old mapping existed!\n");
		BUG();
	}
#endif
}
/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * These routines use a pgprot that sets PAGE_IO and do not check the
 * mem_map table, as this is independent of normal memory.
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t *pte,
	unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	/* Clamp the range to the pte page this pmd entry covers. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t oldpage = *pte;

		pte_clear(mm, address, pte);
		set_pte(pte, mk_pte_io(offset, prot, space));
		forget_pte(oldpage);
		address += PAGE_SIZE;
		offset += PAGE_SIZE;
		pte++;
	} while (address < end);
}
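
#if 0
/* Illustrative sketch, not built: how a virtual address decomposes for
 * the three-level walk used by these helpers.  'address & ~PGDIR_MASK'
 * keeps the offset within one pgd entry and 'address & ~PMD_MASK' the
 * offset within one pmd entry, which is why the ranges are clamped to
 * PGDIR_SIZE and PMD_SIZE respectively.
 */
static void example_decompose(unsigned long address)
{
	unsigned long pgd_index = address >> PGDIR_SHIFT;
	unsigned long off_in_pgd = address & ~PGDIR_MASK;
	unsigned long off_in_pmd = address & ~PMD_MASK;

	printk("addr %08lx: pgd index %lu, offset in pgd entry %08lx, "
	       "offset in pmd entry %08lx\n",
	       address, pgd_index, off_in_pgd, off_in_pmd);
}
#endif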
static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t *pmd,
	unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	/* Clamp the range to the pmd page this pgd entry covers. */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	/* Bias offset so that 'address + offset' below yields the right
	 * physical offset as address advances through the loop. */
	offset -= address;
	do {
		pte_t *pte = pte_alloc_map(mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		io_remap_pte_range(mm, pte, address, end - address,
				   address + offset, prot, space);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}
int io_remap_page_range(struct vm_area_struct *vma, unsigned long from,
	unsigned long offset, unsigned long size, pgprot_t prot, int space)
{
	int error = 0;
	pgd_t *dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;

	/* The caller's pgprot is ignored; IO mappings always use pg_iobits. */
	prot = __pgprot(pg_iobits);
	/* Bias offset so that 'offset + from' stays correct as from advances. */
	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	spin_lock(&mm->page_table_lock);
	while (from < end) {
		pmd_t *pmd = pmd_alloc(current->mm, dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = io_remap_pmd_range(mm, pmd, from, end - from,
					   offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	spin_unlock(&mm->page_table_lock);

	flush_tlb_range(vma, beg, end);
	return error;
}
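
#if 0
/* Illustrative sketch, not built: a driver's mmap handler calling
 * io_remap_page_range().  MYDEV_PHYS_BASE and MYDEV_IOSPACE are
 * hypothetical names for the device's physical base address and IO
 * space number; the pgprot argument is overridden with pg_iobits
 * inside io_remap_page_range() in any case.
 */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (io_remap_page_range(vma, vma->vm_start, MYDEV_PHYS_BASE,
				size, vma->vm_page_prot, MYDEV_IOSPACE))
		return -EAGAIN;
	return 0;
}
#endif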
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
	unsigned long pfn, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t *dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;
	/* The IO space number is encoded in the high bits of 'pfn'. */
	int space = GET_IOSPACE(pfn);
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;

	/* The caller's pgprot is ignored; IO mappings always use pg_iobits. */
	prot = __pgprot(pg_iobits);
	/* Bias offset so that 'offset + from' stays correct as from advances. */
	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	spin_lock(&mm->page_table_lock);
	while (from < end) {
		pmd_t *pmd = pmd_alloc(current->mm, dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = io_remap_pmd_range(mm, pmd, from, end - from,
					   offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	spin_unlock(&mm->page_table_lock);

	flush_tlb_range(vma, beg, end);
	return error;
}
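
#if 0
/* Illustrative sketch, not built: io_remap_pfn_range() expects the IO
 * space number packed into the high bits of 'pfn' (see GET_IOSPACE()
 * and GET_PFN()), so a driver can usually pass vma->vm_pgoff straight
 * through from userspace's mmap() offset.
 */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
#endif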