/*
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
10 #include <linux/mutex.h>
12 #include <asm/mmu_context.h>
13 #include <asm/cacheflush.h>
15 #define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
17 static inline void *kmap_coherent(struct page
*page
, unsigned long addr
)
19 enum fixed_addresses idx
;
20 unsigned long vaddr
, flags
;
25 idx
= (addr
& current_cpu_data
.dcache
.alias_mask
) >> PAGE_SHIFT
;
26 vaddr
= __fix_to_virt(FIX_CMAP_END
- idx
);
27 pte
= mk_pte(page
, PAGE_KERNEL
);
29 local_irq_save(flags
);
30 flush_tlb_one(get_asid(), vaddr
);
31 local_irq_restore(flags
);
33 update_mmu_cache(NULL
, vaddr
, pte
);
/*
 * kunmap_coherent - release a mapping obtained from kmap_coherent().
 *
 * No explicit teardown of the fixmap PTE is needed; the next
 * kmap_coherent() for the slot flushes the TLB entry itself.
 */
static inline void kunmap_coherent(struct page *page)
{
	/* Drop the preempt count taken in kmap_coherent(), then allow
	 * a pending reschedule to happen. */
	dec_preempt_count();
	preempt_check_resched();
}
47 * @address: U0 address to be mapped
48 * @page: page (virt_to_page(to))
50 void clear_user_page(void *to
, unsigned long address
, struct page
*page
)
52 __set_bit(PG_mapped
, &page
->flags
);
53 if (((address
^ (unsigned long)to
) & CACHE_ALIAS
) == 0)
56 void *vto
= kmap_coherent(page
, address
);
57 __clear_user_page(vto
, to
);
66 * @address: U0 address to be mapped
67 * @page: page (virt_to_page(to))
69 void copy_user_page(void *to
, void *from
, unsigned long address
,
72 __set_bit(PG_mapped
, &page
->flags
);
73 if (((address
^ (unsigned long)to
) & CACHE_ALIAS
) == 0)
76 void *vfrom
= kmap_coherent(page
, address
);
77 __copy_user_page(vfrom
, from
, to
);
78 kunmap_coherent(vfrom
);
83 * For SH-4, we have our own implementation for ptep_get_and_clear
85 inline pte_t
ptep_get_and_clear(struct mm_struct
*mm
, unsigned long addr
, pte_t
*ptep
)
89 pte_clear(mm
, addr
, ptep
);
90 if (!pte_not_present(pte
)) {
91 unsigned long pfn
= pte_pfn(pte
);
93 struct page
*page
= pfn_to_page(pfn
);
94 struct address_space
*mapping
= page_mapping(page
);
95 if (!mapping
|| !mapping_writably_mapped(mapping
))
96 __clear_bit(PG_mapped
, &page
->flags
);