2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
15 #ifndef _ASM_TILE_MMU_CONTEXT_H
16 #define _ASM_TILE_MMU_CONTEXT_H
18 #include <linux/smp.h>
19 #include <asm/setup.h>
21 #include <asm/pgalloc.h>
22 #include <asm/pgtable.h>
23 #include <asm/tlbflush.h>
24 #include <asm/homecache.h>
25 #include <asm-generic/mm_hooks.h>
/*
 * Initialize architecture-specific context state for a new mm.
 * No per-mm MMU state needs to be allocated on this architecture,
 * so this always succeeds.  (The missing return type and body were
 * dropped by the text extraction; reconstructed per the standard
 * init_new_context() contract.)
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	return 0;
}
33 /* Note that arch/tile/kernel/head.S also calls hv_install_context() */
34 static inline void __install_page_table(pgd_t
*pgdir
, int asid
, pgprot_t prot
)
36 int rc
= hv_install_context(__pa(pgdir
), prot
, asid
, HV_CTX_DIRECTIO
);
38 panic("hv_install_context failed: %d", rc
);
41 static inline void install_page_table(pgd_t
*pgdir
, int asid
)
43 pte_t
*ptep
= virt_to_pte(NULL
, (unsigned long)pgdir
);
44 __install_page_table(pgdir
, asid
, *ptep
);
48 * "Lazy" TLB mode is entered when we are switching to a kernel task,
49 * which borrows the mm of the previous task. The goal of this
50 * optimization is to avoid having to install a new page table. On
51 * early x86 machines (where the concept originated) you couldn't do
52 * anything short of a full page table install for invalidation, so
53 * handling a remote TLB invalidate required doing a page table
54 * re-install. Someone clearly decided that it was silly to keep
55 * doing this while in "lazy" TLB mode, so the optimization involves
56 * installing the swapper page table instead the first time one
57 * occurs, and clearing the cpu out of cpu_vm_mask, so the cpu running
58 * the kernel task doesn't need to take any more interrupts. At that
59 * point it's then necessary to explicitly reinstall it when context
60 * switching back to the original mm.
62 * On Tile, we have to do a page-table install whenever DMA is enabled,
63 * so in that case lazy mode doesn't help anyway. And more generally,
64 * we have efficient per-page TLB shootdown, and don't expect to spend
65 * that much time in kernel tasks in general, so just leaving the
66 * kernel task borrowing the old page table, but handling TLB
67 * shootdowns, is a reasonable thing to do. And importantly, this
68 * lets us use the hypervisor's internal APIs for TLB shootdown, which
69 * means we don't have to worry about having TLB shootdowns blocked
70 * when Linux is disabling interrupts; see the page migration code for
71 * an example of where it's important for TLB shootdowns to complete
72 * even when interrupts are disabled at the Linux level.
74 static inline void enter_lazy_tlb(struct mm_struct
*mm
, struct task_struct
*t
)
76 #if CHIP_HAS_TILE_DMA()
78 * We have to do an "identity" page table switch in order to
79 * clear any pending DMA interrupts.
81 if (current
->thread
.tile_dma_state
.enabled
)
82 install_page_table(mm
->pgd
, __get_cpu_var(current_asid
));
86 static inline void switch_mm(struct mm_struct
*prev
, struct mm_struct
*next
,
87 struct task_struct
*tsk
)
89 if (likely(prev
!= next
)) {
91 int cpu
= smp_processor_id();
94 int asid
= __get_cpu_var(current_asid
) + 1;
95 if (asid
> max_asid
) {
99 __get_cpu_var(current_asid
) = asid
;
101 /* Clear cpu from the old mm, and set it in the new one. */
102 cpumask_clear_cpu(cpu
, &prev
->cpu_vm_mask
);
103 cpumask_set_cpu(cpu
, &next
->cpu_vm_mask
);
105 /* Re-load page tables */
106 install_page_table(next
->pgd
, asid
);
108 /* See how we should set the red/black cache info */
109 check_mm_caching(prev
, next
);
112 * Since we're changing to a new mm, we have to flush
113 * the icache in case some physical page now being mapped
114 * has subsequently been repurposed and has new code.
121 static inline void activate_mm(struct mm_struct
*prev_mm
,
122 struct mm_struct
*next_mm
)
124 switch_mm(prev_mm
, next_mm
, NULL
);
/* No per-mm teardown or deactivation work is needed on this architecture. */
#define destroy_context(mm) do { } while (0)
#define deactivate_mm(tsk, mm) do { } while (0)
130 #endif /* _ASM_TILE_MMU_CONTEXT_H */