/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
35 #include <arch/mm/as.h>
36 #include <arch/mm/tlb.h>
37 #include <genarch/mm/as_ht.h>
38 #include <genarch/mm/asid_fifo.h>
43 #include <arch/mm/tsb.h>
44 #include <arch/memstr.h>
45 #include <synch/mutex.h>
50 #endif /* CONFIG_TSB */
52 #ifdef CONFIG_VIRT_IDX_DCACHE
53 #include <arch/mm/cache.h>
54 #endif /* CONFIG_VIRT_IDX_DCACHE */
56 /** Architecture dependent address space init. */
57 void as_arch_init(void)
59 if (config
.cpu_active
== 1) {
60 as_operations
= &as_ht_operations
;
65 int as_constructor_arch(as_t
*as
, int flags
)
68 int order
= fnzb32(((ITSB_ENTRY_COUNT
+DTSB_ENTRY_COUNT
)*sizeof(tsb_entry_t
))>>FRAME_WIDTH
);
69 uintptr_t tsb
= (uintptr_t) frame_alloc(order
, flags
| FRAME_KA
);
74 as
->arch
.itsb
= (tsb_entry_t
*) tsb
;
75 as
->arch
.dtsb
= (tsb_entry_t
*) (tsb
+ ITSB_ENTRY_COUNT
* sizeof(tsb_entry_t
));
76 memsetb((uintptr_t) as
->arch
.itsb
, (ITSB_ENTRY_COUNT
+DTSB_ENTRY_COUNT
)*sizeof(tsb_entry_t
), 0);
81 int as_destructor_arch(as_t
*as
)
84 count_t cnt
= ((ITSB_ENTRY_COUNT
+DTSB_ENTRY_COUNT
)*sizeof(tsb_entry_t
))>>FRAME_WIDTH
;
85 frame_free(KA2PA((uintptr_t) as
->arch
.itsb
));
92 int as_create_arch(as_t
*as
, int flags
)
97 ipl
= interrupts_disable();
98 mutex_lock_active(&as
->lock
); /* completely unnecessary, but polite */
99 tsb_invalidate(as
, 0, (count_t
) -1);
100 mutex_unlock(&as
->lock
);
101 interrupts_restore(ipl
);
106 /** Perform sparc64-specific tasks when an address space becomes active on the processor.
108 * Install ASID and map TSBs.
110 * @param as Address space.
112 void as_install_arch(as_t
*as
)
114 tlb_context_reg_t ctx
;
117 * Note that we don't lock the address space.
118 * That's correct - we can afford it here
119 * because we only read members that are
120 * currently read-only.
124 * Write ASID to secondary context register.
125 * The primary context register has to be set
126 * from TL>0 so it will be filled from the
127 * secondary context register from the TL=1
128 * code just before switch to userspace.
131 ctx
.context
= as
->asid
;
132 mmu_secondary_context_write(ctx
.v
);
135 uintptr_t base
= ALIGN_DOWN(config
.base
, 1 << KERNEL_PAGE_WIDTH
);
137 ASSERT(as
->arch
.itsb
&& as
->arch
.dtsb
);
139 uintptr_t tsb
= (uintptr_t) as
->arch
.itsb
;
141 if (!overlaps(tsb
, 8*PAGE_SIZE
, base
, 1 << KERNEL_PAGE_WIDTH
)) {
143 * TSBs were allocated from memory not covered
144 * by the locked 4M kernel DTLB entry. We need
145 * to map both TSBs explicitly.
147 dtlb_demap(TLB_DEMAP_PAGE
, TLB_DEMAP_NUCLEUS
, tsb
);
148 dtlb_insert_mapping(tsb
, KA2PA(tsb
), PAGESIZE_64K
, true, true);
152 * Setup TSB Base registers.
154 tsb_base_reg_t tsb_base
;
157 tsb_base
.size
= TSB_SIZE
;
160 tsb_base
.base
= ((uintptr_t) as
->arch
.itsb
) >> PAGE_WIDTH
;
161 itsb_base_write(tsb_base
.value
);
162 tsb_base
.base
= ((uintptr_t) as
->arch
.dtsb
) >> PAGE_WIDTH
;
163 dtsb_base_write(tsb_base
.value
);
165 #ifdef CONFIG_VIRT_IDX_DCACHE
166 if (as
->dcache_flush_on_install
) {
168 * Some mappings in this address space are illegal address
169 * aliases. Upon their creation, the dcache_flush_on_install
172 * We are now obliged to flush the D-cache in order to guarantee
173 * that there will be at most one cache line for each address
176 * This flush performs a cleanup after another address space in
177 * which the alias might have existed.
181 #endif /* CONFIG_VIRT_IDX_DCACHE */
184 /** Perform sparc64-specific tasks when an address space is removed from the processor.
188 * @param as Address space.
190 void as_deinstall_arch(as_t
*as
)
194 * Note that we don't lock the address space.
195 * That's correct - we can afford it here
196 * because we only read members that are
197 * currently read-only.
201 uintptr_t base
= ALIGN_DOWN(config
.base
, 1 << KERNEL_PAGE_WIDTH
);
203 ASSERT(as
->arch
.itsb
&& as
->arch
.dtsb
);
205 uintptr_t tsb
= (uintptr_t) as
->arch
.itsb
;
207 if (!overlaps(tsb
, 8*PAGE_SIZE
, base
, 1 << KERNEL_PAGE_WIDTH
)) {
209 * TSBs were allocated from memory not covered
210 * by the locked 4M kernel DTLB entry. We need
211 * to demap the entry installed by as_install_arch().
213 dtlb_demap(TLB_DEMAP_PAGE
, TLB_DEMAP_NUCLEUS
, tsb
);
216 #ifdef CONFIG_VIRT_IDX_DCACHE
217 if (as
->dcache_flush_on_deinstall
) {
219 * Some mappings in this address space are illegal address
220 * aliases. Upon their creation, the dcache_flush_on_deinstall
223 * We are now obliged to flush the D-cache in order to guarantee
224 * that there will be at most one cache line for each address
227 * This flush performs a cleanup after this address space. It is
228 * necessary because other address spaces that contain the same
229 * alias are not necessarily aware of the need to carry out the
230 * cache flush. The only address spaces that are aware of it are
231 * those that created the illegal alias.
235 #endif /* CONFIG_VIRT_IDX_DCACHE */