[helenos.git] / kernel / arch / sparc64 / src / proc / scheduler.c
/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup sparc64proc
 * @{
 */

/** @file
 */
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <arch.h>
#include <arch/asm.h>
#include <arch/regdef.h>
#include <arch/stack.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <config.h>
#include <align.h>
#include <macros.h>
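/*
 * Overview of the code below: each thread needs a few locked DTLB entries
 * while it runs. Its kernel stack, and for userspace threads also the
 * userspace window buffer, get a locked 8K mapping, unless the respective
 * area is already covered by the locked 4M kernel DTLB entry starting at
 * the (aligned-down) config.base. before_thread_runs_arch() installs these
 * mappings and initializes the reserved global registers;
 * after_thread_ran_arch() tears the mappings down again.
 */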
/** Perform sparc64 specific tasks needed before the new task is run. */
void before_task_runs_arch(void)
{
}
/** Perform sparc64 specific steps before scheduling a thread.
 *
 * Ensure that the thread's kernel stack, as well as the userspace window
 * buffer for userspace threads, are locked in DTLB.
 * For userspace threads, initialize the reserved global registers
 * in the alternate and interrupt sets.
 */
void before_thread_runs_arch(void)
{
    uintptr_t base;

    base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

    if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1 << KERNEL_PAGE_WIDTH))) {
        /*
         * Kernel stack of this thread is not locked in DTLB.
         * First, make sure it is not mapped already.
         * If not, create a locked mapping for it.
         */
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
        dtlb_insert_mapping((uintptr_t) THREAD->kstack, KA2PA(THREAD->kstack),
            PAGESIZE_8K, true, true);
    }

    if ((THREAD->flags & THREAD_FLAG_USPACE)) {
        /*
         * If this thread also executes in userspace, we have to lock
         * its userspace window buffer into DTLB.
         */
        ASSERT(THREAD->arch.uspace_window_buffer);
        uintptr_t uw_buf = ALIGN_DOWN((uintptr_t) THREAD->arch.uspace_window_buffer,
            PAGE_SIZE);
        if (!overlaps(uw_buf, PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
            /*
             * The buffer is not covered by the 4M locked kernel DTLB entry.
             */
            dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, uw_buf);
            dtlb_insert_mapping(uw_buf, KA2PA(uw_buf), PAGESIZE_8K, true, true);
        }
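        /*
         * Note: %g6 and %g7 used below are the reserved kernel globals in
         * the alternate and interrupt register sets, i.e. the sets that are
         * active while traps and interrupts are being handled, so that the
         * handlers can locate the kernel stack and the window buffer
         * without touching memory. The initial kernel stack pointer is
         * adjusted by STACK_BIAS, the stack bias mandated by the SPARC V9 ABI.
         */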
        /*
         * Write the kernel stack address to %g6 and a pointer to the last
         * item in the userspace window buffer to %g7 in the alternate and
         * interrupt sets.
         */
        uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE -
            (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT));
        write_to_ig_g6(sp);
        write_to_ag_g6(sp);
        write_to_ag_g7((uintptr_t) THREAD->arch.uspace_window_buffer);
    }
}
/** Perform sparc64 specific steps before a thread stops running.
 *
 * Demap any locked DTLB entries installed by the thread (i.e. kernel stack
 * and userspace window buffer).
 */
void after_thread_ran_arch(void)
{
    uintptr_t base;

    base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

    if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1 << KERNEL_PAGE_WIDTH))) {
        /*
         * Kernel stack of this thread is locked in DTLB.
         * Destroy the mapping.
         */
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
    }

    if ((THREAD->flags & THREAD_FLAG_USPACE)) {
        /*
         * If this thread also executes in userspace, we have to force all
         * its still-active userspace windows into the userspace window
         * buffer and demap the buffer from DTLB.
         */
        ASSERT(THREAD->arch.uspace_window_buffer);

        uintptr_t uw_buf = ALIGN_DOWN((uintptr_t) THREAD->arch.uspace_window_buffer,
            PAGE_SIZE);
        if (!overlaps(uw_buf, PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
            /*
             * The buffer is not covered by the 4M locked kernel DTLB entry
             * and therefore it was given a dedicated locked DTLB entry.
             * Demap it.
             */
            dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, uw_buf);
        }
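        /*
         * While the thread was running, trap handlers may have spilled
         * userspace register windows into the buffer and moved the buffer
         * pointer kept in %g7. Reading it back here preserves the current
         * buffer position; before_thread_runs_arch() will reload %g7 from
         * it when the thread is scheduled again.
         */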
        /* Sample the state of the userspace window buffer. */
        THREAD->arch.uspace_window_buffer = (uint8_t *) read_from_ag_g7();
    }
}

/** @}
 */