/*
Copyright (C) 2006-2009 Jörg Pfähler

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
21 #include <kernel/context.hpp>
22 #include <kernel/process.hpp>
23 #include <kernel/page_allocator.hpp>
24 #include <kernel/processor.hpp>
26 #include <kernel/virtual_memory.hpp>
28 using namespace kernel
;
29 using kernel::virtual_memory
;
// x86 (32-bit, non-PAE) page-directory / page-table entry flag bits.
// Bits 0-8 are architecturally defined; PAGE_COPY_ON_WRITE uses one of
// the bits (9-11) the CPU leaves available to the operating system, here
// to mark pages for lazy duplication on a write fault.
#define PAGE_PRESENT        0x0001
#define PAGE_WRITE          0x0002
#define PAGE_USER           0x0004
#define PAGE_WRITE_THROUGH  0x0008
#define PAGE_CACHE_DISABLE  0x0010
#define PAGE_ACCESSED       0x0020
#define PAGE_DIRTY          0x0040
#define PAGE_4MB            0x0080
#define PAGE_GLOBAL         0x0100
#define PAGE_COPY_ON_WRITE  0x0200
// NOTE(review): context constructor fragment. This chunk is garbled (fused
// line numbers, wrapped statements) and the constructor's signature line is
// missing; only the member-initializer list and body fragments are visible.
// The heap base is hard-coded to 4 MiB (0x400000) — see the TODO below.
42 // TODO: Heap address?
44 : m_handle(), m_heap(reinterpret_cast<void*>(0x400000))
// Allocate a fresh physical frame to serve as this context's page
// directory; its address is stored in m_handle.
46 page_allocator
& PageAllocator
= page_allocator::instance();
47 m_handle
= PageAllocator
.allocate();
49 // TODO: kernel context
52 // TODO: Delegate this to the kernel_context, we need to lock the kernel_context
// NOTE(review): tail of a call whose head (original line 53) is missing —
// presumably it copies page_size bytes of the kernel page directory into
// the new directory so kernel mappings are shared. TODO confirm against
// the full source.
54 kernel::x86::virtual_memory::kernel_page_directory
<void>(),
55 virtual_memory::page_size
);
63 size_t context::get_flags(size_t flags
)
65 size_t fl
= PAGE_PRESENT
;
66 if ((flags
& lightOS::context::write
) != 0)fl
|= PAGE_WRITE
;
67 if ((flags
& lightOS::context::user
) != 0)fl
|= PAGE_USER
;
68 if ((flags
& lightOS::context::global
) != 0)fl
|= PAGE_GLOBAL
;
69 if ((flags
& lightOS::context::write_through
) != 0)fl
|= PAGE_WRITE_THROUGH
;
70 if ((flags
& lightOS::context::cache_disable
) != 0)fl
|= PAGE_CACHE_DISABLE
;
71 if ((flags
& lightOS::context::copy_on_write
) != 0)fl
|= PAGE_COPY_ON_WRITE
;
// copy_on_write_handler: invoked on a write fault to resolve a
// copy-on-write mapping by giving the faulting page a private copy.
// NOTE(review): the parameter list is truncated in this chunk — a process
// reference ('Process', used below) is presumably a parameter; the
// function's return statements are also missing.
75 bool context::copy_on_write_handler(void* vaddress
,
// Look up the lightOS-level flags of the faulting address; only proceed
// if the mapping is actually marked copy-on-write.
78 size_t flags
= physical_address_flags(vaddress
);
79 if ((flags
& lightOS::context::copy_on_write
) != 0)
81 // Allocate a new page
82 page_allocator
&PageAllocator
= page_allocator::instance();
83 void *newPage
= PageAllocator
.allocate();
// Record the new frame in the owning process's page list so it can be
// reclaimed later ('Process' is not declared in the visible fragment).
84 Process
.mPages
.push_back(newPage
);
// Round the faulting address down to its 4 KiB page base and resolve the
// physical frame currently backing it.
89 void *vaddr
= reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(vaddress
) & 0xFFFFF000);
90 void *toCopy
= physical_address(vaddr
);
// NOTE(review): tail of a copy call whose head (original lines 91-92) is
// missing — presumably memcpy of page_size bytes from toCopy into newPage.
// TODO confirm against the full source.
93 virtual_memory::page_size
);
// Drop the copy-on-write bit, grant write access, and remap the virtual
// page onto the private copy.
96 size_t newFlags
= (flags
& (~lightOS::context::copy_on_write
)) | lightOS::context::write
;
97 map(newPage
, vaddr
, newFlags
);
// Translate a virtual address into its physical address ("nolock" variant
// — presumably the caller already holds the context lock; TODO confirm).
// Walks the two-level 32-bit page tables rooted at m_handle.
106 void* context::physical_address_nolock(void* vaddress
)
// KERNEL_CONTEXT_START: macro defined elsewhere — presumably switches to /
// locks the kernel context so the tables can be read; a matching
// KERNEL_CONTEXT_END may exist on lines missing from this chunk.
108 KERNEL_CONTEXT_START
;
110 uint32_t physical
= 0;
// Top 10 address bits select the page-directory entry.
111 size_t iDir
= reinterpret_cast<size_t>(vaddress
) >> 22;
112 size_t pageTable
= reinterpret_cast<uint32_t*>(m_handle
)[iDir
];
// Only descend into the page table if the directory entry is present.
113 if ((pageTable
& PAGE_PRESENT
) == PAGE_PRESENT
)
// Middle 10 bits select the PTE; mask off the low 12 flag bits of both
// the directory entry and the PTE to get the 4 KiB-aligned frame base.
115 size_t iPageTable
= (reinterpret_cast<uint32_t>(vaddress
) >> 12) & 0x3FF;
116 physical
= reinterpret_cast<uint32_t*>(pageTable
& 0xFFFFF000)[iPageTable
] & 0xFFFFF000;
// Recombine the frame base with the low 12-bit page offset. Note: if the
// mapping was absent, physical stays 0 and only the offset is returned.
120 return reinterpret_cast<void*>(physical
| (reinterpret_cast<uint32_t>(vaddress
) & 0xFFF));
// Return the lightOS-level mapping flags for a virtual address by reading
// its PTE and translating the x86 PAGE_* bits back into
// lightOS::context::* flags.
// NOTE(review): the declaration of the local 'flags' (original lines
// 126-127) and the final 'return fl;' are missing from this chunk.
123 size_t context::physical_address_flags(void* vaddress
)
// Presumably switches to / locks the kernel context — TODO confirm.
125 KERNEL_CONTEXT_START
;
// Top 10 address bits select the page-directory entry.
128 size_t iDir
= reinterpret_cast<size_t>(vaddress
) >> 22;
129 size_t pageTable
= reinterpret_cast<uint32_t*>(m_handle
)[iDir
];
130 if ((pageTable
& PAGE_PRESENT
) == PAGE_PRESENT
)
// Middle 10 bits select the PTE; keep only its low 12 flag bits.
132 size_t iPageTable
= (reinterpret_cast<uint32_t>(vaddress
) >> 12) & 0x3FF;
133 flags
= reinterpret_cast<uint32_t*>(pageTable
& 0xFFFFF000)[iPageTable
] & 0xFFF;
// Translate PAGE_* bits into lightOS flags. 'execute' is always reported
// because 32-bit x86 without NX cannot forbid execution.
138 size_t fl
= lightOS::context::execute
;
139 if ((flags
& PAGE_WRITE
) != 0)fl
|= lightOS::context::write
;
140 if ((flags
& PAGE_USER
) != 0)fl
|= lightOS::context::user
;
141 if ((flags
& PAGE_GLOBAL
) != 0)fl
|= lightOS::context::global
;
142 if ((flags
& PAGE_WRITE_THROUGH
) != 0)fl
|= lightOS::context::write_through
;
143 if ((flags
& PAGE_CACHE_DISABLE
) != 0)fl
|= lightOS::context::cache_disable
;
144 if ((flags
& PAGE_COPY_ON_WRITE
) != 0)fl
|= lightOS::context::copy_on_write
;
// Remove the mapping for one virtual page and return the physical frame
// address that was mapped there. The frame itself is NOT freed here.
// NOTE(review): the path taken when the directory entry is not present
// (original lines 161-166) is missing from this chunk; no TLB
// invalidation is visible in the fragment either — TODO confirm.
148 void* context::unmap(void* vaddress
)
// Presumably switches to / locks the kernel context — TODO confirm.
150 KERNEL_CONTEXT_START
;
// Top 10 address bits select the page-directory entry.
152 size_t iDir
= reinterpret_cast<size_t>(vaddress
) >> 22;
153 size_t pageTable
= reinterpret_cast<uint32_t*>(m_handle
)[iDir
];
154 if ((pageTable
& PAGE_PRESENT
) == PAGE_PRESENT
)
// Middle 10 bits select the PTE within the table.
156 size_t iPageTable
= (reinterpret_cast<uint32_t>(vaddress
) >> 12) & 0x3FF;
157 pageTable
&= 0xFFFFF000;
// Read the entry, then clear it to remove the mapping.
158 uint32_t page
= reinterpret_cast<uint32_t*>(pageTable
)[iPageTable
];
159 reinterpret_cast<uint32_t*>(pageTable
)[iPageTable
] = 0;
// Return the 4 KiB-aligned physical frame that had been mapped.
160 return reinterpret_cast<void*>(page
& 0xFFFFF000);
// Map one or more physical pages into this context at a virtual address.
// NOTE(review): the signature is truncated in this chunk (only 'paddress'
// is visible; 'vaddress', 'flags' and a page count are clearly used), and
// the per-page loop head (original line ~174) is missing — vaddress and
// paddress advance by 4096 at the bottom of the visible body.
167 void context::map(void* paddress
,
// Presumably switches to / locks the kernel context — TODO confirm.
171 KERNEL_CONTEXT_START
;
// Translate the caller's lightOS flags into x86 PAGE_* bits once.
173 size_t fl
= get_flags(flags
);
// Top 10 address bits select the page-directory entry.
175 size_t iDir
= reinterpret_cast<size_t>(vaddress
) >> 22;
176 size_t pageTable
= reinterpret_cast<uint32_t*>(m_handle
)[iDir
];
177 if ((pageTable
& PAGE_PRESENT
) != PAGE_PRESENT
)
179 // Allocate page table
// Directory entry absent: allocate and zero a fresh frame to act as the
// page table, then install it with PAGE_WRITE forced on.
180 page_allocator
&PageAllocator
= page_allocator::instance();
181 void *page
= PageAllocator
.allocate();
182 memset(page
, 0, 4096);
183 pageTable
= reinterpret_cast<uint32_t*>(m_handle
)[iDir
] = reinterpret_cast<uint32_t>(page
) | fl
| PAGE_WRITE
;
185 // If kernel address space, patch all process address spaces
// When the new table was added to the kernel context, mirror the
// directory entry into every process's directory so the shared kernel
// mappings stay in sync.
186 if (m_handle
== kernel_context::instance().m_handle
)
188 for (size_t i
=0;i
< process::size();i
++)
190 process
*Process
= process::get_process_by_index(i
);
191 context
&Context
= Process
->getContext();
192 reinterpret_cast<uint32_t*>(Context
.m_handle
)[iDir
] = reinterpret_cast<uint32_t>(page
) | fl
| PAGE_WRITE
;
// Write the PTE: 4 KiB-aligned frame base plus the translated flag bits,
// then advance both addresses to the next page.
196 size_t iPageTable
= (reinterpret_cast<uint32_t>(vaddress
) >> 12) & 0x3FF;
197 reinterpret_cast<uint32_t*>(pageTable
& 0xFFFFF000)[iPageTable
] = (reinterpret_cast<uint32_t>(paddress
) & 0xFFFFF000) | fl
;
198 vaddress
= reinterpret_cast<void*>(reinterpret_cast<uint32_t>(vaddress
) + 4096);
199 paddress
= reinterpret_cast<void*>(reinterpret_cast<uint32_t>(paddress
) + 4096);
// Share a memory region with another context by copying the source
// page-directory entry into the destination directory — i.e. sharing
// happens at page-table (4 MiB region) granularity.
// NOTE(review): the signature is truncated in this chunk (only
// 'vaddress_src' is visible; 'dest', 'vaddress_dest' and 'flags' are used
// below).
204 void context::copy_shared_memory(void* vaddress_src
,
// Presumably switches to / locks the kernel context — TODO confirm.
209 KERNEL_CONTEXT_START
;
// Compute the new flag bits for the source (flold) and destination
// (flnew) directory entries from the shared-memory mode:
//  - transfer_ownership: source loses PAGE_PRESENT (ownership moves).
//  - mutual_write: source loses PAGE_WRITE per this logic — NOTE(review):
//    this looks inverted (mutual write should keep the source writable);
//    confirm against the full source.
//  - read_only: destination does not get PAGE_WRITE.
211 size_t flold
= PAGE_USER
;
212 if (flags
!= libkernel::shared_memory::transfer_ownership
)flold
|= PAGE_PRESENT
;
213 if (flags
!= libkernel::shared_memory::mutual_write
)flold
|= PAGE_WRITE
;
214 size_t flnew
= PAGE_PRESENT
| PAGE_USER
;
215 if (flags
!= libkernel::shared_memory::read_only
)flnew
|= PAGE_WRITE
;
// Copy the directory entry: same page-table frame, new flags on each side.
217 size_t iDirSrc
= reinterpret_cast<uint32_t>(vaddress_src
) >> 22;
218 uint32_t pageTableSrc
= reinterpret_cast<uint32_t*>(m_handle
)[iDirSrc
];
219 if ((pageTableSrc
& PAGE_PRESENT
) == PAGE_PRESENT
)
221 size_t iDirDest
= reinterpret_cast<uint32_t>(vaddress_dest
) >> 22;
222 reinterpret_cast<uint32_t*>(dest
.m_handle
)[iDirDest
] = (pageTableSrc
& 0xFFFFF000) | flnew
;
223 reinterpret_cast<uint32_t*>(m_handle
)[iDirSrc
] = (pageTableSrc
& 0xFFFFF000) | flold
;
// Release a shared-memory region: free the physical frames referenced by
// its page table, free the page-table frame itself, and clear the
// page-directory entry.
// NOTE(review): the signature is truncated (only 'vaddress' visible;
// 'size' is clearly a parameter).
229 void context::free_shared_memory(void* vaddress
,
// pageCount = ceil(size / 4096).
232 size_t pageCount
= size
/ 4096;
233 if ((size
% 4096) != 0)++pageCount
;
// Presumably switches to / locks the kernel context — TODO confirm.
235 KERNEL_CONTEXT_START
;
237 size_t iDir
= reinterpret_cast<size_t>(vaddress
) >> 22;
238 uint32_t pageTable
= reinterpret_cast<uint32_t*>(m_handle
)[iDir
];
239 if ((pageTable
& PAGE_PRESENT
) == PAGE_PRESENT
)
// NOTE(review): the loop frees the first pageCount PTEs starting at index
// 0, ignoring vaddress's offset within the table, and assumes the region
// fits in a single page table (<= 4 MiB) — confirm against callers.
241 page_allocator
&PageAllocator
= page_allocator::instance();
242 for (size_t i
=0;i
< pageCount
;i
++)
243 PageAllocator
.free(reinterpret_cast<void*>(reinterpret_cast<uint32_t*>(pageTable
& 0xFFFFF000)[i
] & 0xFFFFF000));
// Free the page-table frame and clear the directory entry.
244 PageAllocator
.free(reinterpret_cast<void*>(pageTable
& 0xFFFFF000));
245 reinterpret_cast<uint32_t*>(m_handle
)[iDir
] = 0;
// Consume a pre-allocated frame ('page') to fill a hole in the paging
// structures for 'vaddress': install it as the page table if the
// directory entry is absent, otherwise as the page itself if the PTE is
// absent. The frame is zeroed before installation in either case.
// NOTE(review): the signature is truncated (only 'page' visible;
// 'vaddress' and 'flags' are used) and the bool return statements
// (original lines ~265-268 and beyond) are missing — presumably the
// return value tells the caller whether 'page' was consumed.
251 bool context::create_page_tables(void* page
,
// Presumably switches to / locks the kernel context — TODO confirm.
256 KERNEL_CONTEXT_START
;
258 size_t fl
= get_flags(flags
);
// Top 10 address bits select the page-directory entry.
259 uint32_t iDir
= reinterpret_cast<uint32_t>(vaddress
) >> 22;
260 uint32_t pageTable
= reinterpret_cast<uint32_t*>(m_handle
)[iDir
];
261 if ((pageTable
& PAGE_PRESENT
) != PAGE_PRESENT
)
// Directory entry absent: zero the frame and install it as the new
// page table for this 4 MiB region.
263 memset(page
, 0, 4096);
264 reinterpret_cast<uint32_t*>(m_handle
)[iDir
] = reinterpret_cast<uint32_t>(page
) | fl
;
// Directory entry present: check the PTE instead.
269 uint32_t iPageTable
= (reinterpret_cast<uint32_t>(vaddress
) >> 12) & 0x3FF;
270 uint32_t pageTableEntry
= reinterpret_cast<uint32_t*>(pageTable
& 0xFFFFF000)[iPageTable
];
271 if ((pageTableEntry
& PAGE_PRESENT
) != PAGE_PRESENT
)
// PTE absent: zero the frame and map it at vaddress.
273 memset(page
, 0, 4096);
274 reinterpret_cast<uint32_t*>(pageTable
& 0xFFFFF000)[iPageTable
] = reinterpret_cast<uint32_t>(page
) | fl
;
// Detach a shared-memory region from this context by clearing its whole
// page-directory entry (4 MiB granularity — matches copy_shared_memory,
// which shares at directory-entry level). Frames are NOT freed here.
// NOTE(review): the function's continuation (original lines 289+) is
// missing from this chunk; no TLB invalidation is visible.
283 void context::unmap_shared_memory(void* vaddress
)
// Presumably switches to / locks the kernel context — TODO confirm.
285 KERNEL_CONTEXT_START
;
287 size_t iDir
= reinterpret_cast<uint32_t>(vaddress
) >> 22;
288 reinterpret_cast<uint32_t*>(m_handle
)[iDir
] = 0;
// NOTE(review): destructor body fragment — the '~context()' signature line
// (original lines ~293-294) and anything after original line 306 are
// missing from this chunk. Tears down the address space's paging
// structures.
// Presumably switches to / locks the kernel context — TODO confirm.
295 KERNEL_CONTEXT_START
;
297 page_allocator
&PageAllocator
= page_allocator::instance();
// Walk directory entries 0..767 (the lower 3 GiB — presumably the
// process-private portion, with kernel mappings above; TODO confirm) and
// free the page-table frame behind every present entry. The mapped data
// pages themselves are not freed here.
300 for (size_t i
= 0;i
< 768;i
++)
302 uint32_t pageTable
= reinterpret_cast<uint32_t*>(m_handle
)[i
];
303 if ((pageTable
& PAGE_PRESENT
) == PAGE_PRESENT
)
304 PageAllocator
.free(reinterpret_cast<void*>(pageTable
& 0xFFFFF000));
// Finally release the page-directory frame itself.
306 PageAllocator
.free(m_handle
);