[dragonfly.git] / sys / emulation / ndis / subr_ntoskrnl.c
1 /*
2 * Copyright (c) 2003
3 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
32 * $FreeBSD: src/sys/compat/ndis/subr_ntoskrnl.c,v 1.40 2004/07/20 20:28:57 wpaul Exp $
33 * $DragonFly: src/sys/emulation/ndis/subr_ntoskrnl.c,v 1.13 2006/12/23 00:27:02 swildner Exp $
36 #include <sys/ctype.h>
37 #include <sys/unistd.h>
38 #include <sys/param.h>
39 #include <sys/types.h>
40 #include <sys/errno.h>
41 #include <sys/systm.h>
42 #include <sys/malloc.h>
43 #include <sys/lock.h>
45 #include <sys/callout.h>
46 #if __FreeBSD_version > 502113
47 #include <sys/kdb.h>
48 #endif
49 #include <sys/kernel.h>
50 #include <sys/proc.h>
51 #include <sys/kthread.h>
52 #include <sys/bus.h>
53 #include <sys/rman.h>
55 #include <machine/atomic.h>
56 #include <machine/clock.h>
57 #include <machine/stdarg.h>
59 #include "regcall.h"
60 #include "pe_var.h"
61 #include "resource_var.h"
62 #include "ntoskrnl_var.h"
63 #include "ndis_var.h"
64 #include "hal_var.h"
66 #define __regparm __attribute__((regparm(3)))
68 #define FUNC void(*)(void)
70 __stdcall static uint8_t ntoskrnl_unicode_equal(ndis_unicode_string *,
71 ndis_unicode_string *, uint8_t);
72 __stdcall static void ntoskrnl_unicode_copy(ndis_unicode_string *,
73 ndis_unicode_string *);
74 __stdcall static ndis_status ntoskrnl_unicode_to_ansi(ndis_ansi_string *,
75 ndis_unicode_string *, uint8_t);
76 __stdcall static ndis_status ntoskrnl_ansi_to_unicode(ndis_unicode_string *,
77 ndis_ansi_string *, uint8_t);
78 __stdcall static void *ntoskrnl_iobuildsynchfsdreq(uint32_t, void *,
79 void *, uint32_t, uint32_t *, void *, void *);
82 * registerized calls
84 __stdcall __regcall static uint32_t
85 ntoskrnl_iofcalldriver(REGARGS2(void *dobj, void *irp));
86 __stdcall __regcall static void
87 ntoskrnl_iofcompletereq(REGARGS2(void *irp, uint8_t prioboost));
88 __stdcall __regcall static slist_entry *
89 ntoskrnl_push_slist(REGARGS2(slist_header *head, slist_entry *entry));
90 __stdcall __regcall static slist_entry *
91 ntoskrnl_pop_slist(REGARGS1(slist_header *head));
92 __stdcall __regcall static slist_entry *
93 ntoskrnl_push_slist_ex(REGARGS2(slist_header *head, slist_entry *entry), kspin_lock *lock);
94 __stdcall __regcall static slist_entry *
95 ntoskrnl_pop_slist_ex(REGARGS2(slist_header *head, kspin_lock *lock));
97 __stdcall __regcall static uint32_t
98 ntoskrnl_interlock_inc(REGARGS1(volatile uint32_t *addend));
99 __stdcall __regcall static uint32_t
100 ntoskrnl_interlock_dec(REGARGS1(volatile uint32_t *addend));
101 __stdcall __regcall static void
102 ntoskrnl_interlock_addstat(REGARGS2(uint64_t *addend, uint32_t inc));
103 __stdcall __regcall static void
104 ntoskrnl_objderef(REGARGS1(void *object));
106 __stdcall static uint32_t ntoskrnl_waitforobjs(uint32_t,
107 nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
108 int64_t *, wait_block *);
109 static void ntoskrnl_wakeup(void *);
110 static void ntoskrnl_timercall(void *);
111 static void ntoskrnl_run_dpc(void *);
112 __stdcall static void ntoskrnl_writereg_ushort(uint16_t *, uint16_t);
113 __stdcall static uint16_t ntoskrnl_readreg_ushort(uint16_t *);
114 __stdcall static void ntoskrnl_writereg_ulong(uint32_t *, uint32_t);
115 __stdcall static uint32_t ntoskrnl_readreg_ulong(uint32_t *);
116 __stdcall static void ntoskrnl_writereg_uchar(uint8_t *, uint8_t);
117 __stdcall static uint8_t ntoskrnl_readreg_uchar(uint8_t *);
118 __stdcall static int64_t _allmul(int64_t, int64_t);
119 __stdcall static int64_t _alldiv(int64_t, int64_t);
120 __stdcall static int64_t _allrem(int64_t, int64_t);
121 __regparm static int64_t _allshr(int64_t, uint8_t);
122 __regparm static int64_t _allshl(int64_t, uint8_t);
123 __stdcall static uint64_t _aullmul(uint64_t, uint64_t);
124 __stdcall static uint64_t _aulldiv(uint64_t, uint64_t);
125 __stdcall static uint64_t _aullrem(uint64_t, uint64_t);
126 __regparm static uint64_t _aullshr(uint64_t, uint8_t);
127 __regparm static uint64_t _aullshl(uint64_t, uint8_t);
128 __stdcall static void *ntoskrnl_allocfunc(uint32_t, size_t, uint32_t);
129 __stdcall static void ntoskrnl_freefunc(void *);
130 static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
131 static slist_entry *ntoskrnl_popsl(slist_header *);
132 __stdcall static void ntoskrnl_init_lookaside(paged_lookaside_list *,
133 lookaside_alloc_func *, lookaside_free_func *,
134 uint32_t, size_t, uint32_t, uint16_t);
135 __stdcall static void ntoskrnl_delete_lookaside(paged_lookaside_list *);
136 __stdcall static void ntoskrnl_init_nplookaside(npaged_lookaside_list *,
137 lookaside_alloc_func *, lookaside_free_func *,
138 uint32_t, size_t, uint32_t, uint16_t);
139 __stdcall static void ntoskrnl_delete_nplookaside(npaged_lookaside_list *);
140 __stdcall static void ntoskrnl_freemdl(ndis_buffer *);
141 __stdcall static uint32_t ntoskrnl_sizeofmdl(void *, size_t);
142 __stdcall static void ntoskrnl_build_npaged_mdl(ndis_buffer *);
143 __stdcall static void *ntoskrnl_mmaplockedpages(ndis_buffer *, uint8_t);
144 __stdcall static void *ntoskrnl_mmaplockedpages_cache(ndis_buffer *,
145 uint8_t, uint32_t, void *, uint32_t, uint32_t);
146 __stdcall static void ntoskrnl_munmaplockedpages(void *, ndis_buffer *);
147 __stdcall static void ntoskrnl_init_lock(kspin_lock *);
148 __stdcall static size_t ntoskrnl_memcmp(const void *, const void *, size_t);
149 __stdcall static void ntoskrnl_init_ansi_string(ndis_ansi_string *, char *);
150 __stdcall static void ntoskrnl_init_unicode_string(ndis_unicode_string *,
151 uint16_t *);
152 __stdcall static void ntoskrnl_free_unicode_string(ndis_unicode_string *);
153 __stdcall static void ntoskrnl_free_ansi_string(ndis_ansi_string *);
154 __stdcall static ndis_status ntoskrnl_unicode_to_int(ndis_unicode_string *,
155 uint32_t, uint32_t *);
156 static int atoi (const char *);
157 static long atol (const char *);
158 static int rand(void);
159 static void ntoskrnl_time(uint64_t *);
160 __stdcall static uint8_t ntoskrnl_wdmver(uint8_t, uint8_t);
161 static void ntoskrnl_thrfunc(void *);
162 __stdcall static ndis_status ntoskrnl_create_thread(ndis_handle *,
163 uint32_t, void *, ndis_handle, void *, void *, void *);
164 __stdcall static ndis_status ntoskrnl_thread_exit(ndis_status);
165 __stdcall static ndis_status ntoskrnl_devprop(device_object *, uint32_t,
166 uint32_t, void *, uint32_t *);
167 __stdcall static void ntoskrnl_init_mutex(kmutant *, uint32_t);
168 __stdcall static uint32_t ntoskrnl_release_mutex(kmutant *, uint8_t);
169 __stdcall static uint32_t ntoskrnl_read_mutex(kmutant *);
170 __stdcall static ndis_status ntoskrnl_objref(ndis_handle, uint32_t, void *,
171 uint8_t, void **, void **);
172 __stdcall static uint32_t ntoskrnl_zwclose(ndis_handle);
173 static uint32_t ntoskrnl_dbgprint(char *, ...);
174 __stdcall static void ntoskrnl_debugger(void);
175 __stdcall static void dummy(void);
177 static struct lwkt_token ntoskrnl_dispatchtoken;
178 static kspin_lock ntoskrnl_global;
179 static int ntoskrnl_kth = 0;
180 static struct nt_objref_head ntoskrnl_reflist;
182 static MALLOC_DEFINE(M_NDIS, "ndis", "ndis emulation");
185 ntoskrnl_libinit(void)
187 lwkt_token_init(&ntoskrnl_dispatchtoken);
188 ntoskrnl_init_lock(&ntoskrnl_global);
189 TAILQ_INIT(&ntoskrnl_reflist);
190 return(0);
194 ntoskrnl_libfini(void)
196 lwkt_token_uninit(&ntoskrnl_dispatchtoken);
197 return(0);
200 __stdcall static uint8_t
201 ntoskrnl_unicode_equal(ndis_unicode_string *str1,
202 ndis_unicode_string *str2,
203 uint8_t caseinsensitive)
205 int i;
207 if (str1->nus_len != str2->nus_len)
208 return(FALSE);
210 for (i = 0; i < str1->nus_len; i++) {
211 if (caseinsensitive == TRUE) {
212 if (toupper((char)(str1->nus_buf[i] & 0xFF)) !=
213 toupper((char)(str2->nus_buf[i] & 0xFF)))
214 return(FALSE);
215 } else {
216 if (str1->nus_buf[i] != str2->nus_buf[i])
217 return(FALSE);
221 return(TRUE);
224 __stdcall static void
225 ntoskrnl_unicode_copy(ndis_unicode_string *dest,
226 ndis_unicode_string *src)
229 if (dest->nus_maxlen >= src->nus_len)
230 dest->nus_len = src->nus_len;
231 else
232 dest->nus_len = dest->nus_maxlen;
233 memcpy(dest->nus_buf, src->nus_buf, dest->nus_len);
234 return;
237 __stdcall static ndis_status
238 ntoskrnl_unicode_to_ansi(ndis_ansi_string *dest,
239 ndis_unicode_string *src,
240 uint8_t allocate)
242 char *astr = NULL;
244 if (dest == NULL || src == NULL)
245 return(NDIS_STATUS_FAILURE);
247 if (allocate == TRUE) {
248 if (ndis_unicode_to_ascii(src->nus_buf, src->nus_len, &astr))
249 return(NDIS_STATUS_FAILURE);
250 dest->nas_buf = astr;
251 dest->nas_len = dest->nas_maxlen = strlen(astr);
252 } else {
253 dest->nas_len = src->nus_len / 2; /* XXX */
254 if (dest->nas_maxlen < dest->nas_len)
255 dest->nas_len = dest->nas_maxlen;
256 ndis_unicode_to_ascii(src->nus_buf, dest->nas_len * 2,
257 &dest->nas_buf);
259 return (NDIS_STATUS_SUCCESS);
262 __stdcall static ndis_status
263 ntoskrnl_ansi_to_unicode(ndis_unicode_string *dest,
264 ndis_ansi_string *src,
265 uint8_t allocate)
267 uint16_t *ustr = NULL;
269 if (dest == NULL || src == NULL)
270 return(NDIS_STATUS_FAILURE);
272 if (allocate == TRUE) {
273 if (ndis_ascii_to_unicode(src->nas_buf, &ustr))
274 return(NDIS_STATUS_FAILURE);
275 dest->nus_buf = ustr;
276 dest->nus_len = dest->nus_maxlen = strlen(src->nas_buf) * 2;
277 } else {
278 dest->nus_len = src->nas_len * 2; /* XXX */
279 if (dest->nus_maxlen < dest->nus_len)
280 dest->nus_len = dest->nus_maxlen;
281 ndis_ascii_to_unicode(src->nas_buf, &dest->nus_buf);
283 return (NDIS_STATUS_SUCCESS);
286 __stdcall static void *
287 ntoskrnl_iobuildsynchfsdreq(uint32_t func, void *dobj, void *buf,
288 uint32_t len, uint32_t *off,
289 void *event, void *status)
291 return(NULL);
294 __stdcall __regcall static uint32_t
295 ntoskrnl_iofcalldriver(REGARGS2(void *dobj, void *irp))
297 return(0);
300 __stdcall __regcall static void
301 ntoskrnl_iofcompletereq(REGARGS2(void *irp, uint8_t prioboost))
305 static void
306 ntoskrnl_wakeup(void *arg)
308 nt_dispatch_header *obj;
309 wait_block *w;
310 list_entry *e;
311 struct thread *td;
312 struct lwkt_tokref tokref;
314 obj = arg;
316 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
317 obj->dh_sigstate = TRUE;
318 e = obj->dh_waitlisthead.nle_flink;
319 while (e != &obj->dh_waitlisthead) {
320 w = (wait_block *)e;
321 td = w->wb_kthread;
322 ndis_thresume(td);
324 * For synchronization objects, only wake up
325 * the first waiter.
327 if (obj->dh_type == EVENT_TYPE_SYNC)
328 break;
329 e = e->nle_flink;
331 lwkt_reltoken(&tokref);
333 return;
336 static void
337 ntoskrnl_time(uint64_t *tval)
339 struct timespec ts;
341 nanotime(&ts);
342 *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
343 	    (uint64_t)11644473600 * 10000000;	/* 100-ns ticks between the 1601 and 1970 epochs */
345 return;
349 * KeWaitForSingleObject() is a tricky beast, because it can be used
350 * with several different object types: semaphores, timers, events,
351 * mutexes and threads. Semaphores don't appear very often, but the
352 * other object types are quite common. KeWaitForSingleObject() is
353 * what's normally used to acquire a mutex, and it can be used to
354 * wait for a thread termination.
356 * The Windows NDIS API is implemented in terms of Windows kernel
357 * primitives, and some of the object manipulation is duplicated in
358 * NDIS. For example, NDIS has timers and events, which are actually
359 * Windows kevents and ktimers. Now, you're supposed to only use the
360 * NDIS variants of these objects within the confines of the NDIS API,
361 * but there are some naughty developers out there who will use
362 * KeWaitForSingleObject() on NDIS timer and event objects, so we
363  * have to support that as well. Consequently, our NDIS timer and event
364 * code has to be closely tied into our ntoskrnl timer and event code,
365 * just as it is in Windows.
367 * KeWaitForSingleObject() may do different things for different kinds
368 * of objects:
370 * - For events, we check if the event has been signalled. If the
371 * event is already in the signalled state, we just return immediately,
372 * otherwise we wait for it to be set to the signalled state by someone
373 * else calling KeSetEvent(). Events can be either synchronization or
374 * notification events.
376 * - For timers, if the timer has already fired and the timer is in
377 * the signalled state, we just return, otherwise we wait on the
378 * timer. Unlike an event, timers get signalled automatically when
379 * they expire rather than someone having to trip them manually.
380 * Timers initialized with KeInitializeTimer() are always notification
381 * events: KeInitializeTimerEx() lets you initialize a timer as
382 * either a notification or synchronization event.
384 * - For mutexes, we try to acquire the mutex and if we can't, we wait
385 * on the mutex until it's available and then grab it. When a mutex is
386 * released, it enters the signaled state, which wakes up one of the
387 * threads waiting to acquire it. Mutexes are always synchronization
388 * events.
390 * - For threads, the only thing we do is wait until the thread object
391 * enters a signalled state, which occurs when the thread terminates.
392 * Threads are always notification events.
394 * A notification event wakes up all threads waiting on an object. A
395 * synchronization event wakes up just one. Also, a synchronization event
396 * is auto-clearing, which means we automatically set the event back to
397 * the non-signalled state once the wakeup is done.
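/*
 * Editorial sketch (not part of the original file): the driver-side usage
 * pattern that the wait/event emulation below has to reproduce.  The name
 * MyDriverEvent and the one-second timeout are hypothetical.
 *
 *	KEVENT MyDriverEvent;
 *	LARGE_INTEGER timeout;
 *	NTSTATUS status;
 *
 *	KeInitializeEvent(&MyDriverEvent, SynchronizationEvent, FALSE);
 *	...
 *	timeout.QuadPart = -10000000;	// relative wait: 1 second in 100ns units
 *	status = KeWaitForSingleObject(&MyDriverEvent, Executive, KernelMode,
 *	    FALSE, &timeout);
 *	if (status == STATUS_TIMEOUT)
 *		... nobody signalled us in time ...
 *
 * Some other thread (or a DPC) later does:
 *
 *	KeSetEvent(&MyDriverEvent, IO_NO_INCREMENT, FALSE);
 *
 * which lands in ntoskrnl_set_event()/ntoskrnl_wakeup() below and releases
 * the waiter.  Because the event was initialized as a synchronization event,
 * it is reset to the non-signalled state as part of the wakeup.
 */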
400 __stdcall uint32_t
401 ntoskrnl_waitforobj(nt_dispatch_header *obj, uint32_t reason,
402 uint32_t mode, uint8_t alertable, int64_t *duetime)
404 struct thread *td = curthread;
405 kmutant *km;
406 wait_block w;
407 struct timeval tv;
408 int error = 0;
409 int ticks;
410 uint64_t curtime;
411 struct lwkt_tokref tokref;
413 if (obj == NULL)
414 return(STATUS_INVALID_PARAMETER);
416 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
419 * See if the object is a mutex. If so, and we already own
420 * it, then just increment the acquisition count and return.
422 * For any other kind of object, see if it's already in the
423 * signalled state, and if it is, just return. If the object
424 * is marked as a synchronization event, reset the state to
425 * unsignalled.
428 if (obj->dh_size == OTYPE_MUTEX) {
429 km = (kmutant *)obj;
430 if (km->km_ownerthread == NULL ||
431 km->km_ownerthread == curthread->td_proc) {
432 obj->dh_sigstate = FALSE;
433 km->km_acquirecnt++;
434 km->km_ownerthread = curthread->td_proc;
435 lwkt_reltoken(&tokref);
436 return (STATUS_SUCCESS);
438 } else if (obj->dh_sigstate == TRUE) {
439 if (obj->dh_type == EVENT_TYPE_SYNC)
440 obj->dh_sigstate = FALSE;
441 lwkt_reltoken(&tokref);
442 return (STATUS_SUCCESS);
445 w.wb_object = obj;
446 w.wb_kthread = td;
448 INSERT_LIST_TAIL((&obj->dh_waitlisthead), (&w.wb_waitlist));
451 * The timeout value is specified in 100 nanosecond units
452 * and can be a positive or negative number. If it's positive,
453 * then the duetime is absolute, and we need to convert it
454 * to an absolute offset relative to now in order to use it.
455 * If it's negative, then the duetime is relative and we
456 * just have to convert the units.
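/*
 * Worked example (editorial note): a relative duetime of -10000000
 * (one second) yields tv_sec = 1, tv_usec = 0, while -15000000
 * yields tv_sec = 1, tv_usec = 500000.
 */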
459 if (duetime != NULL) {
460 if (*duetime < 0) {
461 tv.tv_sec = - (*duetime) / 10000000;
462 tv.tv_usec = (- (*duetime) / 10) -
463 (tv.tv_sec * 1000000);
464 } else {
465 ntoskrnl_time(&curtime);
466 if (*duetime < curtime)
467 tv.tv_sec = tv.tv_usec = 0;
468 else {
469 tv.tv_sec = ((*duetime) - curtime) / 10000000;
470 tv.tv_usec = ((*duetime) - curtime) / 10 -
471 (tv.tv_sec * 1000000);
476 lwkt_reltoken(&tokref);
478 ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000;
479 error = ndis_thsuspend(td, duetime == NULL ? 0 : ticks);
481 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
483 /* We timed out. Leave the object alone and return status. */
485 if (error == EWOULDBLOCK) {
486 REMOVE_LIST_ENTRY((&w.wb_waitlist));
487 lwkt_reltoken(&tokref);
488 return(STATUS_TIMEOUT);
492 * Mutexes are always synchronization objects, which means
493 * if several threads are waiting to acquire it, only one will
494 * be woken up. If that one is us, and the mutex is up for grabs,
495 * grab it.
498 if (obj->dh_size == OTYPE_MUTEX) {
499 km = (kmutant *)obj;
500 if (km->km_ownerthread == NULL) {
501 km->km_ownerthread = curthread->td_proc;
502 km->km_acquirecnt++;
506 if (obj->dh_type == EVENT_TYPE_SYNC)
507 obj->dh_sigstate = FALSE;
508 REMOVE_LIST_ENTRY((&w.wb_waitlist));
510 lwkt_reltoken(&tokref);
512 return(STATUS_SUCCESS);
515 __stdcall static uint32_t
516 ntoskrnl_waitforobjs(uint32_t cnt, nt_dispatch_header *obj[],
517 uint32_t wtype, uint32_t reason, uint32_t mode,
518 uint8_t alertable, int64_t *duetime,
519 wait_block *wb_array)
521 struct thread *td = curthread;
522 kmutant *km;
523 wait_block _wb_array[THREAD_WAIT_OBJECTS];
524 wait_block *w;
525 struct timeval tv;
526 	int			i, wcnt = 0, widx = 0, error = 0, ticks = 0;
527 uint64_t curtime;
528 struct timespec t1, t2;
529 struct lwkt_tokref tokref;
531 if (cnt > MAX_WAIT_OBJECTS)
532 return(STATUS_INVALID_PARAMETER);
533 if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
534 return(STATUS_INVALID_PARAMETER);
536 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
538 if (wb_array == NULL)
539 w = &_wb_array[0];
540 else
541 w = wb_array;
543 tv.tv_sec = 0; /* fix compiler warning */
544 tv.tv_usec = 0; /* fix compiler warning */
546 /* First pass: see if we can satisfy any waits immediately. */
548 for (i = 0; i < cnt; i++) {
549 if (obj[i]->dh_size == OTYPE_MUTEX) {
550 km = (kmutant *)obj[i];
551 if (km->km_ownerthread == NULL ||
552 km->km_ownerthread == curthread->td_proc) {
553 obj[i]->dh_sigstate = FALSE;
554 km->km_acquirecnt++;
555 km->km_ownerthread = curthread->td_proc;
556 if (wtype == WAITTYPE_ANY) {
557 lwkt_reltoken(&tokref);
558 return (STATUS_WAIT_0 + i);
561 } else if (obj[i]->dh_sigstate == TRUE) {
562 if (obj[i]->dh_type == EVENT_TYPE_SYNC)
563 obj[i]->dh_sigstate = FALSE;
564 if (wtype == WAITTYPE_ANY) {
565 lwkt_reltoken(&tokref);
566 return (STATUS_WAIT_0 + i);
572 * Second pass: set up wait for anything we can't
573 * satisfy immediately.
576 for (i = 0; i < cnt; i++) {
577 if (obj[i]->dh_sigstate == TRUE)
578 continue;
579 INSERT_LIST_TAIL((&obj[i]->dh_waitlisthead),
580 (&w[i].wb_waitlist));
581 w[i].wb_kthread = td;
582 w[i].wb_object = obj[i];
583 wcnt++;
586 if (duetime) {
587 if (*duetime < 0) {
588 tv.tv_sec = -*duetime / 10000000;
589 tv.tv_usec = (-*duetime / 10) - (tv.tv_sec * 1000000);
590 } else {
591 ntoskrnl_time(&curtime);
592 if (*duetime < curtime) {
593 tv.tv_sec = 0;
594 tv.tv_usec = 0;
595 } else {
596 tv.tv_sec = ((*duetime) - curtime) / 10000000;
597 tv.tv_usec = ((*duetime) - curtime) / 10 -
598 (tv.tv_sec * 1000000);
603 while (wcnt) {
604 nanotime(&t1);
605 lwkt_reltoken(&tokref);
607 if (duetime) {
608 ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000;
609 error = ndis_thsuspend(td, ticks);
610 } else {
611 error = ndis_thsuspend(td, 0);
614 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
615 nanotime(&t2);
617 for (i = 0; i < cnt; i++) {
618 if (obj[i]->dh_size == OTYPE_MUTEX) {
619 				km = (kmutant *)obj[i];
620 if (km->km_ownerthread == NULL) {
621 km->km_ownerthread =
622 curthread->td_proc;
623 km->km_acquirecnt++;
626 if (obj[i]->dh_sigstate == TRUE) {
627 widx = i;
628 if (obj[i]->dh_type == EVENT_TYPE_SYNC)
629 obj[i]->dh_sigstate = FALSE;
630 REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
631 wcnt--;
635 if (error || wtype == WAITTYPE_ANY)
636 break;
638 if (duetime) {
639 tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
640 tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
644 if (wcnt) {
645 for (i = 0; i < cnt; i++)
646 REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
649 if (error == EWOULDBLOCK) {
650 lwkt_reltoken(&tokref);
651 return(STATUS_TIMEOUT);
654 if (wtype == WAITTYPE_ANY && wcnt) {
655 lwkt_reltoken(&tokref);
656 return(STATUS_WAIT_0 + widx);
659 lwkt_reltoken(&tokref);
661 return(STATUS_SUCCESS);
664 __stdcall static void
665 ntoskrnl_writereg_ushort(uint16_t *reg, uint16_t val)
667 bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
668 return;
671 __stdcall static uint16_t
672 ntoskrnl_readreg_ushort(uint16_t *reg)
674 return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
677 __stdcall static void
678 ntoskrnl_writereg_ulong(uint32_t *reg, uint32_t val)
680 bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
681 return;
684 __stdcall static uint32_t
685 ntoskrnl_readreg_ulong(uint32_t *reg)
687 return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
690 __stdcall static uint8_t
691 ntoskrnl_readreg_uchar(uint8_t *reg)
693 return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
696 __stdcall static void
697 ntoskrnl_writereg_uchar(uint8_t *reg, uint8_t val)
699 bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
700 return;
703 __stdcall static int64_t
704 _allmul(int64_t a, int64_t b)
706 return (a * b);
709 __stdcall static int64_t
710 _alldiv(int64_t a, int64_t b)
712 return (a / b);
715 __stdcall static int64_t
716 _allrem(int64_t a, int64_t b)
718 return (a % b);
721 __stdcall static uint64_t
722 _aullmul(uint64_t a, uint64_t b)
724 return (a * b);
727 __stdcall static uint64_t
728 _aulldiv(uint64_t a, uint64_t b)
730 return (a / b);
733 __stdcall static uint64_t
734 _aullrem(uint64_t a, uint64_t b)
736 return (a % b);
739 __regparm static int64_t
740 _allshl(int64_t a, uint8_t b)
742 return (a << b);
745 __regparm static uint64_t
746 _aullshl(uint64_t a, uint8_t b)
748 return (a << b);
751 __regparm static int64_t
752 _allshr(int64_t a, uint8_t b)
754 return (a >> b);
757 __regparm static uint64_t
758 _aullshr(uint64_t a, uint8_t b)
760 return (a >> b);
763 static slist_entry *
764 ntoskrnl_pushsl(slist_header *head, slist_entry *entry)
766 slist_entry *oldhead;
768 oldhead = head->slh_list.slh_next;
769 entry->sl_next = head->slh_list.slh_next;
770 head->slh_list.slh_next = entry;
771 head->slh_list.slh_depth++;
772 head->slh_list.slh_seq++;
774 return(oldhead);
777 static slist_entry *
778 ntoskrnl_popsl(slist_header *head)
780 slist_entry *first;
782 first = head->slh_list.slh_next;
783 if (first != NULL) {
784 head->slh_list.slh_next = first->sl_next;
785 head->slh_list.slh_depth--;
786 head->slh_list.slh_seq++;
789 return(first);
792 __stdcall static void *
793 ntoskrnl_allocfunc(uint32_t pooltype, size_t size, uint32_t tag)
795 return(kmalloc(size, M_DEVBUF, M_WAITOK));
798 __stdcall static void
799 ntoskrnl_freefunc(void *buf)
801 kfree(buf, M_DEVBUF);
802 return;
805 __stdcall static void
806 ntoskrnl_init_lookaside(paged_lookaside_list *lookaside,
807 lookaside_alloc_func *allocfunc,
808 lookaside_free_func *freefunc,
809 uint32_t flags, size_t size,
810 uint32_t tag, uint16_t depth)
812 bzero((char *)lookaside, sizeof(paged_lookaside_list));
814 if (size < sizeof(slist_entry))
815 lookaside->nll_l.gl_size = sizeof(slist_entry);
816 else
817 lookaside->nll_l.gl_size = size;
818 lookaside->nll_l.gl_tag = tag;
819 if (allocfunc == NULL)
820 lookaside->nll_l.gl_allocfunc = ntoskrnl_allocfunc;
821 else
822 lookaside->nll_l.gl_allocfunc = allocfunc;
824 if (freefunc == NULL)
825 lookaside->nll_l.gl_freefunc = ntoskrnl_freefunc;
826 else
827 lookaside->nll_l.gl_freefunc = freefunc;
829 ntoskrnl_init_lock(&lookaside->nll_obsoletelock);
831 lookaside->nll_l.gl_depth = LOOKASIDE_DEPTH;
832 lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
834 return;
837 __stdcall static void
838 ntoskrnl_delete_lookaside(paged_lookaside_list *lookaside)
840 void *buf;
841 __stdcall void (*freefunc)(void *);
843 freefunc = lookaside->nll_l.gl_freefunc;
844 while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
845 freefunc(buf);
847 return;
850 __stdcall static void
851 ntoskrnl_init_nplookaside(npaged_lookaside_list *lookaside,
852 lookaside_alloc_func *allocfunc,
853 lookaside_free_func *freefunc,
854 uint32_t flags, size_t size,
855 uint32_t tag, uint16_t depth)
857 bzero((char *)lookaside, sizeof(npaged_lookaside_list));
859 if (size < sizeof(slist_entry))
860 lookaside->nll_l.gl_size = sizeof(slist_entry);
861 else
862 lookaside->nll_l.gl_size = size;
863 lookaside->nll_l.gl_tag = tag;
864 if (allocfunc == NULL)
865 lookaside->nll_l.gl_allocfunc = ntoskrnl_allocfunc;
866 else
867 lookaside->nll_l.gl_allocfunc = allocfunc;
869 if (freefunc == NULL)
870 lookaside->nll_l.gl_freefunc = ntoskrnl_freefunc;
871 else
872 lookaside->nll_l.gl_freefunc = freefunc;
874 ntoskrnl_init_lock(&lookaside->nll_obsoletelock);
876 lookaside->nll_l.gl_depth = LOOKASIDE_DEPTH;
877 lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
879 return;
882 __stdcall static void
883 ntoskrnl_delete_nplookaside(npaged_lookaside_list *lookaside)
885 void *buf;
886 __stdcall void (*freefunc)(void *);
888 freefunc = lookaside->nll_l.gl_freefunc;
889 while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
890 freefunc(buf);
892 return;
896 * Note: the interlocked slist push and pop routines are
897 * declared to be _fastcall in Windows. gcc 3.4 is supposed
898 * to have support for this calling convention, however we
899 * don't have that version available yet, so we kludge things
900 * up using some inline assembly.
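/*
 * Editorial note (background sketch, not from the original): Microsoft's
 * _fastcall convention on x86 passes the first two arguments in the ECX and
 * EDX registers, with any remaining arguments on the stack.  The REGARGS1()/
 * REGARGS2() macros declare the functions below so they pick their arguments
 * out of those registers, and the FASTCALL1()/FASTCALL2()/FASTCALL3() macros
 * perform the inverse marshalling when we need to make such a call ourselves,
 * e.g.:
 *
 *	oldhead = (slist_entry *)FASTCALL3(ntoskrnl_push_slist_ex,
 *	    head, entry, &ntoskrnl_global);
 */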
903 __stdcall __regcall static slist_entry *
904 ntoskrnl_push_slist(REGARGS2(slist_header *head, slist_entry *entry))
906 slist_entry *oldhead;
908 oldhead = (slist_entry *)FASTCALL3(ntoskrnl_push_slist_ex,
909 head, entry, &ntoskrnl_global);
911 return(oldhead);
914 __stdcall __regcall static slist_entry *
915 ntoskrnl_pop_slist(REGARGS1(slist_header *head))
917 slist_entry *first;
919 first = (slist_entry *)FASTCALL2(ntoskrnl_pop_slist_ex,
920 head, &ntoskrnl_global);
922 return(first);
925 __stdcall __regcall static slist_entry *
926 ntoskrnl_push_slist_ex(REGARGS2(slist_header *head, slist_entry *entry), kspin_lock *lock)
928 slist_entry *oldhead;
929 uint8_t irql;
931 irql = FASTCALL2(hal_lock, lock, DISPATCH_LEVEL);
932 oldhead = ntoskrnl_pushsl(head, entry);
933 FASTCALL2(hal_unlock, lock, irql);
935 return(oldhead);
938 __stdcall __regcall static slist_entry *
939 ntoskrnl_pop_slist_ex(REGARGS2(slist_header *head, kspin_lock *lock))
941 slist_entry *first;
942 uint8_t irql;
944 irql = FASTCALL2(hal_lock, lock, DISPATCH_LEVEL);
945 first = ntoskrnl_popsl(head);
946 FASTCALL2(hal_unlock, lock, irql);
948 return(first);
951 __stdcall __regcall void
952 ntoskrnl_lock_dpc(REGARGS1(kspin_lock *lock))
954 while (atomic_poll_acquire_int((volatile u_int *)lock) == 0)
955 /* sit and spin */;
958 __stdcall __regcall void
959 ntoskrnl_unlock_dpc(REGARGS1(kspin_lock *lock))
961 atomic_poll_release_int((volatile u_int *)lock);
964 __stdcall __regcall static uint32_t
965 ntoskrnl_interlock_inc(REGARGS1(volatile uint32_t *addend))
967 atomic_add_long((volatile u_long *)addend, 1);
968 return(*addend);
971 __stdcall __regcall static uint32_t
972 ntoskrnl_interlock_dec(REGARGS1(volatile uint32_t *addend))
974 atomic_subtract_long((volatile u_long *)addend, 1);
975 return(*addend);
978 __stdcall __regcall static void
979 ntoskrnl_interlock_addstat(REGARGS2(uint64_t *addend, uint32_t inc))
981 uint8_t irql;
983 irql = FASTCALL2(hal_lock, &ntoskrnl_global, DISPATCH_LEVEL);
984 *addend += inc;
985 FASTCALL2(hal_unlock, &ntoskrnl_global, irql);
987 return;
990 __stdcall static void
991 ntoskrnl_freemdl(ndis_buffer *mdl)
993 ndis_buffer *head;
995 if (mdl == NULL || mdl->nb_process == NULL)
996 return;
998 head = mdl->nb_process;
1000 if (head->nb_flags != 0x1)
1001 return;
1003 mdl->nb_next = head->nb_next;
1004 head->nb_next = mdl;
1006 /* Decrement count of busy buffers. */
1008 head->nb_bytecount--;
1011 * If the pool has been marked for deletion and there are
1012 * no more buffers outstanding, nuke the pool.
1015 if (head->nb_byteoffset && head->nb_bytecount == 0)
1016 kfree(head, M_DEVBUF);
1018 return;
1021 __stdcall static uint32_t
1022 ntoskrnl_sizeofmdl(void *vaddr, size_t len)
1024 uint32_t l;
1026 l = sizeof(struct ndis_buffer) +
1027 (sizeof(uint32_t) * SPAN_PAGES(vaddr, len));
1029 return(l);
1032 __stdcall static void
1033 ntoskrnl_build_npaged_mdl(ndis_buffer *mdl)
1035 mdl->nb_mappedsystemva = (char *)mdl->nb_startva + mdl->nb_byteoffset;
1036 return;
1039 __stdcall static void *
1040 ntoskrnl_mmaplockedpages(ndis_buffer *buf, uint8_t accessmode)
1042 return(MDL_VA(buf));
1045 __stdcall static void *
1046 ntoskrnl_mmaplockedpages_cache(ndis_buffer *buf, uint8_t accessmode,
1047 uint32_t cachetype, void *vaddr,
1048 uint32_t bugcheck, uint32_t prio)
1050 return(MDL_VA(buf));
1053 __stdcall static void
1054 ntoskrnl_munmaplockedpages(void *vaddr, ndis_buffer *buf)
1056 return;
1060 * The KeInitializeSpinLock(), KefAcquireSpinLockAtDpcLevel()
1061  * and KefReleaseSpinLockFromDpcLevel() appear to be analogous
1062 * to crit_enter()/crit_exit() in their use. We can't create a new mutex
1063  * lock here because there is no complementary KeFreeSpinLock()
1064 * function. Instead, we grab a mutex from the mutex pool.
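/*
 * Editorial sketch (not part of the original file): the Windows driver-side
 * idiom being emulated.  The lock and irql names are hypothetical.
 *
 *	KSPIN_LOCK lock;
 *	KIRQL irql;
 *
 *	KeInitializeSpinLock(&lock);
 *	KeAcquireSpinLock(&lock, &irql);	// raises IRQL to DISPATCH_LEVEL
 *	... touch shared state ...
 *	KeReleaseSpinLock(&lock, irql);
 *
 * The KefAcquireSpinLockAtDpcLevel()/KefReleaseSpinLockFromDpcLevel()
 * variants, implemented earlier in this file as ntoskrnl_lock_dpc() and
 * ntoskrnl_unlock_dpc(), skip the IRQL juggling and assume the caller is
 * already running at DISPATCH_LEVEL.
 */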
1066 __stdcall static void
1067 ntoskrnl_init_lock(kspin_lock *lock)
1069 *lock = 0;
1071 return;
1074 __stdcall static size_t
1075 ntoskrnl_memcmp(const void *s1, const void *s2, size_t len)
1077 size_t i, total = 0;
1078 uint8_t *m1, *m2;
1080 	m1 = __DECONST(uint8_t *, s1);
1081 	m2 = __DECONST(uint8_t *, s2);
1083 for (i = 0; i < len; i++) {
1084 if (m1[i] == m2[i])
1085 total++;
1087 return(total);
1090 __stdcall static void
1091 ntoskrnl_init_ansi_string(ndis_ansi_string *dst, char *src)
1093 ndis_ansi_string *a;
1095 a = dst;
1096 if (a == NULL)
1097 return;
1098 if (src == NULL) {
1099 a->nas_len = a->nas_maxlen = 0;
1100 a->nas_buf = NULL;
1101 } else {
1102 a->nas_buf = src;
1103 a->nas_len = a->nas_maxlen = strlen(src);
1106 return;
1109 __stdcall static void
1110 ntoskrnl_init_unicode_string(ndis_unicode_string *dst, uint16_t *src)
1112 ndis_unicode_string *u;
1113 int i;
1115 u = dst;
1116 if (u == NULL)
1117 return;
1118 if (src == NULL) {
1119 u->nus_len = u->nus_maxlen = 0;
1120 u->nus_buf = NULL;
1121 } else {
1122 i = 0;
1123 while(src[i] != 0)
1124 i++;
1125 u->nus_buf = src;
1126 u->nus_len = u->nus_maxlen = i * 2;
1129 return;
1132 __stdcall ndis_status
1133 ntoskrnl_unicode_to_int(ndis_unicode_string *ustr, uint32_t base,
1134 uint32_t *val)
1136 uint16_t *uchr;
1137 int len, neg = 0;
1138 char abuf[64];
1139 char *astr;
1141 uchr = ustr->nus_buf;
1142 len = ustr->nus_len;
1143 bzero(abuf, sizeof(abuf));
1145 if ((char)((*uchr) & 0xFF) == '-') {
1146 neg = 1;
1147 uchr++;
1148 len -= 2;
1149 } else if ((char)((*uchr) & 0xFF) == '+') {
1150 neg = 0;
1151 uchr++;
1152 len -= 2;
1155 if (base == 0) {
1156 if ((char)((*uchr) & 0xFF) == 'b') {
1157 base = 2;
1158 uchr++;
1159 len -= 2;
1160 } else if ((char)((*uchr) & 0xFF) == 'o') {
1161 base = 8;
1162 uchr++;
1163 len -= 2;
1164 } else if ((char)((*uchr) & 0xFF) == 'x') {
1165 base = 16;
1166 uchr++;
1167 len -= 2;
1168 } else
1169 base = 10;
1172 astr = abuf;
1173 if (neg) {
1174 strcpy(astr, "-");
1175 astr++;
1178 ndis_unicode_to_ascii(uchr, len, &astr);
1179 *val = strtoul(abuf, NULL, base);
1181 return(NDIS_STATUS_SUCCESS);
1184 __stdcall static void
1185 ntoskrnl_free_unicode_string(ndis_unicode_string *ustr)
1187 if (ustr->nus_buf == NULL)
1188 return;
1189 kfree(ustr->nus_buf, M_DEVBUF);
1190 ustr->nus_buf = NULL;
1191 return;
1194 __stdcall static void
1195 ntoskrnl_free_ansi_string(ndis_ansi_string *astr)
1197 if (astr->nas_buf == NULL)
1198 return;
1199 kfree(astr->nas_buf, M_DEVBUF);
1200 astr->nas_buf = NULL;
1201 return;
1204 static int
1205 atoi(const char *str)
1207 return (int)strtol(str, NULL, 10);
1210 static long
1211 atol(const char *str)
1213 return strtol(str, NULL, 10);
1216 static int
1217 rand(void)
1219 struct timeval tv;
1221 microtime(&tv);
1222 skrandom(tv.tv_usec);
1223 return((int)krandom());
1226 __stdcall static uint8_t
1227 ntoskrnl_wdmver(uint8_t major, uint8_t minor)
1229 if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
1230 return(TRUE);
1231 return(FALSE);
1234 __stdcall static ndis_status
1235 ntoskrnl_devprop(device_object *devobj, uint32_t regprop, uint32_t buflen,
1236 void *prop, uint32_t *reslen)
1238 ndis_miniport_block *block;
1240 block = devobj->do_rsvd;
1242 switch (regprop) {
1243 case DEVPROP_DRIVER_KEYNAME:
1244 ndis_ascii_to_unicode(__DECONST(char *,
1245 device_get_nameunit(block->nmb_dev)), (uint16_t **)&prop);
1246 *reslen = strlen(device_get_nameunit(block->nmb_dev)) * 2;
1247 break;
1248 default:
1249 return(STATUS_INVALID_PARAMETER_2);
1250 break;
1253 return(STATUS_SUCCESS);
1256 __stdcall static void
1257 ntoskrnl_init_mutex(kmutant *kmutex, uint32_t level)
1259 INIT_LIST_HEAD((&kmutex->km_header.dh_waitlisthead));
1260 kmutex->km_abandoned = FALSE;
1261 kmutex->km_apcdisable = 1;
1262 kmutex->km_header.dh_sigstate = TRUE;
1263 kmutex->km_header.dh_type = EVENT_TYPE_SYNC;
1264 kmutex->km_header.dh_size = OTYPE_MUTEX;
1265 kmutex->km_acquirecnt = 0;
1266 kmutex->km_ownerthread = NULL;
1267 return;
1270 __stdcall static uint32_t
1271 ntoskrnl_release_mutex(kmutant *kmutex, uint8_t kwait)
1273 struct lwkt_tokref tokref;
1275 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
1276 if (kmutex->km_ownerthread != curthread->td_proc) {
1277 lwkt_reltoken(&tokref);
1278 return(STATUS_MUTANT_NOT_OWNED);
1280 kmutex->km_acquirecnt--;
1281 if (kmutex->km_acquirecnt == 0) {
1282 kmutex->km_ownerthread = NULL;
1283 lwkt_reltoken(&tokref);
1284 ntoskrnl_wakeup(&kmutex->km_header);
1285 } else
1286 lwkt_reltoken(&tokref);
1288 return(kmutex->km_acquirecnt);
1291 __stdcall static uint32_t
1292 ntoskrnl_read_mutex(kmutant *kmutex)
1294 return(kmutex->km_header.dh_sigstate);
1297 __stdcall void
1298 ntoskrnl_init_event(nt_kevent *kevent, uint32_t type, uint8_t state)
1300 INIT_LIST_HEAD((&kevent->k_header.dh_waitlisthead));
1301 kevent->k_header.dh_sigstate = state;
1302 kevent->k_header.dh_type = type;
1303 kevent->k_header.dh_size = OTYPE_EVENT;
1304 return;
1307 __stdcall uint32_t
1308 ntoskrnl_reset_event(nt_kevent *kevent)
1310 uint32_t prevstate;
1311 struct lwkt_tokref tokref;
1313 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
1314 prevstate = kevent->k_header.dh_sigstate;
1315 kevent->k_header.dh_sigstate = FALSE;
1316 lwkt_reltoken(&tokref);
1318 return(prevstate);
1321 __stdcall uint32_t
1322 ntoskrnl_set_event(nt_kevent *kevent, uint32_t increment, uint8_t kwait)
1324 uint32_t prevstate;
1326 prevstate = kevent->k_header.dh_sigstate;
1327 ntoskrnl_wakeup(&kevent->k_header);
1329 return(prevstate);
1332 __stdcall void
1333 ntoskrnl_clear_event(nt_kevent *kevent)
1335 kevent->k_header.dh_sigstate = FALSE;
1336 return;
1339 __stdcall uint32_t
1340 ntoskrnl_read_event(nt_kevent *kevent)
1342 return(kevent->k_header.dh_sigstate);
1345 __stdcall static ndis_status
1346 ntoskrnl_objref(ndis_handle handle, uint32_t reqaccess, void *otype,
1347 uint8_t accessmode, void **object, void **handleinfo)
1349 nt_objref *nr;
1351 nr = kmalloc(sizeof(nt_objref), M_DEVBUF, M_WAITOK|M_ZERO);
1353 INIT_LIST_HEAD((&nr->no_dh.dh_waitlisthead));
1354 nr->no_obj = handle;
1355 nr->no_dh.dh_size = OTYPE_THREAD;
1356 TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
1357 *object = nr;
1359 return(NDIS_STATUS_SUCCESS);
1362 __stdcall __regcall static void
1363 ntoskrnl_objderef(REGARGS1(void *object))
1365 nt_objref *nr;
1367 nr = object;
1368 TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
1369 kfree(nr, M_DEVBUF);
1371 return;
1374 __stdcall static uint32_t
1375 ntoskrnl_zwclose(ndis_handle handle)
1377 return(STATUS_SUCCESS);
1381 * This is here just in case the thread returns without calling
1382 * PsTerminateSystemThread().
1384 static void
1385 ntoskrnl_thrfunc(void *arg)
1387 thread_context *thrctx;
1388 __stdcall uint32_t (*tfunc)(void *);
1389 void *tctx;
1390 uint32_t rval;
1392 thrctx = arg;
1393 tfunc = thrctx->tc_thrfunc;
1394 tctx = thrctx->tc_thrctx;
1395 kfree(thrctx, M_TEMP);
1397 rval = tfunc(tctx);
1399 ntoskrnl_thread_exit(rval);
1400 return; /* notreached */
1403 __stdcall static ndis_status
1404 ntoskrnl_create_thread(ndis_handle *handle, uint32_t reqaccess,
1405 void *objattrs, ndis_handle phandle,
1406 void *clientid, void *thrfunc, void *thrctx)
1408 int error;
1409 char tname[128];
1410 thread_context *tc;
1411 thread_t td;
1413 tc = kmalloc(sizeof(thread_context), M_TEMP, M_WAITOK);
1415 tc->tc_thrctx = thrctx;
1416 tc->tc_thrfunc = thrfunc;
1418 ksprintf(tname, "windows kthread %d", ntoskrnl_kth);
1419 error = kthread_create_stk(ntoskrnl_thrfunc, tc, &td,
1420 NDIS_KSTACK_PAGES * PAGE_SIZE, tname);
1421 *handle = td;
1423 ntoskrnl_kth++;
1425 return(error);
1429 * In Windows, the exit of a thread is an event that you're allowed
1430 * to wait on, assuming you've obtained a reference to the thread using
1431 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
1432 * simulate this behavior is to register each thread we create in a
1433 * reference list, and if someone holds a reference to us, we poke
1434 * them.
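/*
 * Editorial sketch (not part of the original file): the Windows-side pattern
 * being emulated.  ThreadFunc and ctx are hypothetical names.
 *
 *	HANDLE handle;
 *	PVOID thread;
 *
 *	PsCreateSystemThread(&handle, THREAD_ALL_ACCESS, NULL, NULL, NULL,
 *	    ThreadFunc, ctx);
 *	ObReferenceObjectByHandle(handle, THREAD_ALL_ACCESS, NULL,
 *	    KernelMode, &thread, NULL);
 *	ZwClose(handle);
 *	...
 *	// blocks until ThreadFunc calls PsTerminateSystemThread()
 *	KeWaitForSingleObject(thread, Executive, KernelMode, FALSE, NULL);
 *	ObDereferenceObject(thread);
 *
 * The reference list kept in ntoskrnl_reflist is what lets
 * ntoskrnl_thread_exit() below find and wake any such waiter.
 */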
1436 __stdcall static ndis_status
1437 ntoskrnl_thread_exit(ndis_status status)
1439 struct nt_objref *nr;
1441 TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
1442 if (nr->no_obj != curthread)
1443 continue;
1444 ntoskrnl_wakeup(&nr->no_dh);
1445 break;
1448 ntoskrnl_kth--;
1450 kthread_exit();
1451 return(0); /* notreached */
1454 static uint32_t
1455 ntoskrnl_dbgprint(char *fmt, ...)
1457 __va_list ap;
1459 if (bootverbose) {
1460 __va_start(ap, fmt);
1461 kvprintf(fmt, ap);
1464 return(STATUS_SUCCESS);
1467 __stdcall static void
1468 ntoskrnl_debugger(void)
1471 #if __FreeBSD_version < 502113
1472 Debugger("ntoskrnl_debugger(): breakpoint");
1473 #else
1474 kdb_enter("ntoskrnl_debugger(): breakpoint");
1475 #endif
1478 static void
1479 ntoskrnl_timercall(void *arg)
1481 ktimer *timer;
1483 timer = arg;
1485 timer->k_header.dh_inserted = FALSE;
1488 * If this is a periodic timer, re-arm it
1489 * so it will fire again. We do this before
1490 * calling any deferred procedure calls because
1491 * it's possible the DPC might cancel the timer,
1492 * in which case it would be wrong for us to
1493 * re-arm it again afterwards.
1496 if (timer->k_period) {
1497 timer->k_header.dh_inserted = TRUE;
1498 callout_reset(timer->k_handle, 1 + timer->k_period * hz / 1000,
1499 ntoskrnl_timercall, timer);
1500 } else {
1501 callout_deactivate(timer->k_handle);
1502 kfree(timer->k_handle, M_NDIS);
1503 timer->k_handle = NULL;
1506 if (timer->k_dpc != NULL)
1507 ntoskrnl_queue_dpc(timer->k_dpc, NULL, NULL);
1509 ntoskrnl_wakeup(&timer->k_header);
1512 __stdcall void
1513 ntoskrnl_init_timer(ktimer *timer)
1515 if (timer == NULL)
1516 return;
1518 ntoskrnl_init_timer_ex(timer, EVENT_TYPE_NOTIFY);
1521 __stdcall void
1522 ntoskrnl_init_timer_ex(ktimer *timer, uint32_t type)
1524 if (timer == NULL)
1525 return;
1527 INIT_LIST_HEAD((&timer->k_header.dh_waitlisthead));
1528 timer->k_header.dh_sigstate = FALSE;
1529 timer->k_header.dh_inserted = FALSE;
1530 timer->k_header.dh_type = type;
1531 timer->k_header.dh_size = OTYPE_TIMER;
1532 timer->k_handle = NULL;
1534 return;
1538 * This is a wrapper for Windows deferred procedure calls that
1539 * have been placed on an NDIS thread work queue. We need it
1540  * since the DPC could be a _stdcall function. Also, as far as
1541  * I can tell, deferred procedure calls must run at DISPATCH_LEVEL.
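/*
 * Editorial sketch (not part of the original file): the driver-side
 * timer/DPC idiom that ends up in ntoskrnl_run_dpc().  MyDpcFunc and ctx
 * are hypothetical names.
 *
 *	KTIMER timer;
 *	KDPC dpc;
 *	LARGE_INTEGER due;
 *
 *	KeInitializeDpc(&dpc, MyDpcFunc, ctx);
 *	KeInitializeTimer(&timer);
 *	due.QuadPart = -5000000;	// 500ms from now (100ns units)
 *	KeSetTimer(&timer, due, &dpc);
 *
 * When the timeout fires, ntoskrnl_timercall() queues the DPC and the
 * wrapper below eventually invokes MyDpcFunc(&dpc, ctx, sysarg1, sysarg2),
 * bracketed by a raise/lower of the IRQL to DISPATCH_LEVEL.
 */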
1543 static void
1544 ntoskrnl_run_dpc(void *arg)
1546 kdpc_func dpcfunc;
1547 kdpc *dpc;
1548 uint8_t irql;
1550 dpc = arg;
1551 dpcfunc = (kdpc_func)dpc->k_deferedfunc;
1552 irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL);
1553 dpcfunc(dpc, dpc->k_deferredctx, dpc->k_sysarg1, dpc->k_sysarg2);
1554 FASTCALL1(hal_lower_irql, irql);
1556 return;
1559 __stdcall void
1560 ntoskrnl_init_dpc(kdpc *dpc, void *dpcfunc, void *dpcctx)
1562 if (dpc == NULL)
1563 return;
1565 dpc->k_deferedfunc = dpcfunc;
1566 dpc->k_deferredctx = dpcctx;
1568 return;
1571 __stdcall uint8_t
1572 ntoskrnl_queue_dpc(kdpc *dpc, void *sysarg1, void *sysarg2)
1574 dpc->k_sysarg1 = sysarg1;
1575 dpc->k_sysarg2 = sysarg2;
1576 if (ndis_sched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
1577 return(FALSE);
1579 return(TRUE);
1582 __stdcall uint8_t
1583 ntoskrnl_dequeue_dpc(kdpc *dpc)
1585 if (ndis_unsched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
1586 return(FALSE);
1588 return(TRUE);
1591 __stdcall uint8_t
1592 ntoskrnl_set_timer_ex(ktimer *timer, int64_t duetime, uint32_t period,
1593 kdpc *dpc)
1595 struct timeval tv;
1596 uint64_t curtime;
1597 uint8_t pending;
1598 int ticks;
1600 if (timer == NULL)
1601 return(FALSE);
1603 if (timer->k_header.dh_inserted == TRUE) {
1604 if (timer->k_handle != NULL)
1605 callout_stop(timer->k_handle);
1606 timer->k_header.dh_inserted = FALSE;
1607 pending = TRUE;
1608 } else
1609 pending = FALSE;
1611 timer->k_duetime = duetime;
1612 timer->k_period = period;
1613 timer->k_header.dh_sigstate = FALSE;
1614 timer->k_dpc = dpc;
1616 if (duetime < 0) {
1617 tv.tv_sec = - (duetime) / 10000000;
1618 tv.tv_usec = (- (duetime) / 10) -
1619 (tv.tv_sec * 1000000);
1620 } else {
1621 ntoskrnl_time(&curtime);
1622 if (duetime < curtime)
1623 tv.tv_sec = tv.tv_usec = 0;
1624 else {
1625 tv.tv_sec = ((duetime) - curtime) / 10000000;
1626 tv.tv_usec = ((duetime) - curtime) / 10 -
1627 (tv.tv_sec * 1000000);
1631 ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000;
1632 timer->k_header.dh_inserted = TRUE;
1633 if (timer->k_handle == NULL) {
1634 timer->k_handle = kmalloc(sizeof(struct callout), M_NDIS,
1635 M_INTWAIT);
1636 callout_init(timer->k_handle);
1638 callout_reset(timer->k_handle, ticks, ntoskrnl_timercall, timer);
1640 return(pending);
1643 __stdcall uint8_t
1644 ntoskrnl_set_timer(ktimer *timer, int64_t duetime, kdpc *dpc)
1646 return (ntoskrnl_set_timer_ex(timer, duetime, 0, dpc));
1649 __stdcall uint8_t
1650 ntoskrnl_cancel_timer(ktimer *timer)
1652 uint8_t pending;
1654 if (timer == NULL)
1655 return(FALSE);
1657 if (timer->k_header.dh_inserted == TRUE) {
1658 if (timer->k_handle != NULL) {
1659 callout_stop(timer->k_handle);
1660 kfree(timer->k_handle, M_NDIS);
1661 timer->k_handle = NULL;
1663 if (timer->k_dpc != NULL)
1664 ntoskrnl_dequeue_dpc(timer->k_dpc);
1665 pending = TRUE;
1666 } else
1667 pending = FALSE;
1670 return(pending);
1673 __stdcall uint8_t
1674 ntoskrnl_read_timer(ktimer *timer)
1676 return(timer->k_header.dh_sigstate);
1679 __stdcall static void
1680 dummy(void)
1682 kprintf ("ntoskrnl dummy called...\n");
1683 return;
1687 image_patch_table ntoskrnl_functbl[] = {
1688 { "RtlCompareMemory", (FUNC)ntoskrnl_memcmp },
1689 { "RtlEqualUnicodeString", (FUNC)ntoskrnl_unicode_equal },
1690 { "RtlCopyUnicodeString", (FUNC)ntoskrnl_unicode_copy },
1691 { "RtlUnicodeStringToAnsiString", (FUNC)ntoskrnl_unicode_to_ansi },
1692 { "RtlAnsiStringToUnicodeString", (FUNC)ntoskrnl_ansi_to_unicode },
1693 { "RtlInitAnsiString", (FUNC)ntoskrnl_init_ansi_string },
1694 { "RtlInitUnicodeString", (FUNC)ntoskrnl_init_unicode_string },
1695 { "RtlFreeAnsiString", (FUNC)ntoskrnl_free_ansi_string },
1696 { "RtlFreeUnicodeString", (FUNC)ntoskrnl_free_unicode_string },
1697 { "RtlUnicodeStringToInteger", (FUNC)ntoskrnl_unicode_to_int },
1698 { "sprintf", (FUNC)ksprintf },
1699 { "vsprintf", (FUNC)kvsprintf },
1700 { "_snprintf", (FUNC)ksnprintf },
1701 { "_vsnprintf", (FUNC)kvsnprintf },
1702 { "DbgPrint", (FUNC)ntoskrnl_dbgprint },
1703 { "DbgBreakPoint", (FUNC)ntoskrnl_debugger },
1704 { "strncmp", (FUNC)strncmp },
1705 { "strcmp", (FUNC)strcmp },
1706 { "strncpy", (FUNC)strncpy },
1707 { "strcpy", (FUNC)strcpy },
1708 { "strlen", (FUNC)strlen },
1709 { "memcpy", (FUNC)memcpy },
1710 { "memmove", (FUNC)memcpy },
1711 { "memset", (FUNC)memset },
1712 { "IofCallDriver", (FUNC)ntoskrnl_iofcalldriver },
1713 { "IofCompleteRequest", (FUNC)ntoskrnl_iofcompletereq },
1714 { "IoBuildSynchronousFsdRequest", (FUNC)ntoskrnl_iobuildsynchfsdreq },
1715 { "KeWaitForSingleObject", (FUNC)ntoskrnl_waitforobj },
1716 { "KeWaitForMultipleObjects", (FUNC)ntoskrnl_waitforobjs },
1717 { "_allmul", (FUNC)_allmul },
1718 { "_alldiv", (FUNC)_alldiv },
1719 { "_allrem", (FUNC)_allrem },
1720 { "_allshr", (FUNC)_allshr },
1721 { "_allshl", (FUNC)_allshl },
1722 { "_aullmul", (FUNC)_aullmul },
1723 { "_aulldiv", (FUNC)_aulldiv },
1724 { "_aullrem", (FUNC)_aullrem },
1725 { "_aullshr", (FUNC)_aullshr },
1726 { "_aullshl", (FUNC)_aullshl },
1727 { "atoi", (FUNC)atoi },
1728 { "atol", (FUNC)atol },
1729 { "rand", (FUNC)rand },
1730 { "WRITE_REGISTER_USHORT", (FUNC)ntoskrnl_writereg_ushort },
1731 { "READ_REGISTER_USHORT", (FUNC)ntoskrnl_readreg_ushort },
1732 { "WRITE_REGISTER_ULONG", (FUNC)ntoskrnl_writereg_ulong },
1733 { "READ_REGISTER_ULONG", (FUNC)ntoskrnl_readreg_ulong },
1734 { "READ_REGISTER_UCHAR", (FUNC)ntoskrnl_readreg_uchar },
1735 { "WRITE_REGISTER_UCHAR", (FUNC)ntoskrnl_writereg_uchar },
1736 { "ExInitializePagedLookasideList", (FUNC)ntoskrnl_init_lookaside },
1737 { "ExDeletePagedLookasideList", (FUNC)ntoskrnl_delete_lookaside },
1738 { "ExInitializeNPagedLookasideList", (FUNC)ntoskrnl_init_nplookaside },
1739 { "ExDeleteNPagedLookasideList", (FUNC)ntoskrnl_delete_nplookaside },
1740 { "InterlockedPopEntrySList", (FUNC)ntoskrnl_pop_slist },
1741 { "InterlockedPushEntrySList", (FUNC)ntoskrnl_push_slist },
1742 { "ExInterlockedPopEntrySList", (FUNC)ntoskrnl_pop_slist_ex },
1743 { "ExInterlockedPushEntrySList",(FUNC)ntoskrnl_push_slist_ex },
1744 { "KefAcquireSpinLockAtDpcLevel", (FUNC)ntoskrnl_lock_dpc },
1745 { "KefReleaseSpinLockFromDpcLevel", (FUNC)ntoskrnl_unlock_dpc },
1746 { "InterlockedIncrement", (FUNC)ntoskrnl_interlock_inc },
1747 { "InterlockedDecrement", (FUNC)ntoskrnl_interlock_dec },
1748 { "ExInterlockedAddLargeStatistic",
1749 (FUNC)ntoskrnl_interlock_addstat },
1750 { "IoFreeMdl", (FUNC)ntoskrnl_freemdl },
1751 { "MmSizeOfMdl", (FUNC)ntoskrnl_sizeofmdl },
1752 { "MmMapLockedPages", (FUNC)ntoskrnl_mmaplockedpages },
1753 { "MmMapLockedPagesSpecifyCache",
1754 (FUNC)ntoskrnl_mmaplockedpages_cache },
1755 { "MmUnmapLockedPages", (FUNC)ntoskrnl_munmaplockedpages },
1756 { "MmBuildMdlForNonPagedPool", (FUNC)ntoskrnl_build_npaged_mdl },
1757 { "KeInitializeSpinLock", (FUNC)ntoskrnl_init_lock },
1758 { "IoIsWdmVersionAvailable", (FUNC)ntoskrnl_wdmver },
1759 { "IoGetDeviceProperty", (FUNC)ntoskrnl_devprop },
1760 { "KeInitializeMutex", (FUNC)ntoskrnl_init_mutex },
1761 { "KeReleaseMutex", (FUNC)ntoskrnl_release_mutex },
1762 { "KeReadStateMutex", (FUNC)ntoskrnl_read_mutex },
1763 { "KeInitializeEvent", (FUNC)ntoskrnl_init_event },
1764 { "KeSetEvent", (FUNC)ntoskrnl_set_event },
1765 { "KeResetEvent", (FUNC)ntoskrnl_reset_event },
1766 { "KeClearEvent", (FUNC)ntoskrnl_clear_event },
1767 { "KeReadStateEvent", (FUNC)ntoskrnl_read_event },
1768 { "KeInitializeTimer", (FUNC)ntoskrnl_init_timer },
1769 { "KeInitializeTimerEx", (FUNC)ntoskrnl_init_timer_ex },
1770 { "KeSetTimer", (FUNC)ntoskrnl_set_timer },
1771 { "KeSetTimerEx", (FUNC)ntoskrnl_set_timer_ex },
1772 { "KeCancelTimer", (FUNC)ntoskrnl_cancel_timer },
1773 { "KeReadStateTimer", (FUNC)ntoskrnl_read_timer },
1774 { "KeInitializeDpc", (FUNC)ntoskrnl_init_dpc },
1775 { "KeInsertQueueDpc", (FUNC)ntoskrnl_queue_dpc },
1776 { "KeRemoveQueueDpc", (FUNC)ntoskrnl_dequeue_dpc },
1777 { "ObReferenceObjectByHandle", (FUNC)ntoskrnl_objref },
1778 { "ObfDereferenceObject", (FUNC)ntoskrnl_objderef },
1779 { "ZwClose", (FUNC)ntoskrnl_zwclose },
1780 { "PsCreateSystemThread", (FUNC)ntoskrnl_create_thread },
1781 { "PsTerminateSystemThread", (FUNC)ntoskrnl_thread_exit },
1784 * This last entry is a catch-all for any function we haven't
1785 * implemented yet. The PE import list patching routine will
1786 * use it for any function that doesn't have an explicit match
1787 * in this table.
1790 { NULL, (FUNC)dummy },
1792 /* End of list. */
1794 { NULL, NULL },