1 /*
2 * Copyright (c) 2003
3 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
32 * $FreeBSD: src/sys/compat/ndis/subr_ntoskrnl.c,v 1.40 2004/07/20 20:28:57 wpaul Exp $
33 * $DragonFly: src/sys/emulation/ndis/subr_ntoskrnl.c,v 1.13 2006/12/23 00:27:02 swildner Exp $
36 #include <sys/ctype.h>
37 #include <sys/unistd.h>
38 #include <sys/param.h>
39 #include <sys/types.h>
40 #include <sys/errno.h>
41 #include <sys/systm.h>
42 #include <sys/malloc.h>
43 #include <sys/lock.h>
45 #include <sys/callout.h>
46 #if __FreeBSD_version > 502113
47 #include <sys/kdb.h>
48 #endif
49 #include <sys/kernel.h>
50 #include <sys/proc.h>
51 #include <sys/kthread.h>
52 #include <sys/bus.h>
53 #include <sys/rman.h>
55 #include <machine/atomic.h>
56 #include <machine/clock.h>
57 #include <machine/stdarg.h>
59 #include "regcall.h"
60 #include "pe_var.h"
61 #include "resource_var.h"
62 #include "ntoskrnl_var.h"
63 #include "ndis_var.h"
64 #include "hal_var.h"
66 #define __regparm __attribute__((regparm(3)))
68 #define FUNC void(*)(void)
70 __stdcall static uint8_t ntoskrnl_unicode_equal(ndis_unicode_string *,
71 ndis_unicode_string *, uint8_t);
72 __stdcall static void ntoskrnl_unicode_copy(ndis_unicode_string *,
73 ndis_unicode_string *);
74 __stdcall static ndis_status ntoskrnl_unicode_to_ansi(ndis_ansi_string *,
75 ndis_unicode_string *, uint8_t);
76 __stdcall static ndis_status ntoskrnl_ansi_to_unicode(ndis_unicode_string *,
77 ndis_ansi_string *, uint8_t);
78 __stdcall static void *ntoskrnl_iobuildsynchfsdreq(uint32_t, void *,
79 void *, uint32_t, uint32_t *, void *, void *);
82 * registerized calls
84 __stdcall __regcall static uint32_t
85 ntoskrnl_iofcalldriver(REGARGS2(void *dobj, void *irp));
86 __stdcall __regcall static void
87 ntoskrnl_iofcompletereq(REGARGS2(void *irp, uint8_t prioboost));
88 __stdcall __regcall static slist_entry *
89 ntoskrnl_push_slist(REGARGS2(slist_header *head, slist_entry *entry));
90 __stdcall __regcall static slist_entry *
91 ntoskrnl_pop_slist(REGARGS1(slist_header *head));
92 __stdcall __regcall static slist_entry *
93 ntoskrnl_push_slist_ex(REGARGS2(slist_header *head, slist_entry *entry), kspin_lock *lock);
94 __stdcall __regcall static slist_entry *
95 ntoskrnl_pop_slist_ex(REGARGS2(slist_header *head, kspin_lock *lock));
97 __stdcall __regcall static uint32_t
98 ntoskrnl_interlock_inc(REGARGS1(volatile uint32_t *addend));
99 __stdcall __regcall static uint32_t
100 ntoskrnl_interlock_dec(REGARGS1(volatile uint32_t *addend));
101 __stdcall __regcall static void
102 ntoskrnl_interlock_addstat(REGARGS2(uint64_t *addend, uint32_t inc));
103 __stdcall __regcall static void
104 ntoskrnl_objderef(REGARGS1(void *object));
106 __stdcall static uint32_t ntoskrnl_waitforobjs(uint32_t,
107 nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
108 int64_t *, wait_block *);
109 static void ntoskrnl_wakeup(void *);
110 static void ntoskrnl_timercall(void *);
111 static void ntoskrnl_run_dpc(void *);
112 __stdcall static void ntoskrnl_writereg_ushort(uint16_t *, uint16_t);
113 __stdcall static uint16_t ntoskrnl_readreg_ushort(uint16_t *);
114 __stdcall static void ntoskrnl_writereg_ulong(uint32_t *, uint32_t);
115 __stdcall static uint32_t ntoskrnl_readreg_ulong(uint32_t *);
116 __stdcall static void ntoskrnl_writereg_uchar(uint8_t *, uint8_t);
117 __stdcall static uint8_t ntoskrnl_readreg_uchar(uint8_t *);
118 __stdcall static int64_t _allmul(int64_t, int64_t);
119 __stdcall static int64_t _alldiv(int64_t, int64_t);
120 __stdcall static int64_t _allrem(int64_t, int64_t);
121 __regparm static int64_t _allshr(int64_t, uint8_t);
122 __regparm static int64_t _allshl(int64_t, uint8_t);
123 __stdcall static uint64_t _aullmul(uint64_t, uint64_t);
124 __stdcall static uint64_t _aulldiv(uint64_t, uint64_t);
125 __stdcall static uint64_t _aullrem(uint64_t, uint64_t);
126 __regparm static uint64_t _aullshr(uint64_t, uint8_t);
127 __regparm static uint64_t _aullshl(uint64_t, uint8_t);
128 __stdcall static void *ntoskrnl_allocfunc(uint32_t, size_t, uint32_t);
129 __stdcall static void ntoskrnl_freefunc(void *);
130 static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
131 static slist_entry *ntoskrnl_popsl(slist_header *);
132 __stdcall static void ntoskrnl_init_lookaside(paged_lookaside_list *,
133 lookaside_alloc_func *, lookaside_free_func *,
134 uint32_t, size_t, uint32_t, uint16_t);
135 __stdcall static void ntoskrnl_delete_lookaside(paged_lookaside_list *);
136 __stdcall static void ntoskrnl_init_nplookaside(npaged_lookaside_list *,
137 lookaside_alloc_func *, lookaside_free_func *,
138 uint32_t, size_t, uint32_t, uint16_t);
139 __stdcall static void ntoskrnl_delete_nplookaside(npaged_lookaside_list *);
140 __stdcall static void ntoskrnl_freemdl(ndis_buffer *);
141 __stdcall static uint32_t ntoskrnl_sizeofmdl(void *, size_t);
142 __stdcall static void ntoskrnl_build_npaged_mdl(ndis_buffer *);
143 __stdcall static void *ntoskrnl_mmaplockedpages(ndis_buffer *, uint8_t);
144 __stdcall static void *ntoskrnl_mmaplockedpages_cache(ndis_buffer *,
145 uint8_t, uint32_t, void *, uint32_t, uint32_t);
146 __stdcall static void ntoskrnl_munmaplockedpages(void *, ndis_buffer *);
147 __stdcall static void ntoskrnl_init_lock(kspin_lock *);
148 __stdcall static size_t ntoskrnl_memcmp(const void *, const void *, size_t);
149 __stdcall static void ntoskrnl_init_ansi_string(ndis_ansi_string *, char *);
150 __stdcall static void ntoskrnl_init_unicode_string(ndis_unicode_string *,
151 uint16_t *);
152 __stdcall static void ntoskrnl_free_unicode_string(ndis_unicode_string *);
153 __stdcall static void ntoskrnl_free_ansi_string(ndis_ansi_string *);
154 __stdcall static ndis_status ntoskrnl_unicode_to_int(ndis_unicode_string *,
155 uint32_t, uint32_t *);
156 static int atoi (const char *);
157 static long atol (const char *);
158 static int rand(void);
159 static void ntoskrnl_time(uint64_t *);
160 __stdcall static uint8_t ntoskrnl_wdmver(uint8_t, uint8_t);
161 static void ntoskrnl_thrfunc(void *);
162 __stdcall static ndis_status ntoskrnl_create_thread(ndis_handle *,
163 uint32_t, void *, ndis_handle, void *, void *, void *);
164 __stdcall static ndis_status ntoskrnl_thread_exit(ndis_status);
165 __stdcall static ndis_status ntoskrnl_devprop(device_object *, uint32_t,
166 uint32_t, void *, uint32_t *);
167 __stdcall static void ntoskrnl_init_mutex(kmutant *, uint32_t);
168 __stdcall static uint32_t ntoskrnl_release_mutex(kmutant *, uint8_t);
169 __stdcall static uint32_t ntoskrnl_read_mutex(kmutant *);
170 __stdcall static ndis_status ntoskrnl_objref(ndis_handle, uint32_t, void *,
171 uint8_t, void **, void **);
172 __stdcall static uint32_t ntoskrnl_zwclose(ndis_handle);
173 static uint32_t ntoskrnl_dbgprint(char *, ...);
174 __stdcall static void ntoskrnl_debugger(void);
175 __stdcall static void dummy(void);
177 static struct lwkt_token ntoskrnl_dispatchtoken;
178 static kspin_lock ntoskrnl_global;
179 static int ntoskrnl_kth = 0;
180 static struct nt_objref_head ntoskrnl_reflist;
182 static MALLOC_DEFINE(M_NDIS, "ndis", "ndis emulation");
185 ntoskrnl_libinit(void)
187 lwkt_token_init(&ntoskrnl_dispatchtoken);
188 ntoskrnl_init_lock(&ntoskrnl_global);
189 TAILQ_INIT(&ntoskrnl_reflist);
190 return(0);
194 ntoskrnl_libfini(void)
196 lwkt_token_uninit(&ntoskrnl_dispatchtoken);
197 return(0);
200 __stdcall static uint8_t
201 ntoskrnl_unicode_equal(ndis_unicode_string *str1,
202 ndis_unicode_string *str2,
203 uint8_t caseinsensitive)
205 int i;
207 if (str1->nus_len != str2->nus_len)
208 return(FALSE);
210 for (i = 0; i < str1->nus_len; i++) {
211 if (caseinsensitive == TRUE) {
212 if (toupper((char)(str1->nus_buf[i] & 0xFF)) !=
213 toupper((char)(str2->nus_buf[i] & 0xFF)))
214 return(FALSE);
215 } else {
216 if (str1->nus_buf[i] != str2->nus_buf[i])
217 return(FALSE);
221 return(TRUE);
224 __stdcall static void
225 ntoskrnl_unicode_copy(ndis_unicode_string *dest,
226 ndis_unicode_string *src)
229 if (dest->nus_maxlen >= src->nus_len)
230 dest->nus_len = src->nus_len;
231 else
232 dest->nus_len = dest->nus_maxlen;
233 memcpy(dest->nus_buf, src->nus_buf, dest->nus_len);
234 return;
237 __stdcall static ndis_status
238 ntoskrnl_unicode_to_ansi(ndis_ansi_string *dest,
239 ndis_unicode_string *src,
240 uint8_t allocate)
242 char *astr = NULL;
244 if (dest == NULL || src == NULL)
245 return(NDIS_STATUS_FAILURE);
247 if (allocate == TRUE) {
248 if (ndis_unicode_to_ascii(src->nus_buf, src->nus_len, &astr))
249 return(NDIS_STATUS_FAILURE);
250 dest->nas_buf = astr;
251 dest->nas_len = dest->nas_maxlen = strlen(astr);
252 } else {
253 dest->nas_len = src->nus_len / 2; /* XXX */
254 if (dest->nas_maxlen < dest->nas_len)
255 dest->nas_len = dest->nas_maxlen;
256 ndis_unicode_to_ascii(src->nus_buf, dest->nas_len * 2,
257 &dest->nas_buf);
259 return (NDIS_STATUS_SUCCESS);
262 __stdcall static ndis_status
263 ntoskrnl_ansi_to_unicode(ndis_unicode_string *dest,
264 ndis_ansi_string *src,
265 uint8_t allocate)
267 uint16_t *ustr = NULL;
269 if (dest == NULL || src == NULL)
270 return(NDIS_STATUS_FAILURE);
272 if (allocate == TRUE) {
273 if (ndis_ascii_to_unicode(src->nas_buf, &ustr))
274 return(NDIS_STATUS_FAILURE);
275 dest->nus_buf = ustr;
276 dest->nus_len = dest->nus_maxlen = strlen(src->nas_buf) * 2;
277 } else {
278 dest->nus_len = src->nas_len * 2; /* XXX */
279 if (dest->nus_maxlen < dest->nus_len)
280 dest->nus_len = dest->nus_maxlen;
281 ndis_ascii_to_unicode(src->nas_buf, &dest->nus_buf);
283 return (NDIS_STATUS_SUCCESS);
286 __stdcall static void *
287 ntoskrnl_iobuildsynchfsdreq(uint32_t func, void *dobj, void *buf,
288 uint32_t len, uint32_t *off,
289 void *event, void *status)
291 return(NULL);
294 __stdcall __regcall static uint32_t
295 ntoskrnl_iofcalldriver(REGARGS2(void *dobj, void *irp))
297 return(0);
300 __stdcall __regcall static void
301 ntoskrnl_iofcompletereq(REGARGS2(void *irp, uint8_t prioboost))
305 static void
306 ntoskrnl_wakeup(void *arg)
308 nt_dispatch_header *obj;
309 wait_block *w;
310 list_entry *e;
311 struct thread *td;
312 struct lwkt_tokref tokref;
314 obj = arg;
316 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
317 obj->dh_sigstate = TRUE;
318 e = obj->dh_waitlisthead.nle_flink;
319 while (e != &obj->dh_waitlisthead) {
320 w = (wait_block *)e;
321 td = w->wb_kthread;
322 ndis_thresume(td);
324 * For synchronization objects, only wake up
325 * the first waiter.
327 if (obj->dh_type == EVENT_TYPE_SYNC)
328 break;
329 e = e->nle_flink;
331 lwkt_reltoken(&tokref);
333 return;
336 static void
337 ntoskrnl_time(uint64_t *tval)
339 struct timespec ts;
341 nanotime(&ts);
342 *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
343 11644473600LL * 10000000; /* 100ns intervals between 1601 and 1970 */
345 return;
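/*
 * Example of the time format used here: Windows keeps time as
 * 100-nanosecond intervals since 1601-01-01, so a Unix time of 0
 * (1970-01-01 00:00:00 UTC) corresponds to
 * 11644473600 * 10000000 = 116444736000000000 units.
 */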
349 * KeWaitForSingleObject() is a tricky beast, because it can be used
350 * with several different object types: semaphores, timers, events,
351 * mutexes and threads. Semaphores don't appear very often, but the
352 * other object types are quite common. KeWaitForSingleObject() is
353 * what's normally used to acquire a mutex, and it can be used to
354 * wait for a thread termination.
356 * The Windows NDIS API is implemented in terms of Windows kernel
357 * primitives, and some of the object manipulation is duplicated in
358 * NDIS. For example, NDIS has timers and events, which are actually
359 * Windows kevents and ktimers. Now, you're supposed to only use the
360 * NDIS variants of these objects within the confines of the NDIS API,
361 * but there are some naughty developers out there who will use
362 * KeWaitForSingleObject() on NDIS timer and event objects, so we
363 * have to support that as well. Consequently, our NDIS timer and event
364 * code has to be closely tied into our ntoskrnl timer and event code,
365 * just as it is in Windows.
367 * KeWaitForSingleObject() may do different things for different kinds
368 * of objects:
370 * - For events, we check if the event has been signalled. If the
371 * event is already in the signalled state, we just return immediately,
372 * otherwise we wait for it to be set to the signalled state by someone
373 * else calling KeSetEvent(). Events can be either synchronization or
374 * notification events.
376 * - For timers, if the timer has already fired and the timer is in
377 * the signalled state, we just return, otherwise we wait on the
378 * timer. Unlike an event, timers get signalled automatically when
379 * they expire rather than someone having to trip them manually.
380 * Timers initialized with KeInitializeTimer() are always notification
381 * events: KeInitializeTimerEx() lets you initialize a timer as
382 * either a notification or synchronization event.
384 * - For mutexes, we try to acquire the mutex and if we can't, we wait
385 * on the mutex until it's available and then grab it. When a mutex is
386 * released, it enters the signaled state, which wakes up one of the
387 * threads waiting to acquire it. Mutexes are always synchronization
388 * events.
390 * - For threads, the only thing we do is wait until the thread object
391 * enters a signalled state, which occurs when the thread terminates.
392 * Threads are always notification events.
394 * A notification event wakes up all threads waiting on an object. A
395 * synchronization event wakes up just one. Also, a synchronization event
396 * is auto-clearing, which means we automatically set the event back to
397 * the non-signalled state once the wakeup is done.
400 __stdcall uint32_t
401 ntoskrnl_waitforobj(nt_dispatch_header *obj, uint32_t reason,
402 uint32_t mode, uint8_t alertable, int64_t *duetime)
404 struct thread *td = curthread;
405 kmutant *km;
406 wait_block w;
407 struct timeval tv;
408 int error = 0;
409 int ticks;
410 uint64_t curtime;
411 struct lwkt_tokref tokref;
413 if (obj == NULL)
414 return(STATUS_INVALID_PARAMETER);
416 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
419 * See if the object is a mutex. If so, and we already own
420 * it, then just increment the acquisition count and return.
422 * For any other kind of object, see if it's already in the
423 * signalled state, and if it is, just return. If the object
424 * is marked as a synchronization event, reset the state to
425 * unsignalled.
428 if (obj->dh_size == OTYPE_MUTEX) {
429 km = (kmutant *)obj;
430 if (km->km_ownerthread == NULL ||
431 km->km_ownerthread == curthread->td_proc) {
432 obj->dh_sigstate = FALSE;
433 km->km_acquirecnt++;
434 km->km_ownerthread = curthread->td_proc;
435 lwkt_reltoken(&tokref);
436 return (STATUS_SUCCESS);
438 } else if (obj->dh_sigstate == TRUE) {
439 if (obj->dh_type == EVENT_TYPE_SYNC)
440 obj->dh_sigstate = FALSE;
441 lwkt_reltoken(&tokref);
442 return (STATUS_SUCCESS);
445 w.wb_object = obj;
446 w.wb_kthread = td;
448 INSERT_LIST_TAIL((&obj->dh_waitlisthead), (&w.wb_waitlist));
451 * The timeout value is specified in 100 nanosecond units
452 * and can be a positive or negative number. If it's positive,
453 * then the duetime is absolute, and we need to convert it
454 * to an absolute offset relative to now in order to use it.
455 * If it's negative, then the duetime is relative and we
456 * just have to convert the units.
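/*
 * Worked example: a duetime of -50000000 is a relative wait of
 * 50000000 * 100ns = 5 seconds, which the conversion below turns
 * into tv.tv_sec = 5, tv.tv_usec = 0.
 */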
459 if (duetime != NULL) {
460 if (*duetime < 0) {
461 tv.tv_sec = - (*duetime) / 10000000;
462 tv.tv_usec = (- (*duetime) / 10) -
463 (tv.tv_sec * 1000000);
464 } else {
465 ntoskrnl_time(&curtime);
466 if (*duetime < curtime)
467 tv.tv_sec = tv.tv_usec = 0;
468 else {
469 tv.tv_sec = ((*duetime) - curtime) / 10000000;
470 tv.tv_usec = ((*duetime) - curtime) / 10 -
471 (tv.tv_sec * 1000000);
476 lwkt_reltoken(&tokref);
478 ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000;
479 error = ndis_thsuspend(td, duetime == NULL ? 0 : ticks);
481 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
483 /* We timed out. Leave the object alone and return status. */
485 if (error == EWOULDBLOCK) {
486 REMOVE_LIST_ENTRY((&w.wb_waitlist));
487 lwkt_reltoken(&tokref);
488 return(STATUS_TIMEOUT);
492 * Mutexes are always synchronization objects, which means
493 * if several threads are waiting to acquire it, only one will
494 * be woken up. If that one is us, and the mutex is up for grabs,
495 * grab it.
498 if (obj->dh_size == OTYPE_MUTEX) {
499 km = (kmutant *)obj;
500 if (km->km_ownerthread == NULL) {
501 km->km_ownerthread = curthread->td_proc;
502 km->km_acquirecnt++;
506 if (obj->dh_type == EVENT_TYPE_SYNC)
507 obj->dh_sigstate = FALSE;
508 REMOVE_LIST_ENTRY((&w.wb_waitlist));
510 lwkt_reltoken(&tokref);
512 return(STATUS_SUCCESS);
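/*
 * Illustrative sketch (driver-side view, not code in this file): the
 * typical pattern that ends up in ntoskrnl_waitforobj() above. A
 * Windows driver does roughly
 *
 *	KEVENT ev;
 *	KeInitializeEvent(&ev, NotificationEvent, FALSE);
 *	...
 *	KeWaitForSingleObject(&ev, Executive, KernelMode, FALSE, NULL);
 *
 * while another thread (or a completion handler) later calls
 * KeSetEvent(&ev, 0, FALSE). Here those imports are serviced by
 * ntoskrnl_init_event(), ntoskrnl_waitforobj() and ntoskrnl_set_event()
 * via the image_patch_table at the end of this file.
 */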
515 __stdcall static uint32_t
516 ntoskrnl_waitforobjs(uint32_t cnt, nt_dispatch_header *obj[],
517 uint32_t wtype, uint32_t reason, uint32_t mode,
518 uint8_t alertable, int64_t *duetime,
519 wait_block *wb_array)
521 struct thread *td = curthread;
522 kmutant *km;
523 wait_block _wb_array[THREAD_WAIT_OBJECTS];
524 wait_block *w;
525 struct timeval tv;
526 int i, wcnt = 0, widx = 0, error = 0, ticks;
527 uint64_t curtime;
528 struct timespec t1, t2;
529 struct lwkt_tokref tokref;
531 if (cnt > MAX_WAIT_OBJECTS)
532 return(STATUS_INVALID_PARAMETER);
533 if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
534 return(STATUS_INVALID_PARAMETER);
536 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
538 if (wb_array == NULL)
539 w = &_wb_array[0];
540 else
541 w = wb_array;
543 /* First pass: see if we can satisfy any waits immediately. */
545 for (i = 0; i < cnt; i++) {
546 if (obj[i]->dh_size == OTYPE_MUTEX) {
547 km = (kmutant *)obj[i];
548 if (km->km_ownerthread == NULL ||
549 km->km_ownerthread == curthread->td_proc) {
550 obj[i]->dh_sigstate = FALSE;
551 km->km_acquirecnt++;
552 km->km_ownerthread = curthread->td_proc;
553 if (wtype == WAITTYPE_ANY) {
554 lwkt_reltoken(&tokref);
555 return (STATUS_WAIT_0 + i);
558 } else if (obj[i]->dh_sigstate == TRUE) {
559 if (obj[i]->dh_type == EVENT_TYPE_SYNC)
560 obj[i]->dh_sigstate = FALSE;
561 if (wtype == WAITTYPE_ANY) {
562 lwkt_reltoken(&tokref);
563 return (STATUS_WAIT_0 + i);
569 * Second pass: set up wait for anything we can't
570 * satisfy immediately.
573 for (i = 0; i < cnt; i++) {
574 if (obj[i]->dh_sigstate == TRUE)
575 continue;
576 INSERT_LIST_TAIL((&obj[i]->dh_waitlisthead),
577 (&w[i].wb_waitlist));
578 w[i].wb_kthread = td;
579 w[i].wb_object = obj[i];
580 wcnt++;
583 if (duetime != NULL) {
584 if (*duetime < 0) {
585 tv.tv_sec = - (*duetime) / 10000000;
586 tv.tv_usec = (- (*duetime) / 10) -
587 (tv.tv_sec * 1000000);
588 } else {
589 ntoskrnl_time(&curtime);
590 if (*duetime < curtime)
591 tv.tv_sec = tv.tv_usec = 0;
592 else {
593 tv.tv_sec = ((*duetime) - curtime) / 10000000;
594 tv.tv_usec = ((*duetime) - curtime) / 10 -
595 (tv.tv_sec * 1000000);
600 while (wcnt) {
601 nanotime(&t1);
602 lwkt_reltoken(&tokref);
604 ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000;
606 error = ndis_thsuspend(td, duetime == NULL ? 0 : ticks);
608 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
609 nanotime(&t2);
611 for (i = 0; i < cnt; i++) {
612 if (obj[i]->dh_size == OTYPE_MUTEX) {
613 km = (kmutant *)obj[i];
614 if (km->km_ownerthread == NULL) {
615 km->km_ownerthread =
616 curthread->td_proc;
617 km->km_acquirecnt++;
620 if (obj[i]->dh_sigstate == TRUE) {
621 widx = i;
622 if (obj[i]->dh_type == EVENT_TYPE_SYNC)
623 obj[i]->dh_sigstate = FALSE;
624 REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
625 wcnt--;
629 if (error || wtype == WAITTYPE_ANY)
630 break;
632 if (duetime != NULL) {
633 tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
634 tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
638 if (wcnt) {
639 for (i = 0; i < cnt; i++)
640 REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
643 if (error == EWOULDBLOCK) {
644 lwkt_reltoken(&tokref);
645 return(STATUS_TIMEOUT);
648 if (wtype == WAITTYPE_ANY && wcnt) {
649 lwkt_reltoken(&tokref);
650 return(STATUS_WAIT_0 + widx);
653 lwkt_reltoken(&tokref);
655 return(STATUS_SUCCESS);
658 __stdcall static void
659 ntoskrnl_writereg_ushort(uint16_t *reg, uint16_t val)
661 bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
662 return;
665 __stdcall static uint16_t
666 ntoskrnl_readreg_ushort(uint16_t *reg)
668 return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
671 __stdcall static void
672 ntoskrnl_writereg_ulong(uint32_t *reg, uint32_t val)
674 bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
675 return;
678 __stdcall static uint32_t
679 ntoskrnl_readreg_ulong(uint32_t *reg)
681 return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
684 __stdcall static uint8_t
685 ntoskrnl_readreg_uchar(uint8_t *reg)
687 return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
690 __stdcall static void
691 ntoskrnl_writereg_uchar(uint8_t *reg, uint8_t val)
693 bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
694 return;
697 __stdcall static int64_t
698 _allmul(int64_t a, int64_t b)
700 return (a * b);
703 __stdcall static int64_t
704 _alldiv(int64_t a, int64_t b)
706 return (a / b);
709 __stdcall static int64_t
710 _allrem(int64_t a, int64_t b)
712 return (a % b);
715 __stdcall static uint64_t
716 _aullmul(uint64_t a, uint64_t b)
718 return (a * b);
721 __stdcall static uint64_t
722 _aulldiv(uint64_t a, uint64_t b)
724 return (a / b);
727 __stdcall static uint64_t
728 _aullrem(uint64_t a, uint64_t b)
730 return (a % b);
733 __regparm static int64_t
734 _allshl(int64_t a, uint8_t b)
736 return (a << b);
739 __regparm static uint64_t
740 _aullshl(uint64_t a, uint8_t b)
742 return (a << b);
745 __regparm static int64_t
746 _allshr(int64_t a, uint8_t b)
748 return (a >> b);
751 __regparm static uint64_t
752 _aullshr(uint64_t a, uint8_t b)
754 return (a >> b);
757 static slist_entry *
758 ntoskrnl_pushsl(slist_header *head, slist_entry *entry)
760 slist_entry *oldhead;
762 oldhead = head->slh_list.slh_next;
763 entry->sl_next = head->slh_list.slh_next;
764 head->slh_list.slh_next = entry;
765 head->slh_list.slh_depth++;
766 head->slh_list.slh_seq++;
768 return(oldhead);
771 static slist_entry *
772 ntoskrnl_popsl(slist_header *head)
774 slist_entry *first;
776 first = head->slh_list.slh_next;
777 if (first != NULL) {
778 head->slh_list.slh_next = first->sl_next;
779 head->slh_list.slh_depth--;
780 head->slh_list.slh_seq++;
783 return(first);
786 __stdcall static void *
787 ntoskrnl_allocfunc(uint32_t pooltype, size_t size, uint32_t tag)
789 return(kmalloc(size, M_DEVBUF, M_WAITOK));
792 __stdcall static void
793 ntoskrnl_freefunc(void *buf)
795 kfree(buf, M_DEVBUF);
796 return;
799 __stdcall static void
800 ntoskrnl_init_lookaside(paged_lookaside_list *lookaside,
801 lookaside_alloc_func *allocfunc,
802 lookaside_free_func *freefunc,
803 uint32_t flags, size_t size,
804 uint32_t tag, uint16_t depth)
806 bzero((char *)lookaside, sizeof(paged_lookaside_list));
808 if (size < sizeof(slist_entry))
809 lookaside->nll_l.gl_size = sizeof(slist_entry);
810 else
811 lookaside->nll_l.gl_size = size;
812 lookaside->nll_l.gl_tag = tag;
813 if (allocfunc == NULL)
814 lookaside->nll_l.gl_allocfunc = ntoskrnl_allocfunc;
815 else
816 lookaside->nll_l.gl_allocfunc = allocfunc;
818 if (freefunc == NULL)
819 lookaside->nll_l.gl_freefunc = ntoskrnl_freefunc;
820 else
821 lookaside->nll_l.gl_freefunc = freefunc;
823 ntoskrnl_init_lock(&lookaside->nll_obsoletelock);
825 lookaside->nll_l.gl_depth = LOOKASIDE_DEPTH;
826 lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
828 return;
831 __stdcall static void
832 ntoskrnl_delete_lookaside(paged_lookaside_list *lookaside)
834 void *buf;
835 __stdcall void (*freefunc)(void *);
837 freefunc = lookaside->nll_l.gl_freefunc;
838 while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
839 freefunc(buf);
841 return;
844 __stdcall static void
845 ntoskrnl_init_nplookaside(npaged_lookaside_list *lookaside,
846 lookaside_alloc_func *allocfunc,
847 lookaside_free_func *freefunc,
848 uint32_t flags, size_t size,
849 uint32_t tag, uint16_t depth)
851 bzero((char *)lookaside, sizeof(npaged_lookaside_list));
853 if (size < sizeof(slist_entry))
854 lookaside->nll_l.gl_size = sizeof(slist_entry);
855 else
856 lookaside->nll_l.gl_size = size;
857 lookaside->nll_l.gl_tag = tag;
858 if (allocfunc == NULL)
859 lookaside->nll_l.gl_allocfunc = ntoskrnl_allocfunc;
860 else
861 lookaside->nll_l.gl_allocfunc = allocfunc;
863 if (freefunc == NULL)
864 lookaside->nll_l.gl_freefunc = ntoskrnl_freefunc;
865 else
866 lookaside->nll_l.gl_freefunc = freefunc;
868 ntoskrnl_init_lock(&lookaside->nll_obsoletelock);
870 lookaside->nll_l.gl_depth = LOOKASIDE_DEPTH;
871 lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
873 return;
876 __stdcall static void
877 ntoskrnl_delete_nplookaside(npaged_lookaside_list *lookaside)
879 void *buf;
880 __stdcall void (*freefunc)(void *);
882 freefunc = lookaside->nll_l.gl_freefunc;
883 while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
884 freefunc(buf);
886 return;
890 * Note: the interlocked slist push and pop routines are
891 * declared to be _fastcall in Windows. gcc 3.4 is supposed
892 * to have support for this calling convention, however we
893 * don't have that version available yet, so we kludge things
894 * up using some inline assembly.
897 __stdcall __regcall static slist_entry *
898 ntoskrnl_push_slist(REGARGS2(slist_header *head, slist_entry *entry))
900 slist_entry *oldhead;
902 oldhead = (slist_entry *)FASTCALL3(ntoskrnl_push_slist_ex,
903 head, entry, &ntoskrnl_global);
905 return(oldhead);
908 __stdcall __regcall static slist_entry *
909 ntoskrnl_pop_slist(REGARGS1(slist_header *head))
911 slist_entry *first;
913 first = (slist_entry *)FASTCALL2(ntoskrnl_pop_slist_ex,
914 head, &ntoskrnl_global);
916 return(first);
919 __stdcall __regcall static slist_entry *
920 ntoskrnl_push_slist_ex(REGARGS2(slist_header *head, slist_entry *entry), kspin_lock *lock)
922 slist_entry *oldhead;
923 uint8_t irql;
925 irql = FASTCALL2(hal_lock, lock, DISPATCH_LEVEL);
926 oldhead = ntoskrnl_pushsl(head, entry);
927 FASTCALL2(hal_unlock, lock, irql);
929 return(oldhead);
932 __stdcall __regcall static slist_entry *
933 ntoskrnl_pop_slist_ex(REGARGS2(slist_header *head, kspin_lock *lock))
935 slist_entry *first;
936 uint8_t irql;
938 irql = FASTCALL2(hal_lock, lock, DISPATCH_LEVEL);
939 first = ntoskrnl_popsl(head);
940 FASTCALL2(hal_unlock, lock, irql);
942 return(first);
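/*
 * Illustrative sketch (driver-side usage, not code in this file): a
 * Windows driver typically keeps an interlocked free list with
 *
 *	InterlockedPushEntrySList(&free_list, &item->link);
 *	entry = InterlockedPopEntrySList(&free_list);
 *
 * where item->link is an SLIST_ENTRY embedded in the caller's own
 * structure. Those imports resolve to ntoskrnl_push_slist() and
 * ntoskrnl_pop_slist(), which serialize on the global ntoskrnl_global
 * spin lock rather than performing a true lock-free update.
 */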
945 __stdcall __regcall void
946 ntoskrnl_lock_dpc(REGARGS1(kspin_lock *lock))
948 while (atomic_poll_acquire_int((volatile u_int *)lock) == 0)
949 /* sit and spin */;
952 __stdcall __regcall void
953 ntoskrnl_unlock_dpc(REGARGS1(kspin_lock *lock))
955 atomic_poll_release_int((volatile u_int *)lock);
958 __stdcall __regcall static uint32_t
959 ntoskrnl_interlock_inc(REGARGS1(volatile uint32_t *addend))
961 atomic_add_long((volatile u_long *)addend, 1);
962 return(*addend);
965 __stdcall __regcall static uint32_t
966 ntoskrnl_interlock_dec(REGARGS1(volatile uint32_t *addend))
968 atomic_subtract_long((volatile u_long *)addend, 1);
969 return(*addend);
972 __stdcall __regcall static void
973 ntoskrnl_interlock_addstat(REGARGS2(uint64_t *addend, uint32_t inc))
975 uint8_t irql;
977 irql = FASTCALL2(hal_lock, &ntoskrnl_global, DISPATCH_LEVEL);
978 *addend += inc;
979 FASTCALL2(hal_unlock, &ntoskrnl_global, irql);
981 return;
984 __stdcall static void
985 ntoskrnl_freemdl(ndis_buffer *mdl)
987 ndis_buffer *head;
989 if (mdl == NULL || mdl->nb_process == NULL)
990 return;
992 head = mdl->nb_process;
994 if (head->nb_flags != 0x1)
995 return;
997 mdl->nb_next = head->nb_next;
998 head->nb_next = mdl;
1000 /* Decrement count of busy buffers. */
1002 head->nb_bytecount--;
1005 * If the pool has been marked for deletion and there are
1006 * no more buffers outstanding, nuke the pool.
1009 if (head->nb_byteoffset && head->nb_bytecount == 0)
1010 kfree(head, M_DEVBUF);
1012 return;
1015 __stdcall static uint32_t
1016 ntoskrnl_sizeofmdl(void *vaddr, size_t len)
1018 uint32_t l;
1020 l = sizeof(struct ndis_buffer) +
1021 (sizeof(uint32_t) * SPAN_PAGES(vaddr, len));
1023 return(l);
1026 __stdcall static void
1027 ntoskrnl_build_npaged_mdl(ndis_buffer *mdl)
1029 mdl->nb_mappedsystemva = (char *)mdl->nb_startva + mdl->nb_byteoffset;
1030 return;
1033 __stdcall static void *
1034 ntoskrnl_mmaplockedpages(ndis_buffer *buf, uint8_t accessmode)
1036 return(MDL_VA(buf));
1039 __stdcall static void *
1040 ntoskrnl_mmaplockedpages_cache(ndis_buffer *buf, uint8_t accessmode,
1041 uint32_t cachetype, void *vaddr,
1042 uint32_t bugcheck, uint32_t prio)
1044 return(MDL_VA(buf));
1047 __stdcall static void
1048 ntoskrnl_munmaplockedpages(void *vaddr, ndis_buffer *buf)
1050 return;
1054 * The KeInitializeSpinLock(), KefAcquireSpinLockAtDpcLevel()
1055 * and KefReleaseSpinLockFromDpcLevel() routines appear to be analogous
1056 * to crit_enter()/crit_exit() in their use. We can't create a new mutex
1057 * lock here because there is no complementary KeFreeSpinLock()
1058 * function. Instead, the lock is a plain word that is acquired and released atomically.
1060 __stdcall static void
1061 ntoskrnl_init_lock(kspin_lock *lock)
1063 *lock = 0;
1065 return;
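/*
 * Illustrative sketch (driver-side view, not code in this file): a
 * driver initializes and uses one of these locks roughly as
 *
 *	KSPIN_LOCK lck;
 *	KeInitializeSpinLock(&lck);
 *	KefAcquireSpinLockAtDpcLevel(&lck);
 *	... touch shared state ...
 *	KefReleaseSpinLockFromDpcLevel(&lck);
 *
 * which lands in ntoskrnl_init_lock(), ntoskrnl_lock_dpc() and
 * ntoskrnl_unlock_dpc(): the lock word is acquired by spinning on
 * atomic_poll_acquire_int() and released with atomic_poll_release_int().
 */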
1068 __stdcall static size_t
1069 ntoskrnl_memcmp(const void *s1, const void *s2, size_t len)
1071 size_t i, total = 0;
1072 uint8_t *m1, *m2;
1074 m1 = __DECONST(uint8_t *, s1);
1075 m2 = __DECONST(uint8_t *, s2);
1077 for (i = 0; i < len; i++) {
1078 if (m1[i] == m2[i])
1079 total++;
1081 return(total);
1084 __stdcall static void
1085 ntoskrnl_init_ansi_string(ndis_ansi_string *dst, char *src)
1087 ndis_ansi_string *a;
1089 a = dst;
1090 if (a == NULL)
1091 return;
1092 if (src == NULL) {
1093 a->nas_len = a->nas_maxlen = 0;
1094 a->nas_buf = NULL;
1095 } else {
1096 a->nas_buf = src;
1097 a->nas_len = a->nas_maxlen = strlen(src);
1100 return;
1103 __stdcall static void
1104 ntoskrnl_init_unicode_string(ndis_unicode_string *dst, uint16_t *src)
1106 ndis_unicode_string *u;
1107 int i;
1109 u = dst;
1110 if (u == NULL)
1111 return;
1112 if (src == NULL) {
1113 u->nus_len = u->nus_maxlen = 0;
1114 u->nus_buf = NULL;
1115 } else {
1116 i = 0;
1117 while(src[i] != 0)
1118 i++;
1119 u->nus_buf = src;
1120 u->nus_len = u->nus_maxlen = i * 2;
1123 return;
1126 __stdcall ndis_status
1127 ntoskrnl_unicode_to_int(ndis_unicode_string *ustr, uint32_t base,
1128 uint32_t *val)
1130 uint16_t *uchr;
1131 int len, neg = 0;
1132 char abuf[64];
1133 char *astr;
1135 uchr = ustr->nus_buf;
1136 len = ustr->nus_len;
1137 bzero(abuf, sizeof(abuf));
1139 if ((char)((*uchr) & 0xFF) == '-') {
1140 neg = 1;
1141 uchr++;
1142 len -= 2;
1143 } else if ((char)((*uchr) & 0xFF) == '+') {
1144 neg = 0;
1145 uchr++;
1146 len -= 2;
1149 if (base == 0) {
1150 if ((char)((*uchr) & 0xFF) == 'b') {
1151 base = 2;
1152 uchr++;
1153 len -= 2;
1154 } else if ((char)((*uchr) & 0xFF) == 'o') {
1155 base = 8;
1156 uchr++;
1157 len -= 2;
1158 } else if ((char)((*uchr) & 0xFF) == 'x') {
1159 base = 16;
1160 uchr++;
1161 len -= 2;
1162 } else
1163 base = 10;
1166 astr = abuf;
1167 if (neg) {
1168 strcpy(astr, "-");
1169 astr++;
1172 ndis_unicode_to_ascii(uchr, len, &astr);
1173 *val = strtoul(abuf, NULL, base);
1175 return(NDIS_STATUS_SUCCESS);
1178 __stdcall static void
1179 ntoskrnl_free_unicode_string(ndis_unicode_string *ustr)
1181 if (ustr->nus_buf == NULL)
1182 return;
1183 kfree(ustr->nus_buf, M_DEVBUF);
1184 ustr->nus_buf = NULL;
1185 return;
1188 __stdcall static void
1189 ntoskrnl_free_ansi_string(ndis_ansi_string *astr)
1191 if (astr->nas_buf == NULL)
1192 return;
1193 kfree(astr->nas_buf, M_DEVBUF);
1194 astr->nas_buf = NULL;
1195 return;
1198 static int
1199 atoi(const char *str)
1201 return (int)strtol(str, (char **)NULL, 10);
1204 static long
1205 atol(const char *str)
1207 return strtol(str, (char **)NULL, 10);
1210 static int
1211 rand(void)
1213 struct timeval tv;
1215 microtime(&tv);
1216 skrandom(tv.tv_usec);
1217 return((int)krandom());
1220 __stdcall static uint8_t
1221 ntoskrnl_wdmver(uint8_t major, uint8_t minor)
1223 if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
1224 return(TRUE);
1225 return(FALSE);
1228 __stdcall static ndis_status
1229 ntoskrnl_devprop(device_object *devobj, uint32_t regprop, uint32_t buflen,
1230 void *prop, uint32_t *reslen)
1232 ndis_miniport_block *block;
1234 block = devobj->do_rsvd;
1236 switch (regprop) {
1237 case DEVPROP_DRIVER_KEYNAME:
1238 ndis_ascii_to_unicode(__DECONST(char *,
1239 device_get_nameunit(block->nmb_dev)), (uint16_t **)&prop);
1240 *reslen = strlen(device_get_nameunit(block->nmb_dev)) * 2;
1241 break;
1242 default:
1243 return(STATUS_INVALID_PARAMETER_2);
1244 break;
1247 return(STATUS_SUCCESS);
1250 __stdcall static void
1251 ntoskrnl_init_mutex(kmutant *kmutex, uint32_t level)
1253 INIT_LIST_HEAD((&kmutex->km_header.dh_waitlisthead));
1254 kmutex->km_abandoned = FALSE;
1255 kmutex->km_apcdisable = 1;
1256 kmutex->km_header.dh_sigstate = TRUE;
1257 kmutex->km_header.dh_type = EVENT_TYPE_SYNC;
1258 kmutex->km_header.dh_size = OTYPE_MUTEX;
1259 kmutex->km_acquirecnt = 0;
1260 kmutex->km_ownerthread = NULL;
1261 return;
1264 __stdcall static uint32_t
1265 ntoskrnl_release_mutex(kmutant *kmutex, uint8_t kwait)
1267 struct lwkt_tokref tokref;
1269 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
1270 if (kmutex->km_ownerthread != curthread->td_proc) {
1271 lwkt_reltoken(&tokref);
1272 return(STATUS_MUTANT_NOT_OWNED);
1274 kmutex->km_acquirecnt--;
1275 if (kmutex->km_acquirecnt == 0) {
1276 kmutex->km_ownerthread = NULL;
1277 lwkt_reltoken(&tokref);
1278 ntoskrnl_wakeup(&kmutex->km_header);
1279 } else
1280 lwkt_reltoken(&tokref);
1282 return(kmutex->km_acquirecnt);
1285 __stdcall static uint32_t
1286 ntoskrnl_read_mutex(kmutant *kmutex)
1288 return(kmutex->km_header.dh_sigstate);
1291 __stdcall void
1292 ntoskrnl_init_event(nt_kevent *kevent, uint32_t type, uint8_t state)
1294 INIT_LIST_HEAD((&kevent->k_header.dh_waitlisthead));
1295 kevent->k_header.dh_sigstate = state;
1296 kevent->k_header.dh_type = type;
1297 kevent->k_header.dh_size = OTYPE_EVENT;
1298 return;
1301 __stdcall uint32_t
1302 ntoskrnl_reset_event(nt_kevent *kevent)
1304 uint32_t prevstate;
1305 struct lwkt_tokref tokref;
1307 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
1308 prevstate = kevent->k_header.dh_sigstate;
1309 kevent->k_header.dh_sigstate = FALSE;
1310 lwkt_reltoken(&tokref);
1312 return(prevstate);
1315 __stdcall uint32_t
1316 ntoskrnl_set_event(nt_kevent *kevent, uint32_t increment, uint8_t kwait)
1318 uint32_t prevstate;
1320 prevstate = kevent->k_header.dh_sigstate;
1321 ntoskrnl_wakeup(&kevent->k_header);
1323 return(prevstate);
1326 __stdcall void
1327 ntoskrnl_clear_event(nt_kevent *kevent)
1329 kevent->k_header.dh_sigstate = FALSE;
1330 return;
1333 __stdcall uint32_t
1334 ntoskrnl_read_event(nt_kevent *kevent)
1336 return(kevent->k_header.dh_sigstate);
1339 __stdcall static ndis_status
1340 ntoskrnl_objref(ndis_handle handle, uint32_t reqaccess, void *otype,
1341 uint8_t accessmode, void **object, void **handleinfo)
1343 nt_objref *nr;
1345 nr = kmalloc(sizeof(nt_objref), M_DEVBUF, M_WAITOK|M_ZERO);
1347 INIT_LIST_HEAD((&nr->no_dh.dh_waitlisthead));
1348 nr->no_obj = handle;
1349 nr->no_dh.dh_size = OTYPE_THREAD;
1350 TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
1351 *object = nr;
1353 return(NDIS_STATUS_SUCCESS);
1356 __stdcall __regcall static void
1357 ntoskrnl_objderef(REGARGS1(void *object))
1359 nt_objref *nr;
1361 nr = object;
1362 TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
1363 kfree(nr, M_DEVBUF);
1365 return;
1368 __stdcall static uint32_t
1369 ntoskrnl_zwclose(ndis_handle handle)
1371 return(STATUS_SUCCESS);
1375 * This is here just in case the thread returns without calling
1376 * PsTerminateSystemThread().
1378 static void
1379 ntoskrnl_thrfunc(void *arg)
1381 thread_context *thrctx;
1382 __stdcall uint32_t (*tfunc)(void *);
1383 void *tctx;
1384 uint32_t rval;
1386 thrctx = arg;
1387 tfunc = thrctx->tc_thrfunc;
1388 tctx = thrctx->tc_thrctx;
1389 kfree(thrctx, M_TEMP);
1391 rval = tfunc(tctx);
1393 ntoskrnl_thread_exit(rval);
1394 return; /* notreached */
1397 __stdcall static ndis_status
1398 ntoskrnl_create_thread(ndis_handle *handle, uint32_t reqaccess,
1399 void *objattrs, ndis_handle phandle,
1400 void *clientid, void *thrfunc, void *thrctx)
1402 int error;
1403 char tname[128];
1404 thread_context *tc;
1405 thread_t td;
1407 tc = kmalloc(sizeof(thread_context), M_TEMP, M_WAITOK);
1409 tc->tc_thrctx = thrctx;
1410 tc->tc_thrfunc = thrfunc;
1412 ksprintf(tname, "windows kthread %d", ntoskrnl_kth);
1413 error = kthread_create_stk(ntoskrnl_thrfunc, tc, &td,
1414 NDIS_KSTACK_PAGES * PAGE_SIZE, tname);
1415 *handle = td;
1417 ntoskrnl_kth++;
1419 return(error);
1423 * In Windows, the exit of a thread is an event that you're allowed
1424 * to wait on, assuming you've obtained a reference to the thread using
1425 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
1426 * simulate this behavior is to register each thread we create in a
1427 * reference list, and if someone holds a reference to us, we poke
1428 * them.
1430 __stdcall static ndis_status
1431 ntoskrnl_thread_exit(ndis_status status)
1433 struct nt_objref *nr;
1435 TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
1436 if (nr->no_obj != curthread)
1437 continue;
1438 ntoskrnl_wakeup(&nr->no_dh);
1439 break;
1442 ntoskrnl_kth--;
1444 kthread_exit();
1445 return(0); /* notreached */
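/*
 * Illustrative sketch (driver-side view, not code in this file): the
 * usual shutdown handshake that relies on the reference list kept by
 * ntoskrnl_objref() and ntoskrnl_thread_exit() is roughly
 *
 *	PsCreateSystemThread(&handle, ..., worker_func, ctx);
 *	ObReferenceObjectByHandle(handle, ..., &thr_obj, NULL);
 *	...
 *	KeWaitForSingleObject(thr_obj, Executive, KernelMode, FALSE, NULL);
 *
 * The wait completes because ntoskrnl_thread_exit() walks
 * ntoskrnl_reflist and wakes any nt_objref whose no_obj matches the
 * exiting thread.
 */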
1448 static uint32_t
1449 ntoskrnl_dbgprint(char *fmt, ...)
1451 __va_list ap;
1453 if (bootverbose) {
1454 __va_start(ap, fmt);
1455 kvprintf(fmt, ap);
1458 return(STATUS_SUCCESS);
1461 __stdcall static void
1462 ntoskrnl_debugger(void)
1465 #if __FreeBSD_version < 502113
1466 Debugger("ntoskrnl_debugger(): breakpoint");
1467 #else
1468 kdb_enter("ntoskrnl_debugger(): breakpoint");
1469 #endif
1472 static void
1473 ntoskrnl_timercall(void *arg)
1475 ktimer *timer;
1477 timer = arg;
1479 timer->k_header.dh_inserted = FALSE;
1482 * If this is a periodic timer, re-arm it
1483 * so it will fire again. We do this before
1484 * calling any deferred procedure calls because
1485 * it's possible the DPC might cancel the timer,
1486 * in which case it would be wrong for us to
1487 * re-arm it again afterwards.
1490 if (timer->k_period) {
1491 timer->k_header.dh_inserted = TRUE;
1492 callout_reset(timer->k_handle, 1 + timer->k_period * hz / 1000,
1493 ntoskrnl_timercall, timer);
1494 } else {
1495 callout_deactivate(timer->k_handle);
1496 kfree(timer->k_handle, M_NDIS);
1497 timer->k_handle = NULL;
1500 if (timer->k_dpc != NULL)
1501 ntoskrnl_queue_dpc(timer->k_dpc, NULL, NULL);
1503 ntoskrnl_wakeup(&timer->k_header);
1506 __stdcall void
1507 ntoskrnl_init_timer(ktimer *timer)
1509 if (timer == NULL)
1510 return;
1512 ntoskrnl_init_timer_ex(timer, EVENT_TYPE_NOTIFY);
1515 __stdcall void
1516 ntoskrnl_init_timer_ex(ktimer *timer, uint32_t type)
1518 if (timer == NULL)
1519 return;
1521 INIT_LIST_HEAD((&timer->k_header.dh_waitlisthead));
1522 timer->k_header.dh_sigstate = FALSE;
1523 timer->k_header.dh_inserted = FALSE;
1524 timer->k_header.dh_type = type;
1525 timer->k_header.dh_size = OTYPE_TIMER;
1526 timer->k_handle = NULL;
1528 return;
1532 * This is a wrapper for Windows deferred procedure calls that
1533 * have been placed on an NDIS thread work queue. We need it
1534 * since the DPC could be a _stdcall function. Also, as far as
1535 * I can tell, deferred procedure calls must run at DISPATCH_LEVEL.
1537 static void
1538 ntoskrnl_run_dpc(void *arg)
1540 kdpc_func dpcfunc;
1541 kdpc *dpc;
1542 uint8_t irql;
1544 dpc = arg;
1545 dpcfunc = (kdpc_func)dpc->k_deferedfunc;
1546 irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL);
1547 dpcfunc(dpc, dpc->k_deferredctx, dpc->k_sysarg1, dpc->k_sysarg2);
1548 FASTCALL1(hal_lower_irql, irql);
1550 return;
1553 __stdcall void
1554 ntoskrnl_init_dpc(kdpc *dpc, void *dpcfunc, void *dpcctx)
1556 if (dpc == NULL)
1557 return;
1559 dpc->k_deferedfunc = dpcfunc;
1560 dpc->k_deferredctx = dpcctx;
1562 return;
1565 __stdcall uint8_t
1566 ntoskrnl_queue_dpc(kdpc *dpc, void *sysarg1, void *sysarg2)
1568 dpc->k_sysarg1 = sysarg1;
1569 dpc->k_sysarg2 = sysarg2;
1570 if (ndis_sched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
1571 return(FALSE);
1573 return(TRUE);
1576 __stdcall uint8_t
1577 ntoskrnl_dequeue_dpc(kdpc *dpc)
1579 if (ndis_unsched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
1580 return(FALSE);
1582 return(TRUE);
1585 __stdcall uint8_t
1586 ntoskrnl_set_timer_ex(ktimer *timer, int64_t duetime, uint32_t period,
1587 kdpc *dpc)
1589 struct timeval tv;
1590 uint64_t curtime;
1591 uint8_t pending;
1592 int ticks;
1594 if (timer == NULL)
1595 return(FALSE);
1597 if (timer->k_header.dh_inserted == TRUE) {
1598 if (timer->k_handle != NULL)
1599 callout_stop(timer->k_handle);
1600 timer->k_header.dh_inserted = FALSE;
1601 pending = TRUE;
1602 } else
1603 pending = FALSE;
1605 timer->k_duetime = duetime;
1606 timer->k_period = period;
1607 timer->k_header.dh_sigstate = FALSE;
1608 timer->k_dpc = dpc;
1610 if (duetime < 0) {
1611 tv.tv_sec = - (duetime) / 10000000;
1612 tv.tv_usec = (- (duetime) / 10) -
1613 (tv.tv_sec * 1000000);
1614 } else {
1615 ntoskrnl_time(&curtime);
1616 if (duetime < curtime)
1617 tv.tv_sec = tv.tv_usec = 0;
1618 else {
1619 tv.tv_sec = ((duetime) - curtime) / 10000000;
1620 tv.tv_usec = ((duetime) - curtime) / 10 -
1621 (tv.tv_sec * 1000000);
1625 ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000;
1626 timer->k_header.dh_inserted = TRUE;
1627 if (timer->k_handle == NULL) {
1628 timer->k_handle = kmalloc(sizeof(struct callout), M_NDIS,
1629 M_INTWAIT);
1630 callout_init(timer->k_handle);
1632 callout_reset(timer->k_handle, ticks, ntoskrnl_timercall, timer);
1634 return(pending);
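/*
 * Illustrative sketch (driver-side view, not code in this file): a
 * periodic watchdog built on these routines looks roughly like
 *
 *	KeInitializeDpc(&dpc, watchdog_func, sc);
 *	KeInitializeTimer(&timer);
 *	KeSetTimerEx(&timer, due, 1000, &dpc);	/+ period in milliseconds +/
 *
 * where watchdog_func, sc and due are the driver's own names. Each
 * expiry re-arms the callout in ntoskrnl_timercall() (because k_period
 * is non-zero) and queues the DPC through ntoskrnl_queue_dpc(), which
 * runs it via ndis_sched() at an emulated DISPATCH_LEVEL.
 */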
1637 __stdcall uint8_t
1638 ntoskrnl_set_timer(ktimer *timer, int64_t duetime, kdpc *dpc)
1640 return (ntoskrnl_set_timer_ex(timer, duetime, 0, dpc));
1643 __stdcall uint8_t
1644 ntoskrnl_cancel_timer(ktimer *timer)
1646 uint8_t pending;
1648 if (timer == NULL)
1649 return(FALSE);
1651 if (timer->k_header.dh_inserted == TRUE) {
1652 if (timer->k_handle != NULL) {
1653 callout_stop(timer->k_handle);
1654 kfree(timer->k_handle, M_NDIS);
1655 timer->k_handle = NULL;
1657 if (timer->k_dpc != NULL)
1658 ntoskrnl_dequeue_dpc(timer->k_dpc);
1659 pending = TRUE;
1660 } else
1661 pending = FALSE;
1664 return(pending);
1667 __stdcall uint8_t
1668 ntoskrnl_read_timer(ktimer *timer)
1670 return(timer->k_header.dh_sigstate);
1673 __stdcall static void
1674 dummy(void)
1676 kprintf ("ntoskrnl dummy called...\n");
1677 return;
1681 image_patch_table ntoskrnl_functbl[] = {
1682 { "RtlCompareMemory", (FUNC)ntoskrnl_memcmp },
1683 { "RtlEqualUnicodeString", (FUNC)ntoskrnl_unicode_equal },
1684 { "RtlCopyUnicodeString", (FUNC)ntoskrnl_unicode_copy },
1685 { "RtlUnicodeStringToAnsiString", (FUNC)ntoskrnl_unicode_to_ansi },
1686 { "RtlAnsiStringToUnicodeString", (FUNC)ntoskrnl_ansi_to_unicode },
1687 { "RtlInitAnsiString", (FUNC)ntoskrnl_init_ansi_string },
1688 { "RtlInitUnicodeString", (FUNC)ntoskrnl_init_unicode_string },
1689 { "RtlFreeAnsiString", (FUNC)ntoskrnl_free_ansi_string },
1690 { "RtlFreeUnicodeString", (FUNC)ntoskrnl_free_unicode_string },
1691 { "RtlUnicodeStringToInteger", (FUNC)ntoskrnl_unicode_to_int },
1692 { "sprintf", (FUNC)ksprintf },
1693 { "vsprintf", (FUNC)kvsprintf },
1694 { "_snprintf", (FUNC)ksnprintf },
1695 { "_vsnprintf", (FUNC)kvsnprintf },
1696 { "DbgPrint", (FUNC)ntoskrnl_dbgprint },
1697 { "DbgBreakPoint", (FUNC)ntoskrnl_debugger },
1698 { "strncmp", (FUNC)strncmp },
1699 { "strcmp", (FUNC)strcmp },
1700 { "strncpy", (FUNC)strncpy },
1701 { "strcpy", (FUNC)strcpy },
1702 { "strlen", (FUNC)strlen },
1703 { "memcpy", (FUNC)memcpy },
1704 { "memmove", (FUNC)memcpy },
1705 { "memset", (FUNC)memset },
1706 { "IofCallDriver", (FUNC)ntoskrnl_iofcalldriver },
1707 { "IofCompleteRequest", (FUNC)ntoskrnl_iofcompletereq },
1708 { "IoBuildSynchronousFsdRequest", (FUNC)ntoskrnl_iobuildsynchfsdreq },
1709 { "KeWaitForSingleObject", (FUNC)ntoskrnl_waitforobj },
1710 { "KeWaitForMultipleObjects", (FUNC)ntoskrnl_waitforobjs },
1711 { "_allmul", (FUNC)_allmul },
1712 { "_alldiv", (FUNC)_alldiv },
1713 { "_allrem", (FUNC)_allrem },
1714 { "_allshr", (FUNC)_allshr },
1715 { "_allshl", (FUNC)_allshl },
1716 { "_aullmul", (FUNC)_aullmul },
1717 { "_aulldiv", (FUNC)_aulldiv },
1718 { "_aullrem", (FUNC)_aullrem },
1719 { "_aullshr", (FUNC)_aullshr },
1720 { "_aullshl", (FUNC)_aullshl },
1721 { "atoi", (FUNC)atoi },
1722 { "atol", (FUNC)atol },
1723 { "rand", (FUNC)rand },
1724 { "WRITE_REGISTER_USHORT", (FUNC)ntoskrnl_writereg_ushort },
1725 { "READ_REGISTER_USHORT", (FUNC)ntoskrnl_readreg_ushort },
1726 { "WRITE_REGISTER_ULONG", (FUNC)ntoskrnl_writereg_ulong },
1727 { "READ_REGISTER_ULONG", (FUNC)ntoskrnl_readreg_ulong },
1728 { "READ_REGISTER_UCHAR", (FUNC)ntoskrnl_readreg_uchar },
1729 { "WRITE_REGISTER_UCHAR", (FUNC)ntoskrnl_writereg_uchar },
1730 { "ExInitializePagedLookasideList", (FUNC)ntoskrnl_init_lookaside },
1731 { "ExDeletePagedLookasideList", (FUNC)ntoskrnl_delete_lookaside },
1732 { "ExInitializeNPagedLookasideList", (FUNC)ntoskrnl_init_nplookaside },
1733 { "ExDeleteNPagedLookasideList", (FUNC)ntoskrnl_delete_nplookaside },
1734 { "InterlockedPopEntrySList", (FUNC)ntoskrnl_pop_slist },
1735 { "InterlockedPushEntrySList", (FUNC)ntoskrnl_push_slist },
1736 { "ExInterlockedPopEntrySList", (FUNC)ntoskrnl_pop_slist_ex },
1737 { "ExInterlockedPushEntrySList",(FUNC)ntoskrnl_push_slist_ex },
1738 { "KefAcquireSpinLockAtDpcLevel", (FUNC)ntoskrnl_lock_dpc },
1739 { "KefReleaseSpinLockFromDpcLevel", (FUNC)ntoskrnl_unlock_dpc },
1740 { "InterlockedIncrement", (FUNC)ntoskrnl_interlock_inc },
1741 { "InterlockedDecrement", (FUNC)ntoskrnl_interlock_dec },
1742 { "ExInterlockedAddLargeStatistic",
1743 (FUNC)ntoskrnl_interlock_addstat },
1744 { "IoFreeMdl", (FUNC)ntoskrnl_freemdl },
1745 { "MmSizeOfMdl", (FUNC)ntoskrnl_sizeofmdl },
1746 { "MmMapLockedPages", (FUNC)ntoskrnl_mmaplockedpages },
1747 { "MmMapLockedPagesSpecifyCache",
1748 (FUNC)ntoskrnl_mmaplockedpages_cache },
1749 { "MmUnmapLockedPages", (FUNC)ntoskrnl_munmaplockedpages },
1750 { "MmBuildMdlForNonPagedPool", (FUNC)ntoskrnl_build_npaged_mdl },
1751 { "KeInitializeSpinLock", (FUNC)ntoskrnl_init_lock },
1752 { "IoIsWdmVersionAvailable", (FUNC)ntoskrnl_wdmver },
1753 { "IoGetDeviceProperty", (FUNC)ntoskrnl_devprop },
1754 { "KeInitializeMutex", (FUNC)ntoskrnl_init_mutex },
1755 { "KeReleaseMutex", (FUNC)ntoskrnl_release_mutex },
1756 { "KeReadStateMutex", (FUNC)ntoskrnl_read_mutex },
1757 { "KeInitializeEvent", (FUNC)ntoskrnl_init_event },
1758 { "KeSetEvent", (FUNC)ntoskrnl_set_event },
1759 { "KeResetEvent", (FUNC)ntoskrnl_reset_event },
1760 { "KeClearEvent", (FUNC)ntoskrnl_clear_event },
1761 { "KeReadStateEvent", (FUNC)ntoskrnl_read_event },
1762 { "KeInitializeTimer", (FUNC)ntoskrnl_init_timer },
1763 { "KeInitializeTimerEx", (FUNC)ntoskrnl_init_timer_ex },
1764 { "KeSetTimer", (FUNC)ntoskrnl_set_timer },
1765 { "KeSetTimerEx", (FUNC)ntoskrnl_set_timer_ex },
1766 { "KeCancelTimer", (FUNC)ntoskrnl_cancel_timer },
1767 { "KeReadStateTimer", (FUNC)ntoskrnl_read_timer },
1768 { "KeInitializeDpc", (FUNC)ntoskrnl_init_dpc },
1769 { "KeInsertQueueDpc", (FUNC)ntoskrnl_queue_dpc },
1770 { "KeRemoveQueueDpc", (FUNC)ntoskrnl_dequeue_dpc },
1771 { "ObReferenceObjectByHandle", (FUNC)ntoskrnl_objref },
1772 { "ObfDereferenceObject", (FUNC)ntoskrnl_objderef },
1773 { "ZwClose", (FUNC)ntoskrnl_zwclose },
1774 { "PsCreateSystemThread", (FUNC)ntoskrnl_create_thread },
1775 { "PsTerminateSystemThread", (FUNC)ntoskrnl_thread_exit },
1778 * This last entry is a catch-all for any function we haven't
1779 * implemented yet. The PE import list patching routine will
1780 * use it for any function that doesn't have an explicit match
1781 * in this table.
1784 { NULL, (FUNC)dummy },
1786 /* End of list. */
1788 { NULL, NULL },