/*
 * Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/ctype.h>
#include <sys/unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <machine/resource.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <compat/ndis/pe_var.h>
#include <compat/ndis/cfg_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/ndis_var.h>
#ifdef NTOSKRNL_DEBUG_TIMERS
static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, sysctl_show_timers, "I",
    "Show ntoskrnl timer stats");
#endif
typedef struct kdpc_queue kdpc_queue;

struct wb_ext {
	struct cv		we_cv;
	struct thread		*we_td;
};

typedef struct wb_ext wb_ext;

#define NTOSKRNL_TIMEOUTS	256
#ifdef NTOSKRNL_DEBUG_TIMERS
static uint64_t ntoskrnl_timer_fires;
static uint64_t ntoskrnl_timer_sets;
static uint64_t ntoskrnl_timer_reloads;
static uint64_t ntoskrnl_timer_cancels;
#endif

struct callout_entry {
	struct callout		ce_callout;
	list_entry		ce_list;
};

typedef struct callout_entry callout_entry;

static struct list_entry ntoskrnl_calllist;
static struct mtx ntoskrnl_calllock;
struct kuser_shared_data kuser_shared_data;

static struct list_entry ntoskrnl_intlist;
static kspin_lock ntoskrnl_intlock;
static uint8_t RtlEqualUnicodeString(unicode_string *,
	unicode_string *, uint8_t);
static void RtlCopyString(ansi_string *, const ansi_string *);
static void RtlCopyUnicodeString(unicode_string *,
	unicode_string *);
static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
	void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
static irp *IoBuildAsynchronousFsdRequest(uint32_t,
	device_object *, void *, uint32_t, uint64_t *, io_status_block *);
static irp *IoBuildDeviceIoControlRequest(uint32_t,
	device_object *, void *, uint32_t, void *, uint32_t,
	uint8_t, nt_kevent *, io_status_block *);
static irp *IoAllocateIrp(uint8_t, uint8_t);
static void IoReuseIrp(irp *, uint32_t);
static void IoFreeIrp(irp *);
static void IoInitializeIrp(irp *, uint16_t, uint8_t);
static irp *IoMakeAssociatedIrp(irp *, uint8_t);
static uint32_t KeWaitForMultipleObjects(uint32_t,
	nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
	int64_t *, wait_block *);
static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
static void ntoskrnl_satisfy_multiple_waits(wait_block *);
static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
static void ntoskrnl_insert_timer(ktimer *, int);
static void ntoskrnl_remove_timer(ktimer *);
#ifdef NTOSKRNL_DEBUG_TIMERS
static void ntoskrnl_show_timers(void);
#endif
static void ntoskrnl_timercall(void *);
static void ntoskrnl_dpc_thread(void *);
static void ntoskrnl_destroy_dpc_threads(void);
static void ntoskrnl_destroy_workitem_threads(void);
static void ntoskrnl_workitem_thread(void *);
static void ntoskrnl_workitem(device_object *, void *);
static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
static uint16_t READ_REGISTER_USHORT(uint16_t *);
static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
static uint32_t READ_REGISTER_ULONG(uint32_t *);
static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
static uint8_t READ_REGISTER_UCHAR(uint8_t *);
static int64_t _allmul(int64_t, int64_t);
static int64_t _alldiv(int64_t, int64_t);
static int64_t _allrem(int64_t, int64_t);
static int64_t _allshr(int64_t, uint8_t);
static int64_t _allshl(int64_t, uint8_t);
static uint64_t _aullmul(uint64_t, uint64_t);
static uint64_t _aulldiv(uint64_t, uint64_t);
static uint64_t _aullrem(uint64_t, uint64_t);
static uint64_t _aullshr(uint64_t, uint8_t);
static uint64_t _aullshl(uint64_t, uint8_t);
static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
static void InitializeSListHead(slist_header *);
static slist_entry *ntoskrnl_popsl(slist_header *);
static void ExFreePoolWithTag(void *, uint32_t);
static void ExInitializePagedLookasideList(paged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
static void ExDeletePagedLookasideList(paged_lookaside_list *);
static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
static slist_entry
	*ExInterlockedPushEntrySList(slist_header *,
	slist_entry *, kspin_lock *);
static slist_entry
	*ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
static uint32_t InterlockedIncrement(volatile uint32_t *);
static uint32_t InterlockedDecrement(volatile uint32_t *);
static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
static void *MmAllocateContiguousMemory(uint32_t, uint64_t);
static void *MmAllocateContiguousMemorySpecifyCache(uint32_t,
	uint64_t, uint64_t, uint64_t, enum nt_caching_type);
static void MmFreeContiguousMemory(void *);
static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t,
	enum nt_caching_type);
static uint32_t MmSizeOfMdl(void *, size_t);
static void *MmMapLockedPages(mdl *, uint8_t);
static void *MmMapLockedPagesSpecifyCache(mdl *,
	uint8_t, uint32_t, void *, uint32_t, uint32_t);
static void MmUnmapLockedPages(void *, mdl *);
static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
static void RtlZeroMemory(void *, size_t);
static void RtlSecureZeroMemory(void *, size_t);
static void RtlFillMemory(void *, size_t, uint8_t);
static void RtlMoveMemory(void *, const void *, size_t);
static ndis_status RtlCharToInteger(const char *, uint32_t, uint32_t *);
static void RtlCopyMemory(void *, const void *, size_t);
static size_t RtlCompareMemory(const void *, const void *, size_t);
static ndis_status RtlUnicodeStringToInteger(unicode_string *,
	uint32_t, uint32_t *);
static int atoi (const char *);
static long atol (const char *);
static int rand(void);
static void srand(unsigned int);
static void KeQuerySystemTime(uint64_t *);
static uint32_t KeTickCount(void);
static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
static int32_t IoOpenDeviceRegistryKey(struct device_object *, uint32_t,
	uint32_t, void **);
static void ntoskrnl_thrfunc(void *);
static ndis_status PsCreateSystemThread(ndis_handle *,
	uint32_t, void *, ndis_handle, void *, void *, void *);
static ndis_status PsTerminateSystemThread(ndis_status);
static ndis_status IoGetDeviceObjectPointer(unicode_string *,
	uint32_t, void *, device_object *);
static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
	uint32_t, void *, uint32_t *);
static void KeInitializeMutex(kmutant *, uint32_t);
static uint32_t KeReleaseMutex(kmutant *, uint8_t);
static uint32_t KeReadStateMutex(kmutant *);
static ndis_status ObReferenceObjectByHandle(ndis_handle,
	uint32_t, void *, uint8_t, void **, void **);
static void ObfDereferenceObject(void *);
static uint32_t ZwClose(ndis_handle);
static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
	uint32_t, void *);
static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
static void *ntoskrnl_memset(void *, int, size_t);
static void *ntoskrnl_memmove(void *, void *, size_t);
static void *ntoskrnl_memchr(void *, unsigned char, size_t);
static char *ntoskrnl_strstr(char *, char *);
static char *ntoskrnl_strncat(char *, char *, size_t);
static int ntoskrnl_toupper(int);
static int ntoskrnl_tolower(int);
static funcptr ntoskrnl_findwrap(funcptr);
static uint32_t DbgPrint(char *, ...);
static void DbgBreakPoint(void);
static void KeBugCheckEx(uint32_t, u_long, u_long, u_long, u_long);
static int32_t KeDelayExecutionThread(uint8_t, uint8_t, int64_t *);
static int32_t KeSetPriorityThread(struct thread *, int32_t);
static void dummy(void);
static struct mtx ntoskrnl_dispatchlock;
static struct mtx ntoskrnl_interlock;
static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
static uma_zone_t mdl_zone;
static uma_zone_t iw_zone;
static struct kdpc_queue *kq_queues;
static struct kdpc_queue *wq_queues;
static int wq_idx = 0;
int
ntoskrnl_libinit(void)
{
	image_patch_table	*patch;
	callout_entry		*e;
	kdpc_queue		*kq;
	struct proc		*p;
	int			error, i;

	mtx_init(&ntoskrnl_dispatchlock,
	    "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
	mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
	KeInitializeSpinLock(&ntoskrnl_cancellock);
	KeInitializeSpinLock(&ntoskrnl_intlock);
	TAILQ_INIT(&ntoskrnl_reflist);

	InitializeListHead(&ntoskrnl_calllist);
	InitializeListHead(&ntoskrnl_intlist);
	mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);

	kq_queues = ExAllocatePoolWithTag(NonPagedPool,
#ifdef NTOSKRNL_MULTIPLE_DPCS
	    sizeof(kdpc_queue) * mp_ncpus, 0);
#else
	    sizeof(kdpc_queue), 0);
#endif
	if (kq_queues == NULL)
		return (ENOMEM);

	wq_queues = ExAllocatePoolWithTag(NonPagedPool,
	    sizeof(kdpc_queue) * WORKITEM_THREADS, 0);
	if (wq_queues == NULL)
		return (ENOMEM);

#ifdef NTOSKRNL_MULTIPLE_DPCS
	bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus);
#else
	bzero((char *)kq_queues, sizeof(kdpc_queue));
#endif
	bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);

	/*
	 * Launch the DPC threads.
	 */

#ifdef NTOSKRNL_MULTIPLE_DPCS
	for (i = 0; i < mp_ncpus; i++) {
#else
	for (i = 0; i < 1; i++) {
#endif
		kq = kq_queues + i;
		error = kproc_create(ntoskrnl_dpc_thread, kq, &p,
		    RFHIGHPID, NDIS_KSTACK_PAGES, "Windows DPC %d", i);
		if (error)
			panic("failed to launch DPC thread");
	}

	/*
	 * Launch the workitem threads.
	 */

	for (i = 0; i < WORKITEM_THREADS; i++) {
		kq = wq_queues + i;
		error = kproc_create(ntoskrnl_workitem_thread, kq, &p,
		    RFHIGHPID, NDIS_KSTACK_PAGES, "Windows Workitem %d", i);
		if (error)
			panic("failed to launch workitem thread");
	}

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_wrap((funcptr)patch->ipt_func,
		    (funcptr *)&patch->ipt_wrap,
		    patch->ipt_argcnt, patch->ipt_ftype);
		patch++;
	}

	for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
		e = ExAllocatePoolWithTag(NonPagedPool,
		    sizeof(callout_entry), 0);
		if (e == NULL)
			panic("failed to allocate timeouts");
		mtx_lock_spin(&ntoskrnl_calllock);
		InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
		mtx_unlock_spin(&ntoskrnl_calllock);
	}
	/*
	 * MDLs are supposed to be variable size (they describe
	 * buffers containing some number of pages, but we don't
	 * know ahead of time how many pages that will be). But
	 * always allocating them off the heap is very slow. As
	 * a compromise, we create an MDL UMA zone big enough to
	 * handle any buffer requiring up to 16 pages, and we
	 * use those for any MDLs for buffers of 16 pages or less
	 * in size. For buffers larger than that (which we assume
	 * will be few and far between), we allocate the MDLs off
	 * the heap.
	 */
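	/*
	 * Illustrative sizing (an assumption, not from the original
	 * sources): with 4 KB pages, an MDL covering a 16-page buffer
	 * needs MmSizeOfMdl() = sizeof(struct mdl) +
	 * 16 * sizeof(vm_offset_t *) bytes, which is the ballpark
	 * MDL_ZONE_SIZE is expected to cover.
	 */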
	mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	return (0);
}
int
ntoskrnl_libfini(void)
{
	image_patch_table	*patch;
	callout_entry		*e;
	list_entry		*l;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_unwrap(patch->ipt_wrap);
		patch++;
	}

	/* Stop the workitem queues. */
	ntoskrnl_destroy_workitem_threads();
	/* Stop the DPC queues. */
	ntoskrnl_destroy_dpc_threads();

	ExFreePool(kq_queues);
	ExFreePool(wq_queues);

	uma_zdestroy(mdl_zone);
	uma_zdestroy(iw_zone);

	mtx_lock_spin(&ntoskrnl_calllock);
	while(!IsListEmpty(&ntoskrnl_calllist)) {
		l = RemoveHeadList(&ntoskrnl_calllist);
		e = CONTAINING_RECORD(l, callout_entry, ce_list);
		mtx_unlock_spin(&ntoskrnl_calllock);
		ExFreePool(e);
		mtx_lock_spin(&ntoskrnl_calllock);
	}
	mtx_unlock_spin(&ntoskrnl_calllock);

	mtx_destroy(&ntoskrnl_dispatchlock);
	mtx_destroy(&ntoskrnl_interlock);
	mtx_destroy(&ntoskrnl_calllock);

	return (0);
}
/*
 * We need to be able to reference this externally from the wrapper;
 * GCC only generates a local implementation of memset.
 */
static void *
ntoskrnl_memset(void *buf, int ch, size_t size)
{
	return (memset(buf, ch, size));
}

static void *
ntoskrnl_memmove(void *dst, void *src, size_t size)
{
	bcopy(src, dst, size);
	return (dst);
}

static void *
ntoskrnl_memchr(void *buf, unsigned char ch, size_t len)
{
	if (len != 0) {
		unsigned char *p = buf;

		do {
			if (*p++ == ch)
				return (p - 1);
		} while (--len != 0);
	}
	return (NULL);
}
static char *
ntoskrnl_strstr(char *s, char *find)
{
	char c, sc;
	size_t len;

	if ((c = *find++) != 0) {
		len = strlen(find);
		do {
			do {
				if ((sc = *s++) == 0)
					return (NULL);
			} while (sc != c);
		} while (strncmp(s, find, len) != 0);
		s--;
	}
	return (s);
}

/* Taken from libc */
static char *
ntoskrnl_strncat(char *dst, char *src, size_t n)
{
	if (n != 0) {
		char *d = dst;
		const char *s = src;

		while (*d != 0)
			d++;
		do {
			if ((*d = *s++) == 0)
				break;
			d++;
		} while (--n != 0);
		*d = 0;
	}
	return (dst);
}
static uint8_t
RtlEqualUnicodeString(unicode_string *str1, unicode_string *str2,
    uint8_t caseinsensitive)
{
	int			i;

	if (str1->us_len != str2->us_len)
		return (FALSE);

	for (i = 0; i < str1->us_len; i++) {
		if (caseinsensitive == TRUE) {
			if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
			    toupper((char)(str2->us_buf[i] & 0xFF)))
				return (FALSE);
		} else {
			if (str1->us_buf[i] != str2->us_buf[i])
				return (FALSE);
		}
	}

	return (TRUE);
}
static void
RtlCopyString(ansi_string *dst, const ansi_string *src)
{
	if (src != NULL && src->as_buf != NULL && dst->as_buf != NULL) {
		dst->as_len = min(src->as_len, dst->as_maxlen);
		memcpy(dst->as_buf, src->as_buf, dst->as_len);
		if (dst->as_len < dst->as_maxlen)
			dst->as_buf[dst->as_len] = 0;
	}
}
static void
RtlCopyUnicodeString(unicode_string *dest, unicode_string *src)
{
	if (dest->us_maxlen >= src->us_len)
		dest->us_len = src->us_len;
	else
		dest->us_len = dest->us_maxlen;
	memcpy(dest->us_buf, src->us_buf, dest->us_len);
}
static void
ntoskrnl_ascii_to_unicode(char *ascii, uint16_t *unicode, int len)
{
	uint16_t		*ustr;
	int			i;

	ustr = unicode;
	for (i = 0; i < len; i++) {
		*ustr = (uint16_t)ascii[i];
		ustr++;
	}
}

static void
ntoskrnl_unicode_to_ascii(uint16_t *unicode, char *ascii, int len)
{
	uint8_t			*astr;
	int			i;

	astr = ascii;
	for (i = 0; i < len / 2; i++) {
		*astr = (uint8_t)unicode[i];
		astr++;
	}
}
uint32_t
RtlUnicodeStringToAnsiString(ansi_string *dest, unicode_string *src, uint8_t allocate)
{
	if (dest == NULL || src == NULL)
		return (STATUS_INVALID_PARAMETER);

	dest->as_len = src->us_len / 2;
	if (dest->as_maxlen < dest->as_len)
		dest->as_len = dest->as_maxlen;

	if (allocate == TRUE) {
		dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
		    (src->us_len / 2) + 1, 0);
		if (dest->as_buf == NULL)
			return (STATUS_INSUFFICIENT_RESOURCES);
		dest->as_len = dest->as_maxlen = src->us_len / 2;
	} else {
		dest->as_len = src->us_len / 2; /* XXX */
		if (dest->as_maxlen < dest->as_len)
			dest->as_len = dest->as_maxlen;
	}

	ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
	    dest->as_len * 2);

	return (STATUS_SUCCESS);
}
uint32_t
RtlAnsiStringToUnicodeString(unicode_string *dest, ansi_string *src,
    uint8_t allocate)
{
	if (dest == NULL || src == NULL)
		return (STATUS_INVALID_PARAMETER);

	if (allocate == TRUE) {
		dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
		    src->as_len * 2, 0);
		if (dest->us_buf == NULL)
			return (STATUS_INSUFFICIENT_RESOURCES);
		dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
	} else {
		dest->us_len = src->as_len * 2; /* XXX */
		if (dest->us_maxlen < dest->us_len)
			dest->us_len = dest->us_maxlen;
	}

	ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
	    dest->us_len / 2);

	return (STATUS_SUCCESS);
}
void *
ExAllocatePoolWithTag(uint32_t pooltype, size_t len, uint32_t tag)
{
	void			*buf;

	buf = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buf == NULL)
		return (NULL);

	return (buf);
}

static void
ExFreePoolWithTag(void *buf, uint32_t tag)
{
	ExFreePool(buf);
}
uint32_t
IoAllocateDriverObjectExtension(driver_object *drv, void *clid,
    uint32_t extlen, void **ext)
{
	custom_extension	*ce;

	ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
	    + extlen, 0);

	if (ce == NULL)
		return (STATUS_INSUFFICIENT_RESOURCES);

	ce->ce_clid = clid;
	InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));

	*ext = (void *)(ce + 1);

	return (STATUS_SUCCESS);
}
void *
IoGetDriverObjectExtension(driver_object *drv, void *clid)
{
	list_entry		*e;
	custom_extension	*ce;

	/*
	 * Sanity check. Our dummy bus drivers don't have
	 * any driver extensions.
	 */

	if (drv->dro_driverext == NULL)
		return (NULL);

	e = drv->dro_driverext->dre_usrext.nle_flink;
	while (e != &drv->dro_driverext->dre_usrext) {
		ce = (custom_extension *)e;
		if (ce->ce_clid == clid)
			return ((void *)(ce + 1));
		e = e->nle_flink;
	}

	return (NULL);
}
uint32_t
IoCreateDevice(driver_object *drv, uint32_t devextlen, unicode_string *devname,
    uint32_t devtype, uint32_t devchars, uint8_t exclusive,
    device_object **newdev)
{
	device_object		*dev;

	dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
	if (dev == NULL)
		return (STATUS_INSUFFICIENT_RESOURCES);

	dev->do_type = devtype;
	dev->do_drvobj = drv;
	dev->do_currirp = NULL;

	if (devextlen) {
		dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
		    devextlen, 0);

		if (dev->do_devext == NULL) {
			ExFreePool(dev);
			return (STATUS_INSUFFICIENT_RESOURCES);
		}

		bzero(dev->do_devext, devextlen);
	} else
		dev->do_devext = NULL;

	dev->do_size = sizeof(device_object) + devextlen;
	dev->do_refcnt = 1;
	dev->do_attacheddev = NULL;
	dev->do_nextdev = NULL;
	dev->do_devtype = devtype;
	dev->do_stacksize = 1;
	dev->do_alignreq = 1;
	dev->do_characteristics = devchars;
	dev->do_iotimer = NULL;
	KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);

	/*
	 * Vpd is used for disk/tape devices,
	 * but we don't support those. (Yet.)
	 */
	dev->do_vpb = NULL;

	dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
	    sizeof(devobj_extension), 0);

	if (dev->do_devobj_ext == NULL) {
		if (dev->do_devext != NULL)
			ExFreePool(dev->do_devext);
		ExFreePool(dev);
		return (STATUS_INSUFFICIENT_RESOURCES);
	}

	dev->do_devobj_ext->dve_type = 0;
	dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
	dev->do_devobj_ext->dve_devobj = dev;

	/*
	 * Attach this device to the driver object's list
	 * of devices. Note: this is not the same as attaching
	 * the device to the device stack. The driver's AddDevice
	 * routine must explicitly call IoAttachDeviceToDeviceStack()
	 * to do that.
	 */

	if (drv->dro_devobj == NULL) {
		drv->dro_devobj = dev;
		dev->do_nextdev = NULL;
	} else {
		dev->do_nextdev = drv->dro_devobj;
		drv->dro_devobj = dev;
	}

	*newdev = dev;

	return (STATUS_SUCCESS);
}
void
IoDeleteDevice(device_object *dev)
{
	device_object		*prev;

	if (dev == NULL)
		return;

	if (dev->do_devobj_ext != NULL)
		ExFreePool(dev->do_devobj_ext);

	if (dev->do_devext != NULL)
		ExFreePool(dev->do_devext);

	/* Unlink the device from the driver's device list. */

	prev = dev->do_drvobj->dro_devobj;
	if (prev == dev)
		dev->do_drvobj->dro_devobj = dev->do_nextdev;
	else {
		while (prev->do_nextdev != dev)
			prev = prev->do_nextdev;
		prev->do_nextdev = dev->do_nextdev;
	}

	ExFreePool(dev);
}
)
864 while (d
->do_attacheddev
!= NULL
)
865 d
= d
->do_attacheddev
;
static irp *
IoBuildSynchronousFsdRequest(uint32_t func, device_object *dobj, void *buf,
    uint32_t len, uint64_t *off, nt_kevent *event, io_status_block *status)
{
	irp			*ip;

	ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
	if (ip == NULL)
		return (NULL);
	ip->irp_usrevent = event;

	return (ip);
}
static irp *
IoBuildAsynchronousFsdRequest(uint32_t func, device_object *dobj, void *buf,
    uint32_t len, uint64_t *off, io_status_block *status)
{
	irp			*ip;
	io_stack_location	*sl;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return (NULL);

	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = func;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;

	ip->irp_userbuf = buf;

	if (dobj->do_flags & DO_BUFFERED_IO) {
		ip->irp_assoc.irp_sysbuf =
		    ExAllocatePoolWithTag(NonPagedPool, len, 0);
		if (ip->irp_assoc.irp_sysbuf == NULL) {
			IoFreeIrp(ip);
			return (NULL);
		}
		bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
	}

	if (dobj->do_flags & DO_DIRECT_IO) {
		ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
		if (ip->irp_mdl == NULL) {
			if (ip->irp_assoc.irp_sysbuf != NULL)
				ExFreePool(ip->irp_assoc.irp_sysbuf);
			IoFreeIrp(ip);
			return (NULL);
		}
		ip->irp_userbuf = NULL;
		ip->irp_assoc.irp_sysbuf = NULL;
	}

	if (func == IRP_MJ_READ) {
		sl->isl_parameters.isl_read.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_read.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_read.isl_byteoff = 0;
	}

	if (func == IRP_MJ_WRITE) {
		sl->isl_parameters.isl_write.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_write.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_write.isl_byteoff = 0;
	}

	return (ip);
}
static irp *
IoBuildDeviceIoControlRequest(uint32_t iocode, device_object *dobj, void *ibuf,
    uint32_t ilen, void *obuf, uint32_t olen, uint8_t isinternal,
    nt_kevent *event, io_status_block *status)
{
	irp			*ip;
	io_stack_location	*sl;
	uint32_t		buflen;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return (NULL);
	ip->irp_usrevent = event;
	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = isinternal == TRUE ?
	    IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;
	sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
	sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
	sl->isl_parameters.isl_ioctl.isl_obuflen = olen;

	switch(IO_METHOD(iocode)) {
	case METHOD_BUFFERED:
		if (ilen > olen)
			buflen = ilen;
		else
			buflen = olen;
		if (buflen) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return (NULL);
			}
		}
		if (ilen && ibuf != NULL) {
			bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
			bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
			    buflen - ilen);
		} else
			bzero(ip->irp_assoc.irp_sysbuf, ilen);
		ip->irp_userbuf = obuf;
		break;
	case METHOD_IN_DIRECT:
	case METHOD_OUT_DIRECT:
		if (ilen && ibuf != NULL) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return (NULL);
			}
			bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
		}
		if (olen && obuf != NULL) {
			ip->irp_mdl = IoAllocateMdl(obuf, olen,
			    FALSE, FALSE, ip);
			/*
			 * Normally we would MmProbeAndLockPages()
			 * here, but we don't have to in our
			 * implementation.
			 */
		}
		break;
	case METHOD_NEITHER:
		ip->irp_userbuf = obuf;
		sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
		break;
	default:
		break;
	}

	/*
	 * Ideally, we should associate this IRP with the calling
	 * thread here.
	 */

	return (ip);
}
static irp *
IoAllocateIrp(uint8_t stsize, uint8_t chargequota)
{
	irp			*i;

	i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
	if (i == NULL)
		return (NULL);

	IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);

	return (i);
}
*ip
, uint8_t stsize
)
1068 associrp
= IoAllocateIrp(stsize
, FALSE
);
1069 if (associrp
== NULL
)
1072 mtx_lock(&ntoskrnl_dispatchlock
);
1073 associrp
->irp_flags
|= IRP_ASSOCIATED_IRP
;
1074 associrp
->irp_tail
.irp_overlay
.irp_thread
=
1075 ip
->irp_tail
.irp_overlay
.irp_thread
;
1076 associrp
->irp_assoc
.irp_master
= ip
;
1077 mtx_unlock(&ntoskrnl_dispatchlock
);
1090 IoInitializeIrp(irp
*io
, uint16_t psize
, uint8_t ssize
)
1092 bzero((char *)io
, IoSizeOfIrp(ssize
));
1093 io
->irp_size
= psize
;
1094 io
->irp_stackcnt
= ssize
;
1095 io
->irp_currentstackloc
= ssize
;
1096 InitializeListHead(&io
->irp_thlist
);
1097 io
->irp_tail
.irp_overlay
.irp_csl
=
1098 (io_stack_location
*)(io
+ 1) + ssize
;
static void
IoReuseIrp(irp *ip, uint32_t status)
{
	uint8_t			allocflags;

	allocflags = ip->irp_allocflags;
	IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
	ip->irp_iostat.isb_status = status;
	ip->irp_allocflags = allocflags;
}
void
IoAcquireCancelSpinLock(uint8_t *irql)
{
	KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
}

void
IoReleaseCancelSpinLock(uint8_t irql)
{
	KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
}
uint8_t
IoCancelIrp(irp *ip)
{
	cancel_func		cfunc;
	uint8_t			cancelirql;

	IoAcquireCancelSpinLock(&cancelirql);
	cfunc = IoSetCancelRoutine(ip, NULL);
	ip->irp_cancel = TRUE;
	if (cfunc == NULL) {
		IoReleaseCancelSpinLock(cancelirql);
		return (FALSE);
	}
	ip->irp_cancelirql = cancelirql;
	MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
	return (uint8_t)IoSetCancelValue(ip, TRUE);
}
uint32_t
IofCallDriver(device_object *dobj, irp *ip)
{
	driver_object		*drvobj;
	io_stack_location	*sl;
	uint32_t		status;
	driver_dispatch		disp;

	drvobj = dobj->do_drvobj;

	if (ip->irp_currentstackloc <= 0)
		panic("IoCallDriver(): out of stack locations");

	IoSetNextIrpStackLocation(ip);
	sl = IoGetCurrentIrpStackLocation(ip);

	sl->isl_devobj = dobj;

	disp = drvobj->dro_dispatch[sl->isl_major];
	status = MSCALL2(disp, dobj, ip);

	return (status);
}
void
IofCompleteRequest(irp *ip, uint8_t prioboost)
{
	uint32_t		status;
	device_object		*dobj;
	io_stack_location	*sl;
	completion_func		cf;

	KASSERT(ip->irp_iostat.isb_status != STATUS_PENDING,
	    ("incorrect IRP(%p) status (STATUS_PENDING)", ip));

	sl = IoGetCurrentIrpStackLocation(ip);
	IoSkipCurrentIrpStackLocation(ip);

	do {
		if (sl->isl_ctl & SL_PENDING_RETURNED)
			ip->irp_pendingreturned = TRUE;

		if (ip->irp_currentstackloc != (ip->irp_stackcnt + 1))
			dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
		else
			dobj = NULL;

		if (sl->isl_completionfunc != NULL &&
		    ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
		    sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
		    (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
		    sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
		    (ip->irp_cancel == TRUE &&
		    sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
			cf = sl->isl_completionfunc;
			status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
			if (status == STATUS_MORE_PROCESSING_REQUIRED)
				return;
		} else {
			if ((ip->irp_currentstackloc <= ip->irp_stackcnt) &&
			    (ip->irp_pendingreturned == TRUE))
				IoMarkIrpPending(ip);
		}

		/* move to the next. */
		IoSkipCurrentIrpStackLocation(ip);
		sl++;
	} while (ip->irp_currentstackloc <= (ip->irp_stackcnt + 1));

	if (ip->irp_usriostat != NULL)
		*ip->irp_usriostat = ip->irp_iostat;
	if (ip->irp_usrevent != NULL)
		KeSetEvent(ip->irp_usrevent, prioboost, FALSE);

	/* Handle any associated IRPs. */

	if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
		uint32_t		masterirpcnt;
		irp			*masterirp;
		mdl			*m;

		masterirp = ip->irp_assoc.irp_master;
		masterirpcnt =
		    InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);

		while ((m = ip->irp_mdl) != NULL) {
			ip->irp_mdl = m->mdl_next;
			IoFreeMdl(m);
		}
		IoFreeIrp(ip);
		if (masterirpcnt == 0)
			IoCompleteRequest(masterirp, IO_NO_INCREMENT);
		return;
	}

	/* With any luck, these conditions will never arise. */

	if (ip->irp_flags & IRP_PAGING_IO) {
		if (ip->irp_mdl != NULL)
			IoFreeMdl(ip->irp_mdl);
		IoFreeIrp(ip);
	}
}
, &irql
);
1260 l
= ntoskrnl_intlist
.nle_flink
;
1261 while (l
!= &ntoskrnl_intlist
) {
1262 iobj
= CONTAINING_RECORD(l
, kinterrupt
, ki_list
);
1263 claimed
= MSCALL2(iobj
->ki_svcfunc
, iobj
, iobj
->ki_svcctx
);
1264 if (claimed
== TRUE
)
1268 KeReleaseSpinLock(&ntoskrnl_intlock
, irql
);
1272 KeAcquireInterruptSpinLock(iobj
)
1276 KeAcquireSpinLock(&ntoskrnl_intlock
, &irql
);
1281 KeReleaseInterruptSpinLock(kinterrupt
*iobj
, uint8_t irql
)
1283 KeReleaseSpinLock(&ntoskrnl_intlock
, irql
);
uint8_t
KeSynchronizeExecution(kinterrupt *iobj, void *syncfunc, void *syncctx)
{
	uint8_t			irql;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	MSCALL1(syncfunc, syncctx);
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);

	return (TRUE);
}
/*
 * IoConnectInterrupt() is passed only the interrupt vector and
 * irql that a device wants to use, but no device-specific tag
 * of any kind. This conflicts rather badly with FreeBSD's
 * bus_setup_intr(), which needs the device_t for the device
 * requesting interrupt delivery. In order to bypass this
 * inconsistency, we implement a second level of interrupt
 * dispatching on top of bus_setup_intr(). All devices use
 * ntoskrnl_intr() as their ISR, and any device requesting
 * interrupts will be registered with ntoskrnl_intr()'s interrupt
 * dispatch list. When an interrupt arrives, we walk the list
 * and invoke all the registered ISRs. This effectively makes all
 * interrupts shared, but it's the only way to duplicate the
 * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
 */
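/*
 * Illustrative driver-side flow (a sketch; my_isr and my_ctx are
 * hypothetical names):
 *
 *	kinterrupt *iobj;
 *	IoConnectInterrupt(&iobj, my_isr, my_ctx, NULL, vector,
 *	    irql, syncirql, imode, shared, affinity, FALSE);
 *
 * From then on ntoskrnl_intr() invokes my_isr via MSCALL2() on every
 * interrupt until IoDisconnectInterrupt(iobj) unlinks it from the
 * dispatch list.
 */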
uint32_t
IoConnectInterrupt(kinterrupt **iobj, void *svcfunc, void *svcctx,
    kspin_lock *lock, uint32_t vector, uint8_t irql, uint8_t syncirql,
    uint8_t imode, uint8_t shared, uint32_t affinity, uint8_t savefloat)
{
	uint8_t			curirql;

	*iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
	if (*iobj == NULL)
		return (STATUS_INSUFFICIENT_RESOURCES);

	(*iobj)->ki_svcfunc = svcfunc;
	(*iobj)->ki_svcctx = svcctx;

	if (lock == NULL) {
		KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
		(*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
	} else
		(*iobj)->ki_lock = lock;

	KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
	InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
	KeReleaseSpinLock(&ntoskrnl_intlock, curirql);

	return (STATUS_SUCCESS);
}
void
IoDisconnectInterrupt(kinterrupt *iobj)
{
	uint8_t			irql;

	if (iobj == NULL)
		return;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	RemoveEntryList((&iobj->ki_list));
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);

	ExFreePool(iobj);
}
device_object *
IoAttachDeviceToDeviceStack(device_object *src, device_object *dst)
{
	device_object		*attached;

	mtx_lock(&ntoskrnl_dispatchlock);
	attached = IoGetAttachedDevice(dst);
	attached->do_attacheddev = src;
	src->do_attacheddev = NULL;
	src->do_stacksize = attached->do_stacksize + 1;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return (attached);
}
void
IoDetachDevice(device_object *topdev)
{
	device_object		*tail;

	mtx_lock(&ntoskrnl_dispatchlock);

	/* First, break the chain. */
	tail = topdev->do_attacheddev;
	if (tail == NULL) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return;
	}
	topdev->do_attacheddev = tail->do_attacheddev;
	topdev->do_refcnt--;

	/* Now reduce the stacksize count for the tail objects. */

	tail = topdev->do_attacheddev;
	while (tail != NULL) {
		tail->do_stacksize--;
		tail = tail->do_attacheddev;
	}

	mtx_unlock(&ntoskrnl_dispatchlock);
}
/*
 * For the most part, an object is considered signalled if
 * dh_sigstate == TRUE. The exception is for mutant objects
 * (mutexes), where the logic works like this:
 *
 * - If the thread already owns the object and sigstate is
 *   less than or equal to 0, then the object is considered
 *   signalled (recursive acquisition).
 * - If dh_sigstate == 1, the object is also considered
 *   signalled.
 */
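/*
 * Worked example (illustrative, based on the rules above): a mutant
 * starts out with dh_sigstate == 1 (signalled, unowned). The first
 * acquisition drops dh_sigstate to 0 and records km_ownerthread; each
 * recursive acquisition by the owner pushes it further negative, and
 * each release increments it back toward 1.
 */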
static int
ntoskrnl_is_signalled(nt_dispatch_header *obj, struct thread *td)
{
	kmutant			*km;

	if (obj->dh_type == DISP_TYPE_MUTANT) {
		km = (kmutant *)obj;
		if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
		    obj->dh_sigstate == 1)
			return (TRUE);
		return (FALSE);
	}

	if (obj->dh_sigstate > 0)
		return (TRUE);
	return (FALSE);
}
, td
)
1439 nt_dispatch_header
*obj
;
1444 switch (obj
->dh_type
) {
1445 case DISP_TYPE_MUTANT
:
1446 km
= (struct kmutant
*)obj
;
1449 * If sigstate reaches 0, the mutex is now
1450 * non-signalled (the new thread owns it).
1452 if (obj
->dh_sigstate
== 0) {
1453 km
->km_ownerthread
= td
;
1454 if (km
->km_abandoned
== TRUE
)
1455 km
->km_abandoned
= FALSE
;
1458 /* Synchronization objects get reset to unsignalled. */
1459 case DISP_TYPE_SYNCHRONIZATION_EVENT
:
1460 case DISP_TYPE_SYNCHRONIZATION_TIMER
:
1461 obj
->dh_sigstate
= 0;
1463 case DISP_TYPE_SEMAPHORE
:
1472 ntoskrnl_satisfy_multiple_waits(wb
)
1479 td
= wb
->wb_kthread
;
1482 ntoskrnl_satisfy_wait(wb
->wb_object
, td
);
1483 cur
->wb_awakened
= TRUE
;
1485 } while (cur
!= wb
);
/* Always called with dispatcher lock held. */
static void
ntoskrnl_waittest(nt_dispatch_header *obj, uint32_t increment)
{
	wait_block		*w, *next;
	list_entry		*e;
	struct thread		*td;
	wb_ext			*we;
	int			satisfied;

	/*
	 * Once an object has been signalled, we walk its list of
	 * wait blocks. If a wait block can be awakened, then satisfy
	 * waits as necessary and wake the thread.
	 *
	 * The rules work like this:
	 *
	 * If a wait block is marked as WAITTYPE_ANY, then
	 * we can satisfy the wait conditions on the current
	 * object and wake the thread right away. Satisfying
	 * the wait also has the effect of breaking us out
	 * of the search loop.
	 *
	 * If the object is marked as WAITTYPE_ALL, then the
	 * wait block will be part of a circularly linked
	 * list of wait blocks belonging to a waiting thread
	 * that's sleeping in KeWaitForMultipleObjects(). In
	 * order to wake the thread, all the objects in the
	 * wait list must be in the signalled state. If they
	 * are, we then satisfy all of them and wake the
	 * thread.
	 */
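	/*
	 * Example (illustrative): a thread sleeping in
	 * KeWaitForMultipleObjects() on three objects with WAITTYPE_ALL
	 * stays asleep until all three are signalled at once, whereas
	 * with WAITTYPE_ANY the first signalled object satisfies the
	 * wait and wakes it immediately.
	 */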
	e = obj->dh_waitlisthead.nle_flink;

	while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
		w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
		we = w->wb_ext;
		td = we->we_td;
		satisfied = FALSE;
		if (w->wb_waittype == WAITTYPE_ANY) {
			/*
			 * Thread can be awakened if
			 * any wait is satisfied.
			 */
			ntoskrnl_satisfy_wait(obj, td);
			satisfied = TRUE;
			w->wb_awakened = TRUE;
		} else {
			/*
			 * Thread can only be woken up
			 * if all waits are satisfied.
			 * If the thread is waiting on multiple
			 * objects, they should all be linked
			 * through the wb_next pointers in the
			 * wait blocks.
			 */
			satisfied = TRUE;
			next = w->wb_next;
			while (next != w) {
				if (ntoskrnl_is_signalled(obj, td) == FALSE) {
					satisfied = FALSE;
					break;
				}
				next = next->wb_next;
			}
			if (satisfied == TRUE)
				ntoskrnl_satisfy_multiple_waits(w);
		}

		if (satisfied == TRUE)
			cv_broadcastpri(&we->we_cv,
			    (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
			    w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);

		e = e->nle_flink;
	}
}
/*
 * Return the number of 100 nanosecond intervals since
 * January 1, 1601. (?!?!)
 */
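/*
 * Worked conversion (illustrative): there are 11644473600 seconds
 * between 1601-01-01 and the Unix epoch, so
 *
 *	ticks = tv_sec * 10^7 + tv_nsec / 100 + 11644473600 * 10^7
 *
 * and, going the other way, unix_sec = ticks / 10^7 - 11644473600.
 */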
void
ntoskrnl_time(uint64_t *tval)
{
	struct timespec		ts;

	nanotime(&ts);
	*tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
	    11644473600 * 10000000; /* 100ns ticks from 1601 to 1970 */
}
static void
KeQuerySystemTime(uint64_t *current_time)
{
	ntoskrnl_time(current_time);
}

static uint32_t
KeTickCount(void)
{
	struct timeval tv;

	getmicrouptime(&tv);
	return tvtohz(&tv);
}
/*
 * KeWaitForSingleObject() is a tricky beast, because it can be used
 * with several different object types: semaphores, timers, events,
 * mutexes and threads. Semaphores don't appear very often, but the
 * other object types are quite common. KeWaitForSingleObject() is
 * what's normally used to acquire a mutex, and it can be used to
 * wait for a thread termination.
 *
 * The Windows NDIS API is implemented in terms of Windows kernel
 * primitives, and some of the object manipulation is duplicated in
 * NDIS. For example, NDIS has timers and events, which are actually
 * Windows kevents and ktimers. Now, you're supposed to only use the
 * NDIS variants of these objects within the confines of the NDIS API,
 * but there are some naughty developers out there who will use
 * KeWaitForSingleObject() on NDIS timer and event objects, so we
 * have to support that as well. Consequently, our NDIS timer and event
 * code has to be closely tied into our ntoskrnl timer and event code,
 * just as it is in Windows.
 *
 * KeWaitForSingleObject() may do different things for different kinds
 * of objects:
 *
 * - For events, we check if the event has been signalled. If the
 *   event is already in the signalled state, we just return immediately,
 *   otherwise we wait for it to be set to the signalled state by someone
 *   else calling KeSetEvent(). Events can be either synchronization or
 *   notification events.
 *
 * - For timers, if the timer has already fired and the timer is in
 *   the signalled state, we just return, otherwise we wait on the
 *   timer. Unlike an event, timers get signalled automatically when
 *   they expire rather than someone having to trip them manually.
 *   Timers initialized with KeInitializeTimer() are always notification
 *   events: KeInitializeTimerEx() lets you initialize a timer as
 *   either a notification or synchronization event.
 *
 * - For mutexes, we try to acquire the mutex and if we can't, we wait
 *   on the mutex until it's available and then grab it. When a mutex is
 *   released, it enters the signalled state, which wakes up one of the
 *   threads waiting to acquire it. Mutexes are always synchronization
 *   events.
 *
 * - For threads, the only thing we do is wait until the thread object
 *   enters a signalled state, which occurs when the thread terminates.
 *   Threads are always notification events.
 *
 * A notification event wakes up all threads waiting on an object. A
 * synchronization event wakes up just one. Also, a synchronization event
 * is auto-clearing, which means we automatically set the event back to
 * the non-signalled state once the wakeup is done.
 */
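/*
 * Typical driver-side usage (a sketch; EVENT_TYPE_NOTIFY is assumed
 * to come from ntoskrnl_var.h):
 *
 *	nt_kevent ev;
 *	int64_t duetime = -10000000;	// relative wait: 1 second
 *
 *	KeInitializeEvent(&ev, EVENT_TYPE_NOTIFY, FALSE);
 *	...
 *	if (KeWaitForSingleObject(&ev, 0, 0, FALSE, &duetime) ==
 *	    STATUS_TIMEOUT)
 *		// nobody signalled the event within one second
 *
 * A negative duetime is relative, a positive one absolute; see the
 * conversion comment inside KeWaitForSingleObject() below.
 */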
uint32_t
KeWaitForSingleObject(void *arg, uint32_t reason, uint32_t mode,
    uint8_t alertable, int64_t *duetime)
{
	wait_block		w;
	struct thread		*td = curthread;
	struct timeval		tv;
	int			error = 0;
	uint64_t		curtime;
	wb_ext			we;
	nt_dispatch_header	*obj;

	obj = arg;
	if (obj == NULL)
		return (STATUS_INVALID_PARAMETER);

	mtx_lock(&ntoskrnl_dispatchlock);

	cv_init(&we.we_cv, "KeWFS");
	we.we_td = td;

	/*
	 * Check to see if this object is already signalled,
	 * and just return without waiting if it is.
	 */
	if (ntoskrnl_is_signalled(obj, td) == TRUE) {
		/* Sanity check the signal state value. */
		if (obj->dh_sigstate != INT32_MIN) {
			ntoskrnl_satisfy_wait(obj, curthread);
			mtx_unlock(&ntoskrnl_dispatchlock);
			return (STATUS_SUCCESS);
		} else {
			/*
			 * There's a limit to how many times we can
			 * recursively acquire a mutant. If we hit
			 * the limit, something is very wrong.
			 */
			if (obj->dh_type == DISP_TYPE_MUTANT) {
				mtx_unlock(&ntoskrnl_dispatchlock);
				panic("mutant limit exceeded");
			}
		}
	}

	bzero((char *)&w, sizeof(wait_block));
	w.wb_object = obj;
	w.wb_ext = &we;
	w.wb_waittype = WAITTYPE_ANY;
	w.wb_next = &w;
	w.wb_waitkey = 0;
	w.wb_awakened = FALSE;
	w.wb_oldpri = td->td_priority;

	InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));
	/*
	 * The timeout value is specified in 100 nanosecond units
	 * and can be a positive or negative number. If it's positive,
	 * then the duetime is absolute, and we need to convert it
	 * to an absolute offset relative to now in order to use it.
	 * If it's negative, then the duetime is relative and we
	 * just have to convert the units.
	 */
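	/*
	 * Worked example (illustrative): *duetime == -10000000 is a
	 * relative wait of 10^7 * 100ns = 1 second, which the code
	 * below converts to tv_sec = 1, tv_usec = 0.
	 */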
	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	if (duetime == NULL)
		cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
	else
		error = cv_timedwait(&we.we_cv,
		    &ntoskrnl_dispatchlock, tvtohz(&tv));

	RemoveEntryList(&w.wb_waitlist);

	cv_destroy(&we.we_cv);

	/* We timed out. Leave the object alone and return status. */

	if (error == EWOULDBLOCK) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return (STATUS_TIMEOUT);
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return (STATUS_SUCCESS);
}
static uint32_t
KeWaitForMultipleObjects(uint32_t cnt, nt_dispatch_header *obj[], uint32_t wtype,
    uint32_t reason, uint32_t mode, uint8_t alertable, int64_t *duetime,
    wait_block *wb_array)
{
	struct thread		*td = curthread;
	wait_block		*whead, *w;
	wait_block		_wb_array[MAX_WAIT_OBJECTS];
	nt_dispatch_header	*cur;
	struct timeval		tv;
	int			i, wcnt = 0, error = 0;
	uint64_t		curtime;
	struct timespec		t1, t2;
	uint32_t		status = STATUS_SUCCESS;
	wb_ext			we;

	if (cnt > MAX_WAIT_OBJECTS)
		return (STATUS_INVALID_PARAMETER);
	if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
		return (STATUS_INVALID_PARAMETER);

	mtx_lock(&ntoskrnl_dispatchlock);

	cv_init(&we.we_cv, "KeWFM");
	we.we_td = td;

	if (wb_array == NULL)
		whead = _wb_array;
	else
		whead = wb_array;

	bzero((char *)whead, sizeof(wait_block) * cnt);
	/* First pass: see if we can satisfy any waits immediately. */

	wcnt = 0;
	w = whead;

	for (i = 0; i < cnt; i++) {
		InsertTailList((&obj[i]->dh_waitlisthead),
		    (&w->wb_waitlist));

		w->wb_ext = &we;
		w->wb_object = obj[i];
		w->wb_waittype = wtype;
		w->wb_waitkey = i;
		w->wb_awakened = FALSE;
		w->wb_oldpri = td->td_priority;
		w->wb_next = w + 1;
		w++;

		wcnt++;
		if (ntoskrnl_is_signalled(obj[i], td)) {
			/*
			 * There's a limit to how many times
			 * we can recursively acquire a mutant.
			 * If we hit the limit, something
			 * is very wrong.
			 */
			if (obj[i]->dh_sigstate == INT32_MIN &&
			    obj[i]->dh_type == DISP_TYPE_MUTANT) {
				mtx_unlock(&ntoskrnl_dispatchlock);
				panic("mutant limit exceeded");
			}

			/*
			 * If this is a WAITTYPE_ANY wait, then
			 * satisfy the waited object and exit
			 * right now.
			 */
			if (wtype == WAITTYPE_ANY) {
				ntoskrnl_satisfy_wait(obj[i], td);
				status = STATUS_WAIT_0 + i;
				goto wait_done;
			} else {
				w--;
				wcnt--;
				w->wb_object = NULL;
				RemoveEntryList(&w->wb_waitlist);
				w++;
			}
		}
	}

	/*
	 * If this is a WAITTYPE_ALL wait and all objects are
	 * already signalled, satisfy the waits and exit now.
	 */

	if (wtype == WAITTYPE_ALL && wcnt == 0) {
		for (i = 0; i < cnt; i++)
			ntoskrnl_satisfy_wait(obj[i], td);
		status = STATUS_SUCCESS;
		goto wait_done;
	}
	/*
	 * Create a circular waitblock list. The waitcount
	 * must always be non-zero when we get here.
	 */

	(w - 1)->wb_next = whead;
	/* Wait on any objects that aren't yet signalled. */

	/* Calculate timeout, if any. */

	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	while (wcnt) {
		nanotime(&t1);

		if (duetime == NULL)
			cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
		else
			error = cv_timedwait(&we.we_cv,
			    &ntoskrnl_dispatchlock, tvtohz(&tv));
		/* Wait with timeout expired. */

		if (error) {
			status = STATUS_TIMEOUT;
			goto wait_done;
		}

		nanotime(&t2);

		/* See what's been signalled. */

		w = whead;
		do {
			cur = w->wb_object;
			if (ntoskrnl_is_signalled(cur, td) == TRUE ||
			    w->wb_awakened == TRUE) {
				/* Sanity check the signal state value. */
				if (cur->dh_sigstate == INT32_MIN &&
				    cur->dh_type == DISP_TYPE_MUTANT) {
					mtx_unlock(&ntoskrnl_dispatchlock);
					panic("mutant limit exceeded");
				}
				wcnt--;
				if (wtype == WAITTYPE_ANY) {
					status = w->wb_waitkey &
					    STATUS_WAIT_0;
					goto wait_done;
				}
			}
			w = w->wb_next;
		} while (w != whead);
		/*
		 * If all objects have been signalled, or if this
		 * is a WAITTYPE_ANY wait and we were woken up by
		 * someone, we can bail.
		 */
		if (wcnt == 0) {
			status = STATUS_SUCCESS;
			goto wait_done;
		}

		/*
		 * If this is WAITTYPE_ALL wait, and there's still
		 * objects that haven't been signalled, deduct the
		 * time that's elapsed so far from the timeout and
		 * wait again (or continue waiting indefinitely if
		 * there's no timeout).
		 */

		if (duetime != NULL) {
			tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
			tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
		}
	}

wait_done:
	cv_destroy(&we.we_cv);

	for (i = 0; i < cnt; i++) {
		if (whead[i].wb_object != NULL)
			RemoveEntryList(&whead[i].wb_waitlist);
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return (status);
}
static void
WRITE_REGISTER_USHORT(uint16_t *reg, uint16_t val)
{
	bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
}

static uint16_t
READ_REGISTER_USHORT(uint16_t *reg)
{
	return (bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

static void
WRITE_REGISTER_ULONG(uint32_t *reg, uint32_t val)
{
	bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
}

static uint32_t
READ_REGISTER_ULONG(uint32_t *reg)
{
	return (bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

static uint8_t
READ_REGISTER_UCHAR(uint8_t *reg)
{
	return (bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

static void
WRITE_REGISTER_UCHAR(uint8_t *reg, uint8_t val)
{
	bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
}
static int64_t
_allshl(int64_t a, uint8_t b)
{
	return (a << b);
}

static uint64_t
_aullshl(uint64_t a, uint8_t b)
{
	return (a << b);
}

static int64_t
_allshr(int64_t a, uint8_t b)
{
	return (a >> b);
}

static uint64_t
_aullshr(uint64_t a, uint8_t b)
{
	return (a >> b);
}
static slist_entry *
ntoskrnl_pushsl(slist_header *head, slist_entry *entry)
{
	slist_entry		*oldhead;

	oldhead = head->slh_list.slh_next;
	entry->sl_next = head->slh_list.slh_next;
	head->slh_list.slh_next = entry;
	head->slh_list.slh_depth++;
	head->slh_list.slh_seq++;

	return (oldhead);
}
static void
InitializeSListHead(slist_header *head)
{
	memset(head, 0, sizeof(*head));
}
static slist_entry *
ntoskrnl_popsl(slist_header *head)
{
	slist_entry		*first;

	first = head->slh_list.slh_next;
	if (first != NULL) {
		head->slh_list.slh_next = first->sl_next;
		head->slh_list.slh_depth--;
		head->slh_list.slh_seq++;
	}

	return (first);
}
/*
 * We need this to make lookaside lists work for amd64.
 * We pass a pointer to ExAllocatePoolWithTag() the lookaside
 * list structure. For amd64 to work right, this has to be a
 * pointer to the wrapped version of the routine, not the
 * original. Letting the Windows driver invoke the original
 * function directly will result in a calling convention
 * mismatch and a crash. On x86, this effectively
 * becomes a no-op since ipt_func and ipt_wrap are the same.
 */
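/*
 * Usage note: this is why the lookaside-list initializers below
 * install ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag) and
 * ntoskrnl_findwrap((funcptr)ExFreePool) rather than the raw
 * functions as the default allocation/free routines.
 */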
static funcptr
ntoskrnl_findwrap(funcptr func)
{
	image_patch_table	*patch;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		if ((funcptr)patch->ipt_func == func)
			return ((funcptr)patch->ipt_wrap);
		patch++;
	}

	return (NULL);
}
static void
ExInitializePagedLookasideList(paged_lookaside_list *lookaside,
    lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
    uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
{
	bzero((char *)lookaside, sizeof(paged_lookaside_list));

	if (size < sizeof(slist_entry))
		lookaside->nll_l.gl_size = sizeof(slist_entry);
	else
		lookaside->nll_l.gl_size = size;
	lookaside->nll_l.gl_tag = tag;
	if (allocfunc == NULL)
		lookaside->nll_l.gl_allocfunc =
		    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
	else
		lookaside->nll_l.gl_allocfunc = allocfunc;

	if (freefunc == NULL)
		lookaside->nll_l.gl_freefunc =
		    ntoskrnl_findwrap((funcptr)ExFreePool);
	else
		lookaside->nll_l.gl_freefunc = freefunc;

#ifdef __i386__
	KeInitializeSpinLock(&lookaside->nll_obsoletelock);
#endif

	lookaside->nll_l.gl_type = NonPagedPool;
	lookaside->nll_l.gl_depth = depth;
	lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
}
static void
ExDeletePagedLookasideList(paged_lookaside_list *lookaside)
{
	void			*buf;
	void			(*freefunc)(void *);

	freefunc = lookaside->nll_l.gl_freefunc;
	while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
		MSCALL1(freefunc, buf);
}
static void
ExInitializeNPagedLookasideList(npaged_lookaside_list *lookaside,
    lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
    uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
{
	bzero((char *)lookaside, sizeof(npaged_lookaside_list));

	if (size < sizeof(slist_entry))
		lookaside->nll_l.gl_size = sizeof(slist_entry);
	else
		lookaside->nll_l.gl_size = size;
	lookaside->nll_l.gl_tag = tag;
	if (allocfunc == NULL)
		lookaside->nll_l.gl_allocfunc =
		    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
	else
		lookaside->nll_l.gl_allocfunc = allocfunc;

	if (freefunc == NULL)
		lookaside->nll_l.gl_freefunc =
		    ntoskrnl_findwrap((funcptr)ExFreePool);
	else
		lookaside->nll_l.gl_freefunc = freefunc;

#ifdef __i386__
	KeInitializeSpinLock(&lookaside->nll_obsoletelock);
#endif

	lookaside->nll_l.gl_type = NonPagedPool;
	lookaside->nll_l.gl_depth = depth;
	lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
}
static void
ExDeleteNPagedLookasideList(npaged_lookaside_list *lookaside)
{
	void			*buf;
	void			(*freefunc)(void *);

	freefunc = lookaside->nll_l.gl_freefunc;
	while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
		MSCALL1(freefunc, buf);
}
slist_entry *
InterlockedPushEntrySList(slist_header *head, slist_entry *entry)
{
	slist_entry		*oldhead;

	mtx_lock_spin(&ntoskrnl_interlock);
	oldhead = ntoskrnl_pushsl(head, entry);
	mtx_unlock_spin(&ntoskrnl_interlock);

	return (oldhead);
}

slist_entry *
InterlockedPopEntrySList(slist_header *head)
{
	slist_entry		*first;

	mtx_lock_spin(&ntoskrnl_interlock);
	first = ntoskrnl_popsl(head);
	mtx_unlock_spin(&ntoskrnl_interlock);

	return (first);
}
static slist_entry *
ExInterlockedPushEntrySList(slist_header *head, slist_entry *entry,
    kspin_lock *lock)
{
	return (InterlockedPushEntrySList(head, entry));
}

static slist_entry *
ExInterlockedPopEntrySList(slist_header *head, kspin_lock *lock)
{
	return (InterlockedPopEntrySList(head));
}
uint16_t
ExQueryDepthSList(slist_header *head)
{
	uint16_t		depth;

	mtx_lock_spin(&ntoskrnl_interlock);
	depth = head->slh_list.slh_depth;
	mtx_unlock_spin(&ntoskrnl_interlock);

	return (depth);
}
void
KeInitializeSpinLock(kspin_lock *lock)
{
	*lock = 0;
}

#ifdef __i386__
void
KefAcquireSpinLockAtDpcLevel(kspin_lock *lock)
{
#ifdef NTOSKRNL_DEBUG_SPINLOCKS
	int			i = 0;
#endif

	while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
		/* sit and spin */;
#ifdef NTOSKRNL_DEBUG_SPINLOCKS
		i++;
		if (i > 200000000)
			panic("DEADLOCK!");
#endif
	}
}

void
KefReleaseSpinLockFromDpcLevel(kspin_lock *lock)
{
	atomic_store_rel_int((volatile u_int *)lock, 0);
}

uint8_t
KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
{
	uint8_t			oldirql;

	if (KeGetCurrentIrql() > DISPATCH_LEVEL)
		panic("IRQL_NOT_LESS_THAN_OR_EQUAL");

	KeRaiseIrql(DISPATCH_LEVEL, &oldirql);
	KeAcquireSpinLockAtDpcLevel(lock);

	return (oldirql);
}
#else
void
KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
{
	while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
		/* sit and spin */;
}

void
KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
{
	atomic_store_rel_int((volatile u_int *)lock, 0);
}
#endif /* __i386__ */
uint32_t
InterlockedExchange(volatile uint32_t *dst, uintptr_t val)
{
	uint32_t		r;

	mtx_lock_spin(&ntoskrnl_interlock);
	r = *dst;
	*dst = val;
	mtx_unlock_spin(&ntoskrnl_interlock);

	return (r);
}
static uint32_t
InterlockedIncrement(volatile uint32_t *addend)
{
	atomic_add_long((volatile u_long *)addend, 1);
	return (*addend);
}

static uint32_t
InterlockedDecrement(volatile uint32_t *addend)
{
	atomic_subtract_long((volatile u_long *)addend, 1);
	return (*addend);
}
static void
ExInterlockedAddLargeStatistic(uint64_t *addend, uint32_t inc)
{
	mtx_lock_spin(&ntoskrnl_interlock);
	*addend += inc;
	mtx_unlock_spin(&ntoskrnl_interlock);
}
mdl *
IoAllocateMdl(void *vaddr, uint32_t len, uint8_t secondarybuf,
    uint8_t chargequota, irp *iopkt)
{
	mdl			*m;
	int			zone = 0;

	if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
		m = ExAllocatePoolWithTag(NonPagedPool,
		    MmSizeOfMdl(vaddr, len), 0);
	else {
		m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
		zone++;
	}

	if (m == NULL)
		return (NULL);

	MmInitializeMdl(m, vaddr, len);

	/*
	 * MmInitializeMdl() clears the flags field, so we
	 * have to set this here. If the MDL came from the
	 * MDL UMA zone, tag it so we can release it to
	 * the right place later.
	 */
	if (zone)
		m->mdl_flags = MDL_ZONE_ALLOCED;

	if (iopkt != NULL) {
		if (secondarybuf == TRUE) {
			mdl			*last;

			last = iopkt->irp_mdl;
			while (last->mdl_next != NULL)
				last = last->mdl_next;
			last->mdl_next = m;
		} else {
			if (iopkt->irp_mdl != NULL)
				panic("leaking an MDL in IoAllocateMdl()");
			iopkt->irp_mdl = m;
		}
	}

	return (m);
}

void
IoFreeMdl(mdl *m)
{
	if (m == NULL)
		return;

	if (m->mdl_flags & MDL_ZONE_ALLOCED)
		uma_zfree(mdl_zone, m);
	else
		ExFreePool(m);
}
static void *
MmAllocateContiguousMemory(uint32_t size, uint64_t highest)
{
	void			*addr;
	size_t			pagelength = roundup(size, PAGE_SIZE);

	addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);

	return (addr);
}
static void *
MmAllocateContiguousMemorySpecifyCache(uint32_t size, uint64_t lowest,
    uint64_t highest, uint64_t boundary, enum nt_caching_type cachetype)
{
	vm_memattr_t		memattr;
	void			*ret;

	switch (cachetype) {
	case MmNonCached:
		memattr = VM_MEMATTR_UNCACHEABLE;
		break;
	case MmWriteCombined:
		memattr = VM_MEMATTR_WRITE_COMBINING;
		break;
	case MmNonCachedUnordered:
		memattr = VM_MEMATTR_UNCACHEABLE;
		break;
	case MmCached:
	case MmHardwareCoherentCached:
	default:
		memattr = VM_MEMATTR_DEFAULT;
		break;
	}

	ret = (void *)kmem_alloc_contig(kernel_arena, size, M_ZERO | M_NOWAIT,
	    lowest, highest, PAGE_SIZE, boundary, memattr);
	if (ret != NULL)
		malloc_type_allocated(M_DEVBUF, round_page(size));
	return (ret);
}
static void
MmFreeContiguousMemory(void *base)
{
	ExFreePool(base);
}

static void
MmFreeContiguousMemorySpecifyCache(void *base, uint32_t size,
    enum nt_caching_type cachetype)
{
	contigfree(base, size, M_DEVBUF);
}
static uint32_t
MmSizeOfMdl(void *vaddr, size_t len)
{
	uint32_t		l;

	l = sizeof(struct mdl) +
	    (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));

	return (l);
}
/*
 * The Microsoft documentation says this routine fills in the
 * page array of an MDL with the _physical_ page addresses that
 * comprise the buffer, but we don't really want to do that here.
 * Instead, we just fill in the page array with the kernel virtual
 * addresses of the buffers.
 */
void
MmBuildMdlForNonPagedPool(mdl *m)
{
	vm_offset_t		*mdl_pages;
	int			pagecnt, i;

	pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);

	if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
		panic("not enough pages in MDL to describe buffer");

	mdl_pages = MmGetMdlPfnArray(m);

	for (i = 0; i < pagecnt; i++)
		mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);

	m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
	m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
}
static void *
MmMapLockedPages(mdl *buf, uint8_t accessmode)
{
	buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
	return (MmGetMdlVirtualAddress(buf));
}
static void *
MmMapLockedPagesSpecifyCache(mdl *buf, uint8_t accessmode, uint32_t cachetype,
    void *vaddr, uint32_t bugcheck, uint32_t prio)
{
	return (MmMapLockedPages(buf, accessmode));
}
static void
MmUnmapLockedPages(void *vaddr, mdl *buf)
{
	buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
}
/*
 * This function has a problem in that it will break if you
 * compile this module without PAE and try to use it on a PAE
 * kernel. Unfortunately, there's no way around this at the
 * moment. It's slightly less broken than using pmap_kextract().
 * You'd think the virtual memory subsystem would help us out
 * here, but it doesn't.
 */
uint64_t
MmGetPhysicalAddress(void *base)
{
	return (pmap_extract(kernel_map->pmap, (vm_offset_t)base));
}
void *
MmGetSystemRoutineAddress(unicode_string *ustr)
{
	ansi_string		astr;

	if (RtlUnicodeStringToAnsiString(&astr, ustr, TRUE))
		return (NULL);
	return (ndis_get_routine_address(ntoskrnl_functbl, astr.as_buf));
}
uint8_t
MmIsAddressValid(void *vaddr)
{
	if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr))
		return (TRUE);

	return (FALSE);
}
void *
MmMapIoSpace(uint64_t paddr, uint32_t len, uint32_t cachetype)
{
	devclass_t		nexus_class;
	device_t		*nexus_devs, devp;
	int			nexus_count = 0;
	device_t		matching_dev = NULL;
	struct resource		*res;
	int			i;
	vm_offset_t		v;

	/* There will always be at least one nexus. */

	nexus_class = devclass_find("nexus");
	devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);

	for (i = 0; i < nexus_count; i++) {
		devp = nexus_devs[i];
		matching_dev = ntoskrnl_finddev(devp, paddr, &res);
		if (matching_dev)
			break;
	}

	free(nexus_devs, M_TEMP);

	if (matching_dev == NULL)
		return (NULL);

	v = (vm_offset_t)rman_get_virtual(res);
	if (paddr > rman_get_start(res))
		v += paddr - rman_get_start(res);

	return ((void *)v);
}
void
MmUnmapIoSpace(void *vaddr, size_t len)
{
}
static device_t
ntoskrnl_finddev(device_t dev, uint64_t paddr, struct resource **res)
{
	device_t		*children = NULL;
	device_t		matching_dev;
	int			childcnt;
	struct resource		*r;
	struct resource_list	*rl;
	struct resource_list_entry	*rle;
	uint32_t		flags;
	int			i;

	/* We only want devices that have been successfully probed. */

	if (device_is_alive(dev) == FALSE)
		return (NULL);

	rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
	if (rl != NULL) {
		STAILQ_FOREACH(rle, rl, link) {
			r = rle->res;

			if (r == NULL)
				continue;

			flags = rman_get_flags(r);

			if (rle->type == SYS_RES_MEMORY &&
			    paddr >= rman_get_start(r) &&
			    paddr <= rman_get_end(r)) {
				if (!(flags & RF_ACTIVE))
					bus_activate_resource(dev,
					    SYS_RES_MEMORY, 0, r);
				*res = r;
				return (dev);
			}
		}
	}

	/*
	 * If this device has children, do another
	 * level of recursion to inspect them.
	 */

	device_get_children(dev, &children, &childcnt);

	for (i = 0; i < childcnt; i++) {
		matching_dev = ntoskrnl_finddev(children[i], paddr, res);
		if (matching_dev != NULL) {
			free(children, M_TEMP);
			return (matching_dev);
		}
	}

	/* Won't somebody please think of the children! */

	if (children != NULL)
		free(children, M_TEMP);

	return (NULL);
}
/*
 * Workitems are unlike DPCs, in that they run in a user-mode thread
 * context rather than at DISPATCH_LEVEL in kernel context. In our
 * case we run them in kernel context anyway.
 */
static void
ntoskrnl_workitem_thread(arg)
	void			*arg;
{
	kdpc_queue		*kq;
	list_entry		*l;
	io_workitem		*iw;
	uint8_t			irql;

	kq = arg;

	InitializeListHead(&kq->kq_disp);
	kq->kq_td = curthread;
	kq->kq_exit = 0;
	KeInitializeSpinLock(&kq->kq_lock);
	KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);

	while (1) {
		KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);

		KeAcquireSpinLock(&kq->kq_lock, &irql);

		if (kq->kq_exit) {
			kq->kq_exit = 0;
			KeReleaseSpinLock(&kq->kq_lock, irql);
			break;
		}

		while (!IsListEmpty(&kq->kq_disp)) {
			l = RemoveHeadList(&kq->kq_disp);
			iw = CONTAINING_RECORD(l,
			    io_workitem, iw_listentry);
			InitializeListHead((&iw->iw_listentry));
			if (iw->iw_func == NULL)
				continue;
			KeReleaseSpinLock(&kq->kq_lock, irql);
			MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
			KeAcquireSpinLock(&kq->kq_lock, &irql);
		}

		KeReleaseSpinLock(&kq->kq_lock, irql);
	}

	kproc_exit(0);
	return; /* notreached */
}
static uint32_t
RtlCharToInteger(src, base, val)
	const char		*src;
	uint32_t		base;
	uint32_t		*val;
{
	int			negative = 0;
	uint32_t		res = 0, v;

	if (!src || !val)
		return (STATUS_ACCESS_VIOLATION);
	while (*src != '\0' && *src <= ' ')
		src++;
	if (*src == '+')
		src++;
	else if (*src == '-') {
		src++;
		negative = 1;
	}
	if (base == 0) {
		base = 10;
		if (*src == '0') {
			src++;
			if (*src == 'b') {
				base = 2;
				src++;
			} else if (*src == 'o') {
				base = 8;
				src++;
			} else if (*src == 'x') {
				base = 16;
				src++;
			}
		}
	} else if (!(base == 2 || base == 8 || base == 10 || base == 16))
		return (STATUS_INVALID_PARAMETER);

	for (res = 0; *src; src++) {
		if (isdigit(*src))
			v = *src - '0';
		else if (isxdigit(*src))
			v = tolower(*src) - 'a' + 10;
		else
			v = base;
		if (v >= base)
			return (STATUS_INVALID_PARAMETER);
		res = res * base + v;
	}
	*val = negative ? -res : res;
	return (STATUS_SUCCESS);
}
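/*
 * Example (illustrative): RtlCharToInteger("0x1f", 0, &v) auto-detects
 * the radix from the "0x" prefix and stores 31 in v, while
 * RtlCharToInteger("-10", 10, &v) stores (uint32_t)-10.
 */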
static void
ntoskrnl_destroy_workitem_threads(void)
{
	kdpc_queue		*kq;
	int			i;

	for (i = 0; i < WORKITEM_THREADS; i++) {
		kq = wq_queues + i;
		kq->kq_exit = 1;
		KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
		while (kq->kq_exit)
			tsleep(kq->kq_td->td_proc, PWAIT, "waitiw", hz/10);
	}
}

io_workitem *
IoAllocateWorkItem(dobj)
	device_object		*dobj;
{
	io_workitem		*iw;

	iw = uma_zalloc(iw_zone, M_NOWAIT);
	if (iw == NULL)
		return (NULL);

	InitializeListHead(&iw->iw_listentry);
	iw->iw_dobj = dobj;

	mtx_lock(&ntoskrnl_dispatchlock);
	iw->iw_idx = wq_idx;
	WORKIDX_INC(wq_idx);
	mtx_unlock(&ntoskrnl_dispatchlock);

	return (iw);
}

void
IoFreeWorkItem(iw)
	io_workitem		*iw;
{
	uma_zfree(iw_zone, iw);
}
void
IoQueueWorkItem(iw, iw_func, qtype, ctx)
	io_workitem		*iw;
	io_workitem_func	iw_func;
	uint32_t		qtype;
	void			*ctx;
{
	kdpc_queue		*kq;
	list_entry		*l;
	io_workitem		*cur;
	uint8_t			irql;

	kq = wq_queues + iw->iw_idx;

	KeAcquireSpinLock(&kq->kq_lock, &irql);

	/*
	 * Traverse the list and make sure this workitem hasn't
	 * already been inserted. Queuing the same workitem
	 * twice will hose the list but good.
	 */

	l = kq->kq_disp.nle_flink;
	while (l != &kq->kq_disp) {
		cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
		if (cur == iw) {
			/* Already queued -- do nothing. */
			KeReleaseSpinLock(&kq->kq_lock, irql);
			return;
		}
		l = l->nle_flink;
	}

	iw->iw_func = iw_func;
	iw->iw_ctx = ctx;

	InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
	KeReleaseSpinLock(&kq->kq_lock, irql);

	KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
}
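/*
 * Usage sketch (illustrative; my_wi_func and my_ctx are hypothetical,
 * and qtype is the Windows work-queue class passed by the driver):
 *
 *	io_workitem *iw = IoAllocateWorkItem(dobj);
 *	if (iw != NULL)
 *		IoQueueWorkItem(iw, my_wi_func, qtype, my_ctx);
 *
 * The workitem thread later invokes my_wi_func(dobj, my_ctx) via
 * MSCALL2(), and the caller releases iw with IoFreeWorkItem().
 */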
static void
ntoskrnl_workitem(dobj, arg)
	device_object		*dobj;
	void			*arg;
{
	io_workitem		*iw;
	work_queue_item		*w;
	work_item_func		f;

	iw = arg;
	w = (work_queue_item *)dobj;
	f = (work_item_func)w->wqi_func;
	uma_zfree(iw_zone, iw);
	MSCALL2(f, w, w->wqi_ctx);
}
/*
 * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
 * warns that it's unsafe and to use IoQueueWorkItem() instead. The
 * problem with ExQueueWorkItem() is that it can't guard against
 * the condition where a driver submits a job to the work queue and
 * is then unloaded before the job is able to run. IoQueueWorkItem()
 * acquires a reference to the device's device_object via the
 * object manager and retains it until after the job has completed,
 * which prevents the driver from being unloaded before the job
 * runs. (We don't currently support this behavior, though hopefully
 * that will change once the object manager API is fleshed out a bit.)
 *
 * Having said all that, the ExQueueWorkItem() API remains, because
 * there are still other parts of Windows that use it, including
 * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
 * We fake up the ExQueueWorkItem() API on top of our implementation
 * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
 * for ExQueueWorkItem() jobs, and we pass a pointer to the work
 * queue item (provided by the caller) in to IoAllocateWorkItem()
 * instead of the device_object. We need to save this pointer so
 * we can apply a sanity check: as with the DPC queue and other
 * workitem queues, we can't allow the same work queue item to
 * be queued twice. If it's already pending, we silently return.
 */
void
ExQueueWorkItem(w, qtype)
	work_queue_item		*w;
	uint32_t		qtype;
{
	io_workitem		*iw;
	io_workitem_func	iwf;
	kdpc_queue		*kq;
	list_entry		*l;
	io_workitem		*cur;
	uint8_t			irql;

	/*
	 * We need to do a special sanity test to make sure
	 * the ExQueueWorkItem() API isn't used to queue
	 * the same workitem twice. Rather than checking the
	 * io_workitem pointer itself, we test the attached
	 * device object, which is really a pointer to the
	 * legacy work queue item structure.
	 */

	kq = wq_queues + WORKITEM_LEGACY_THREAD;
	KeAcquireSpinLock(&kq->kq_lock, &irql);
	l = kq->kq_disp.nle_flink;
	while (l != &kq->kq_disp) {
		cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
		if (cur->iw_dobj == (device_object *)w) {
			/* Already queued -- do nothing. */
			KeReleaseSpinLock(&kq->kq_lock, irql);
			return;
		}
		l = l->nle_flink;
	}
	KeReleaseSpinLock(&kq->kq_lock, irql);

	iw = IoAllocateWorkItem((device_object *)w);
	if (iw == NULL)
		return;

	iw->iw_idx = WORKITEM_LEGACY_THREAD;
	iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
	IoQueueWorkItem(iw, iwf, qtype, iw);
}
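/*
 * Illustrative legacy-path sketch (names per the Windows DDK; the
 * driver softc field sc->wqi, callback my_func and context my_ctx are
 * hypothetical):
 *
 *	ExInitializeWorkItem(&sc->wqi, my_func, my_ctx);
 *	ExQueueWorkItem(&sc->wqi, DelayedWorkQueue);
 *
 * The job then funnels through ntoskrnl_workitem() above, which casts
 * the attached device_object back to the work_queue_item.
 */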
void
RtlZeroMemory(dst, len)
	void			*dst;
	size_t			len;
{
	bzero(dst, len);
}

void
RtlSecureZeroMemory(dst, len)
	void			*dst;
	size_t			len;
{
	memset(dst, 0, len);
}

void
RtlFillMemory(void *dst, size_t len, uint8_t c)
{
	memset(dst, c, len);
}

void
RtlMoveMemory(dst, src, len)
	void			*dst;
	const void		*src;
	size_t			len;
{
	memmove(dst, src, len);
}

void
RtlCopyMemory(dst, src, len)
	void			*dst;
	const void		*src;
	size_t			len;
{
	bcopy(src, dst, len);
}

size_t
RtlCompareMemory(s1, s2, len)
	const void		*s1;
	const void		*s2;
	size_t			len;
{
	size_t			i;
	char			*m1, *m2;

	m1 = __DECONST(char *, s1);
	m2 = __DECONST(char *, s2);

	for (i = 0; i < len && m1[i] == m2[i]; i++);
	return (i);
}
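/*
 * Note: unlike memcmp(), RtlCompareMemory() returns the number of
 * leading bytes that match, e.g. RtlCompareMemory("abcd", "abxd", 4)
 * returns 2.
 */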
void
RtlInitAnsiString(dst, src)
	ansi_string		*dst;
	char			*src;
{
	ansi_string		*a;

	a = dst;
	if (a == NULL)
		return;
	if (src == NULL) {
		a->as_len = a->as_maxlen = 0;
		a->as_buf = NULL;
	} else {
		a->as_buf = src;
		a->as_len = a->as_maxlen = strlen(src);
	}
}

void
RtlInitUnicodeString(dst, src)
	unicode_string		*dst;
	uint16_t		*src;
{
	unicode_string		*u;
	int			i;

	u = dst;
	if (u == NULL)
		return;
	if (src == NULL) {
		u->us_len = u->us_maxlen = 0;
		u->us_buf = NULL;
	} else {
		i = 0;
		while (src[i] != 0)
			i++;
		u->us_buf = src;
		u->us_len = u->us_maxlen = i * 2;
	}
}

ndis_status
RtlUnicodeStringToInteger(ustr, base, val)
	unicode_string		*ustr;
	uint32_t		base;
	uint32_t		*val;
{
	uint16_t		*uchr;
	int			len, neg = 0;
	char			abuf[64];
	char			*astr;

	uchr = ustr->us_buf;
	len = ustr->us_len;
	bzero(abuf, sizeof(abuf));

	if ((char)((*uchr) & 0xFF) == '-') {
		neg = 1;
		uchr++;
		len -= 2;
	} else if ((char)((*uchr) & 0xFF) == '+') {
		neg = 0;
		uchr++;
		len -= 2;
	}

	if (base == 0) {
		if ((char)((*uchr) & 0xFF) == 'b') {
			base = 2;
			uchr++;
			len -= 2;
		} else if ((char)((*uchr) & 0xFF) == 'o') {
			base = 8;
			uchr++;
			len -= 2;
		} else if ((char)((*uchr) & 0xFF) == 'x') {
			base = 16;
			uchr++;
			len -= 2;
		} else
			base = 10;
	}

	astr = abuf;
	if (neg) {
		strcpy(astr, "-");
		astr++;
	}

	ntoskrnl_unicode_to_ascii(uchr, astr, len);
	*val = strtoul(abuf, NULL, base);

	return (STATUS_SUCCESS);
}
void
RtlFreeUnicodeString(ustr)
	unicode_string		*ustr;
{
	if (ustr->us_buf == NULL)
		return;
	ExFreePool(ustr->us_buf);
	ustr->us_buf = NULL;
}

void
RtlFreeAnsiString(astr)
	ansi_string		*astr;
{
	if (astr->as_buf == NULL)
		return;
	ExFreePool(astr->as_buf);
	astr->as_buf = NULL;
}

static int
atoi(str)
	const char		*str;
{
	return (int)strtol(str, (char **)NULL, 10);
}

static long
atol(str)
	const char		*str;
{
	return strtol(str, (char **)NULL, 10);
}

static int
rand(void)
{
	return (random() / 2 + 1);
}

static void
srand(unsigned int seed)
{
	srandom(seed);
}
uint8_t
IoIsWdmVersionAvailable(uint8_t major, uint8_t minor)
{
	if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
		return (TRUE);
	return (FALSE);
}

static int32_t
IoOpenDeviceRegistryKey(struct device_object *devobj, uint32_t type,
    uint32_t mask, void **key)
{
	return (NDIS_STATUS_INVALID_DEVICE_REQUEST);
}

uint32_t
IoGetDeviceObjectPointer(name, reqaccess, fileobj, devobj)
	unicode_string		*name;
	uint32_t		reqaccess;
	void			*fileobj;
	device_object		*devobj;
{
	return (STATUS_SUCCESS);
}

uint32_t
IoGetDeviceProperty(devobj, regprop, buflen, prop, reslen)
	device_object		*devobj;
	uint32_t		regprop;
	uint32_t		buflen;
	void			*prop;
	uint32_t		*reslen;
{
	driver_object		*drv;
	uint16_t		**name;

	drv = devobj->do_drvobj;

	switch (regprop) {
	case DEVPROP_DRIVER_KEYNAME:
		name = prop;
		*name = drv->dro_drivername.us_buf;
		*reslen = drv->dro_drivername.us_len;
		break;
	default:
		return (STATUS_INVALID_PARAMETER_2);
		break;
	}

	return (STATUS_SUCCESS);
}
void
KeInitializeMutex(kmutex, level)
	kmutant			*kmutex;
	uint32_t		level;
{
	InitializeListHead((&kmutex->km_header.dh_waitlisthead));
	kmutex->km_abandoned = FALSE;
	kmutex->km_apcdisable = 1;
	kmutex->km_header.dh_sigstate = 1;
	kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
	kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
	kmutex->km_ownerthread = NULL;
}

uint32_t
KeReleaseMutex(kmutant *kmutex, uint8_t kwait)
{
	uint32_t		prevstate;

	mtx_lock(&ntoskrnl_dispatchlock);
	prevstate = kmutex->km_header.dh_sigstate;
	if (kmutex->km_ownerthread != curthread) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return (STATUS_MUTANT_NOT_OWNED);
	}

	kmutex->km_header.dh_sigstate++;
	kmutex->km_abandoned = FALSE;

	if (kmutex->km_header.dh_sigstate == 1) {
		kmutex->km_ownerthread = NULL;
		ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return (prevstate);
}
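/*
 * Usage sketch (illustrative): Windows drivers pair these with the
 * dispatcher wait calls, e.g.
 *
 *	KeInitializeMutex(&m, 0);
 *	KeWaitForSingleObject(&m, 0, 0, FALSE, NULL);	(acquire)
 *	KeReleaseMutex(&m, FALSE);			(release)
 *
 * KeReleaseMutex() fails with STATUS_MUTANT_NOT_OWNED unless it is
 * called from the owning thread.
 */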
uint32_t
KeReadStateMutex(kmutex)
	kmutant			*kmutex;
{
	return (kmutex->km_header.dh_sigstate);
}

void
KeInitializeEvent(nt_kevent *kevent, uint32_t type, uint8_t state)
{
	InitializeListHead((&kevent->k_header.dh_waitlisthead));
	kevent->k_header.dh_sigstate = state;
	if (type == EVENT_TYPE_NOTIFY)
		kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
	else
		kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
	kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
}

uint32_t
KeResetEvent(kevent)
	nt_kevent		*kevent;
{
	uint32_t		prevstate;

	mtx_lock(&ntoskrnl_dispatchlock);
	prevstate = kevent->k_header.dh_sigstate;
	kevent->k_header.dh_sigstate = FALSE;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return (prevstate);
}

uint32_t
KeSetEvent(nt_kevent *kevent, uint32_t increment, uint8_t kwait)
{
	uint32_t		prevstate;
	wait_block		*w;
	nt_dispatch_header	*dh;
	wb_ext			*we;

	mtx_lock(&ntoskrnl_dispatchlock);
	prevstate = kevent->k_header.dh_sigstate;
	dh = &kevent->k_header;

	if (IsListEmpty(&dh->dh_waitlisthead))
		/*
		 * If there's nobody in the waitlist, just set
		 * the state to signalled.
		 */
		dh->dh_sigstate = 1;
	else {
		/*
		 * Get the first waiter. If this is a synchronization
		 * event, just wake up that one thread (don't bother
		 * setting the state to signalled since we're supposed
		 * to automatically clear synchronization events anyway).
		 *
		 * If it's a notification event, or the first
		 * waiter is doing a WAITTYPE_ALL wait, go through
		 * the full wait satisfaction process.
		 */
		w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
		    wait_block, wb_waitlist);
		we = w->wb_ext;
		if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
		    w->wb_waittype == WAITTYPE_ALL) {
			if (prevstate == 0) {
				dh->dh_sigstate = 1;
				ntoskrnl_waittest(dh, increment);
			}
		} else {
			w->wb_awakened |= TRUE;
			cv_broadcastpri(&we->we_cv,
			    (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
			    w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
		}
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return (prevstate);
}
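/*
 * Behavior sketch (illustrative): for a synchronization event only one
 * waiter is released and the event auto-clears; for a notification
 * event the state stays signalled until KeClearEvent()/KeResetEvent():
 *
 *	KeInitializeEvent(&ev, EVENT_TYPE_SYNC, FALSE);
 *	KeSetEvent(&ev, IO_NO_INCREMENT, FALSE);	(wakes one waiter)
 */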
void
KeClearEvent(kevent)
	nt_kevent		*kevent;
{
	kevent->k_header.dh_sigstate = FALSE;
}

uint32_t
KeReadStateEvent(kevent)
	nt_kevent		*kevent;
{
	return (kevent->k_header.dh_sigstate);
}

/*
 * The object manager in Windows is responsible for managing
 * references and access to various types of objects, including
 * device_objects, events, threads, timers and so on. However,
 * there's a difference in the way objects are handled in user
 * mode versus kernel mode.
 *
 * In user mode (i.e. Win32 applications), all objects are
 * managed by the object manager. For example, when you create
 * a timer or event object, you actually end up with an
 * object_header (for the object manager's bookkeeping
 * purposes) and an object body (which contains the actual object
 * structure, e.g. ktimer, kevent, etc...). This allows Windows
 * to manage resource quotas and to enforce access restrictions
 * on basically every kind of system object handled by the kernel.
 *
 * However, in kernel mode, you only end up using the object
 * manager some of the time. For example, in a driver, you create
 * a timer object by simply allocating the memory for a ktimer
 * structure and initializing it with KeInitializeTimer(). Hence,
 * the timer has no object_header and no reference counting or
 * security/resource checks are done on it. The assumption in
 * this case is that if you're running in kernel mode, you know
 * what you're doing, and you're already at an elevated privilege
 * level.
 *
 * There are some exceptions to this. The two most important ones
 * for our purposes are device_objects and threads. We need to use
 * the object manager to do reference counting on device_objects,
 * and for threads, you can only get a pointer to a thread's
 * dispatch header by using ObReferenceObjectByHandle() on the
 * handle returned by PsCreateSystemThread().
 */
uint32_t
ObReferenceObjectByHandle(ndis_handle handle, uint32_t reqaccess, void *otype,
    uint8_t accessmode, void **object, void **handleinfo)
{
	nt_objref		*nr;

	nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (nr == NULL)
		return (STATUS_INSUFFICIENT_RESOURCES);

	InitializeListHead((&nr->no_dh.dh_waitlisthead));
	nr->no_obj = handle;
	nr->no_dh.dh_type = DISP_TYPE_THREAD;
	nr->no_dh.dh_sigstate = 0;
	nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
	    sizeof(uint32_t));
	TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
	*object = nr;

	return (STATUS_SUCCESS);
}
uint32_t
ObfDereferenceObject(object)
	void			*object;
{
	nt_objref		*nr;

	nr = object;
	TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
	free(nr, M_DEVBUF);

	return (STATUS_SUCCESS);
}

uint32_t
WmiQueryTraceInformation(traceclass, traceinfo, infolen, reqlen, buf)
	uint32_t		traceclass;
	void			*traceinfo;
	uint32_t		infolen;
	uint32_t		reqlen;
	void			*buf;
{
	return (STATUS_NOT_FOUND);
}

uint32_t
WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
	void *guid, uint16_t messagenum, ...)
{
	return (STATUS_SUCCESS);
}

uint32_t
IoWMIRegistrationControl(dobj, action)
	device_object		*dobj;
	uint32_t		action;
{
	return (STATUS_SUCCESS);
}

/*
 * This is here just in case the thread returns without calling
 * PsTerminateSystemThread().
 */
static void
ntoskrnl_thrfunc(arg)
	void			*arg;
{
	thread_context		*thrctx;
	uint32_t (*tfunc)(void *);
	void			*tctx;
	uint32_t		rval;

	thrctx = arg;
	tfunc = thrctx->tc_thrfunc;
	tctx = thrctx->tc_thrctx;
	free(thrctx, M_TEMP);

	rval = MSCALL1(tfunc, tctx);

	PsTerminateSystemThread(rval);
	return; /* notreached */
}

ndis_status
PsCreateSystemThread(handle, reqaccess, objattrs, phandle,
	clientid, thrfunc, thrctx)
	ndis_handle		*handle;
	uint32_t		reqaccess;
	void			*objattrs;
	ndis_handle		phandle;
	void			*clientid;
	void			*thrfunc;
	void			*thrctx;
{
	int			error;
	thread_context		*tc;
	struct proc		*p;

	tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
	if (tc == NULL)
		return (STATUS_INSUFFICIENT_RESOURCES);

	tc->tc_thrctx = thrctx;
	tc->tc_thrfunc = thrfunc;

	error = kproc_create(ntoskrnl_thrfunc, tc, &p,
	    RFHIGHPID, NDIS_KSTACK_PAGES, "Windows Kthread %d", ntoskrnl_kth);

	if (error) {
		free(tc, M_TEMP);
		return (STATUS_INSUFFICIENT_RESOURCES);
	}

	*handle = p;
	ntoskrnl_kth++;

	return (STATUS_SUCCESS);
}
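/*
 * Usage sketch (illustrative): this pairing lets a driver wait for a
 * thread it spawned to exit; func/ctx are hypothetical, and this
 * implementation ignores the access and type arguments:
 *
 *	PsCreateSystemThread(&th, 0, NULL, NULL, NULL, func, ctx);
 *	ObReferenceObjectByHandle(th, 0, NULL, 0, &obj, NULL);
 *	KeWaitForSingleObject(obj, 0, 0, FALSE, NULL);
 *	ObfDereferenceObject(obj);
 */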
/*
 * In Windows, the exit of a thread is an event that you're allowed
 * to wait on, assuming you've obtained a reference to the thread using
 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
 * simulate this behavior is to register each thread we create in a
 * reference list, and if someone holds a reference to us, we poke
 * them when we exit.
 */
ndis_status
PsTerminateSystemThread(status)
	ndis_status		status;
{
	struct nt_objref	*nr;

	mtx_lock(&ntoskrnl_dispatchlock);
	TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
		if (nr->no_obj != curthread->td_proc)
			continue;
		nr->no_dh.dh_sigstate = 1;
		ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
		break;
	}
	mtx_unlock(&ntoskrnl_dispatchlock);

	ntoskrnl_kth--;

	kproc_exit(0);
	return (0); /* notreached */
}

static uint32_t
DbgPrint(char *fmt, ...)
{
	va_list			ap;

	if (bootverbose) {
		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
	}

	return (STATUS_SUCCESS);
}

static void
DbgBreakPoint(void)
{
	kdb_enter(KDB_WHY_NDIS, "DbgBreakPoint(): breakpoint");
}

static void
KeBugCheckEx(code, param1, param2, param3, param4)
	uint32_t		code;
	u_long			param1;
	u_long			param2;
	u_long			param3;
	u_long			param4;
{
	panic("KeBugCheckEx: STOP 0x%X", code);
}
static void
ntoskrnl_timercall(arg)
	void			*arg;
{
	ktimer			*timer;
	struct timeval		tv;
	kdpc			*dpc;

	mtx_lock(&ntoskrnl_dispatchlock);

	timer = arg;

#ifdef NTOSKRNL_DEBUG_TIMERS
	ntoskrnl_timer_fires++;
#endif
	ntoskrnl_remove_timer(timer);

	/*
	 * This should never happen, but complain
	 * if it does.
	 */

	if (timer->k_header.dh_inserted == FALSE) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		printf("NTOS: timer %p fired even though "
		    "it was canceled\n", timer);
		return;
	}

	/* Mark the timer as no longer being on the timer queue. */

	timer->k_header.dh_inserted = FALSE;

	/* Now signal the object and satisfy any waits on it. */

	timer->k_header.dh_sigstate = 1;
	ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);

	/*
	 * If this is a periodic timer, re-arm it
	 * so it will fire again. We do this before
	 * calling any deferred procedure calls because
	 * it's possible the DPC might cancel the timer,
	 * in which case it would be wrong for us to
	 * re-arm it again afterwards.
	 */

	if (timer->k_period) {
		tv.tv_sec = 0;
		tv.tv_usec = timer->k_period * 1000;
		timer->k_header.dh_inserted = TRUE;
		ntoskrnl_insert_timer(timer, tvtohz(&tv));
#ifdef NTOSKRNL_DEBUG_TIMERS
		ntoskrnl_timer_reloads++;
#endif
	}

	dpc = timer->k_dpc;

	mtx_unlock(&ntoskrnl_dispatchlock);

	/* If there's a DPC associated with the timer, queue it up. */

	if (dpc != NULL)
		KeInsertQueueDpc(dpc, NULL, NULL);
}

#ifdef NTOSKRNL_DEBUG_TIMERS
static int
sysctl_show_timers(SYSCTL_HANDLER_ARGS)
{
	int			ret;

	ret = 0;
	ntoskrnl_show_timers();
	return (sysctl_handle_int(oidp, &ret, 0, req));
}

static void
ntoskrnl_show_timers()
{
	int			i = 0;
	list_entry		*l;

	mtx_lock_spin(&ntoskrnl_calllock);
	l = ntoskrnl_calllist.nle_flink;
	while (l != &ntoskrnl_calllist) {
		i++;
		l = l->nle_flink;
	}
	mtx_unlock_spin(&ntoskrnl_calllock);

	printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
	printf("timer sets: %qu\n", ntoskrnl_timer_sets);
	printf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
	printf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
	printf("timer fires: %qu\n", ntoskrnl_timer_fires);
}
#endif
/*
 * Must be called with dispatcher lock held.
 */

static void
ntoskrnl_insert_timer(timer, ticks)
	ktimer			*timer;
	int			ticks;
{
	callout_entry		*e;
	list_entry		*l;
	struct callout		*c;

	/*
	 * Try and allocate a timer.
	 */
	mtx_lock_spin(&ntoskrnl_calllock);
	if (IsListEmpty(&ntoskrnl_calllist)) {
		mtx_unlock_spin(&ntoskrnl_calllock);
#ifdef NTOSKRNL_DEBUG_TIMERS
		ntoskrnl_show_timers();
#endif
		panic("out of timers!");
	}
	l = RemoveHeadList(&ntoskrnl_calllist);
	mtx_unlock_spin(&ntoskrnl_calllock);

	e = CONTAINING_RECORD(l, callout_entry, ce_list);
	c = &e->ce_callout;

	timer->k_callout = c;

	callout_init(c, CALLOUT_MPSAFE);
	callout_reset(c, ticks, ntoskrnl_timercall, timer);
}

static void
ntoskrnl_remove_timer(timer)
	ktimer			*timer;
{
	callout_entry		*e;

	e = (callout_entry *)timer->k_callout;
	callout_stop(timer->k_callout);

	mtx_lock_spin(&ntoskrnl_calllock);
	InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
	mtx_unlock_spin(&ntoskrnl_calllock);
}

void
KeInitializeTimer(timer)
	ktimer			*timer;
{
	if (timer == NULL)
		return;

	KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
}

void
KeInitializeTimerEx(timer, type)
	ktimer			*timer;
	uint32_t		type;
{
	if (timer == NULL)
		return;

	bzero((char *)timer, sizeof(ktimer));
	InitializeListHead((&timer->k_header.dh_waitlisthead));
	timer->k_header.dh_sigstate = FALSE;
	timer->k_header.dh_inserted = FALSE;
	if (type == EVENT_TYPE_NOTIFY)
		timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
	else
		timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
	timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
}
/*
 * DPC subsystem. A Windows Deferred Procedure Call has the following
 * properties:
 * - It runs at DISPATCH_LEVEL.
 * - It can have one of 3 importance values that control when it
 *   runs relative to other DPCs in the queue.
 * - On SMP systems, it can be set to run on a specific processor.
 * In order to satisfy the last property, we create a DPC thread for
 * each CPU in the system and bind it to that CPU. Each thread
 * maintains three queues with different importance levels, which
 * will be processed in order from lowest to highest.
 *
 * In Windows, interrupt handlers run as DPCs. (Not to be confused
 * with ISRs, which run in interrupt context and can preempt DPCs.)
 * ISRs are given the highest importance so that they'll take
 * precedence over timers and other things.
 */
static void
ntoskrnl_dpc_thread(arg)
	void			*arg;
{
	kdpc_queue		*kq;
	kdpc			*d;
	list_entry		*l;
	uint8_t			irql;

	kq = arg;

	InitializeListHead(&kq->kq_disp);
	kq->kq_td = curthread;
	kq->kq_exit = 0;
	kq->kq_running = FALSE;
	KeInitializeSpinLock(&kq->kq_lock);
	KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
	KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);

	/*
	 * Elevate our priority. DPCs are used to run interrupt
	 * handlers, and they should trigger as soon as possible
	 * once scheduled by an ISR.
	 */

	thread_lock(curthread);
#ifdef NTOSKRNL_MULTIPLE_DPCS
	sched_bind(curthread, kq->kq_cpu);
#endif
	sched_prio(curthread, PRI_MIN_KERN);
	thread_unlock(curthread);

	while (1) {
		KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);

		KeAcquireSpinLock(&kq->kq_lock, &irql);

		if (kq->kq_exit) {
			kq->kq_exit = 0;
			KeReleaseSpinLock(&kq->kq_lock, irql);
			break;
		}

		kq->kq_running = TRUE;

		while (!IsListEmpty(&kq->kq_disp)) {
			l = RemoveHeadList((&kq->kq_disp));
			d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
			InitializeListHead((&d->k_dpclistentry));
			KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
			MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
			    d->k_sysarg1, d->k_sysarg2);
			KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
		}

		kq->kq_running = FALSE;

		KeReleaseSpinLock(&kq->kq_lock, irql);

		KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
	}

	kproc_exit(0);
	return; /* notreached */
}

static void
ntoskrnl_destroy_dpc_threads(void)
{
	kdpc_queue		*kq;
	kdpc			dpc;
	int			i;

	kq = kq_queues;
#ifdef NTOSKRNL_MULTIPLE_DPCS
	for (i = 0; i < mp_ncpus; i++) {
#else
	for (i = 0; i < 1; i++) {
#endif
		kq += i;

		kq->kq_exit = 1;
		KeInitializeDpc(&dpc, NULL, NULL);
		KeSetTargetProcessorDpc(&dpc, i);
		KeInsertQueueDpc(&dpc, NULL, NULL);
		while (kq->kq_exit)
			tsleep(kq->kq_td->td_proc, PWAIT, "dpcw", hz/10);
	}
}

static uint8_t
ntoskrnl_insert_dpc(head, dpc)
	list_entry		*head;
	kdpc			*dpc;
{
	list_entry		*l;
	kdpc			*d;

	l = head->nle_flink;
	while (l != head) {
		d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
		if (d == dpc)
			return (FALSE);
		l = l->nle_flink;
	}

	if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
		InsertTailList((head), (&dpc->k_dpclistentry));
	else
		InsertHeadList((head), (&dpc->k_dpclistentry));

	return (TRUE);
}

void
KeInitializeDpc(dpc, dpcfunc, dpcctx)
	kdpc			*dpc;
	void			*dpcfunc;
	void			*dpcctx;
{
	if (dpc == NULL)
		return;

	dpc->k_deferedfunc = dpcfunc;
	dpc->k_deferredctx = dpcctx;
	dpc->k_num = KDPC_CPU_DEFAULT;
	dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
	InitializeListHead((&dpc->k_dpclistentry));
}

uint8_t
KeInsertQueueDpc(dpc, sysarg1, sysarg2)
	kdpc			*dpc;
	void			*sysarg1;
	void			*sysarg2;
{
	kdpc_queue		*kq;
	uint8_t			r;
	uint8_t			irql;

	if (dpc == NULL)
		return (FALSE);

	kq = kq_queues;

#ifdef NTOSKRNL_MULTIPLE_DPCS
	KeRaiseIrql(DISPATCH_LEVEL, &irql);

	/*
	 * By default, the DPC is queued to run on the same CPU
	 * that scheduled it.
	 */

	if (dpc->k_num == KDPC_CPU_DEFAULT)
		kq += curthread->td_oncpu;
	else
		kq += dpc->k_num;
	KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
#else
	KeAcquireSpinLock(&kq->kq_lock, &irql);
#endif

	r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
	if (r == TRUE) {
		dpc->k_sysarg1 = sysarg1;
		dpc->k_sysarg2 = sysarg2;
	}

	KeReleaseSpinLock(&kq->kq_lock, irql);

	if (r == FALSE)
		return (r);

	KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);

	return (r);
}
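/*
 * Usage sketch (illustrative, with a hypothetical Windows-ABI callback
 * my_dpc_func and softc sc):
 *
 *	KeInitializeDpc(&sc->dpc, my_dpc_func, sc);
 *	KeInsertQueueDpc(&sc->dpc, NULL, NULL);
 *
 * The DPC thread above then calls
 * my_dpc_func(&sc->dpc, sc, sysarg1, sysarg2) via MSCALL4().
 */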
uint8_t
KeRemoveQueueDpc(dpc)
	kdpc			*dpc;
{
	kdpc_queue		*kq;
	uint8_t			irql;

	if (dpc == NULL)
		return (FALSE);

#ifdef NTOSKRNL_MULTIPLE_DPCS
	KeRaiseIrql(DISPATCH_LEVEL, &irql);

	kq = kq_queues + dpc->k_num;

	KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
#else
	kq = kq_queues;
	KeAcquireSpinLock(&kq->kq_lock, &irql);
#endif

	if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
		KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
		KeLowerIrql(irql);
		return (FALSE);
	}

	RemoveEntryList((&dpc->k_dpclistentry));
	InitializeListHead((&dpc->k_dpclistentry));

	KeReleaseSpinLock(&kq->kq_lock, irql);

	return (TRUE);
}

uint8_t
KeSetImportanceDpc(dpc, imp)
	kdpc			*dpc;
	uint32_t		imp;
{
	if (imp != KDPC_IMPORTANCE_LOW &&
	    imp != KDPC_IMPORTANCE_MEDIUM &&
	    imp != KDPC_IMPORTANCE_HIGH)
		return (FALSE);

	dpc->k_importance = (uint8_t)imp;

	return (TRUE);
}

void
KeSetTargetProcessorDpc(kdpc *dpc, uint8_t cpu)
{
	if (cpu > mp_ncpus)
		return;

	dpc->k_num = cpu;
}

void
KeFlushQueuedDpcs(void)
{
	kdpc_queue		*kq;
	int			i;

	/*
	 * Poke each DPC queue and wait
	 * for them to drain.
	 */

#ifdef NTOSKRNL_MULTIPLE_DPCS
	for (i = 0; i < mp_ncpus; i++) {
#else
	for (i = 0; i < 1; i++) {
#endif
		kq = kq_queues + i;
		KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
		KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
	}
}

uint32_t
KeGetCurrentProcessorNumber(void)
{
	return ((uint32_t)curthread->td_oncpu);
}

uint8_t
KeSetTimerEx(timer, duetime, period, dpc)
	ktimer			*timer;
	int64_t			duetime;
	uint32_t		period;
	kdpc			*dpc;
{
	struct timeval		tv;
	uint64_t		curtime;
	uint8_t			pending;

	if (timer == NULL)
		return (FALSE);

	mtx_lock(&ntoskrnl_dispatchlock);

	if (timer->k_header.dh_inserted == TRUE) {
		ntoskrnl_remove_timer(timer);
#ifdef NTOSKRNL_DEBUG_TIMERS
		ntoskrnl_timer_cancels++;
#endif
		timer->k_header.dh_inserted = FALSE;
		pending = TRUE;
	} else
		pending = FALSE;

	timer->k_duetime = duetime;
	timer->k_period = period;
	timer->k_header.dh_sigstate = FALSE;
	timer->k_dpc = dpc;

	if (duetime < 0) {
		tv.tv_sec = - (duetime) / 10000000;
		tv.tv_usec = (- (duetime) / 10) -
		    (tv.tv_sec * 1000000);
	} else {
		ntoskrnl_time(&curtime);
		if (duetime < curtime)
			tv.tv_sec = tv.tv_usec = 0;
		else {
			tv.tv_sec = ((duetime) - curtime) / 10000000;
			tv.tv_usec = ((duetime) - curtime) / 10 -
			    (tv.tv_sec * 1000000);
		}
	}

	timer->k_header.dh_inserted = TRUE;
	ntoskrnl_insert_timer(timer, tvtohz(&tv));
#ifdef NTOSKRNL_DEBUG_TIMERS
	ntoskrnl_timer_sets++;
#endif

	mtx_unlock(&ntoskrnl_dispatchlock);

	return (pending);
}
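/*
 * Note on duetime units (per the DDK, and matching the conversion
 * above): duetime is in 100ns ticks; a negative value is relative to
 * now, a positive value is an absolute system time. E.g. a 500ms
 * one-shot relative timer (illustrative):
 *
 *	KeSetTimerEx(&timer, -5000000LL, 0, NULL);
 */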
uint8_t
KeSetTimer(timer, duetime, dpc)
	ktimer			*timer;
	int64_t			duetime;
	kdpc			*dpc;
{
	return (KeSetTimerEx(timer, duetime, 0, dpc));
}

/*
 * The Windows DDK documentation seems to say that cancelling
 * a timer that has a DPC will result in the DPC also being
 * cancelled, but this isn't really the case.
 */
uint8_t
KeCancelTimer(timer)
	ktimer			*timer;
{
	uint8_t			pending;

	if (timer == NULL)
		return (FALSE);

	mtx_lock(&ntoskrnl_dispatchlock);

	pending = timer->k_header.dh_inserted;

	if (timer->k_header.dh_inserted == TRUE) {
		timer->k_header.dh_inserted = FALSE;
		ntoskrnl_remove_timer(timer);
#ifdef NTOSKRNL_DEBUG_TIMERS
		ntoskrnl_timer_cancels++;
#endif
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return (pending);
}

uint8_t
KeReadStateTimer(timer)
	ktimer			*timer;
{
	return (timer->k_header.dh_sigstate);
}

static int32_t
KeDelayExecutionThread(uint8_t wait_mode, uint8_t alertable, int64_t *interval)
{
	ktimer			timer;

	if (wait_mode != 0)
		panic("invalid wait_mode %d", wait_mode);

	KeInitializeTimer(&timer);
	KeSetTimer(&timer, *interval, NULL);
	KeWaitForSingleObject(&timer, 0, 0, alertable, NULL);

	return STATUS_SUCCESS;
}

static uint64_t
KeQueryInterruptTime(void)
{
	int			ticks;
	struct timeval		tv;

	getmicrouptime(&tv);

	ticks = tvtohz(&tv);

	return ticks * howmany(10000000, hz);
}

static struct thread *
KeGetCurrentThread(void)
{
	return curthread;
}

static int32_t
KeSetPriorityThread(td, pri)
	struct thread		*td;
	int32_t			pri;
{
	int32_t			old;

	if (td == NULL)
		return LOW_REALTIME_PRIORITY;

	if (td->td_priority <= PRI_MIN_KERN)
		old = HIGH_PRIORITY;
	else if (td->td_priority >= PRI_MAX_KERN)
		old = LOW_PRIORITY;
	else
		old = LOW_REALTIME_PRIORITY;

	thread_lock(td);
	if (pri == HIGH_PRIORITY)
		sched_prio(td, PRI_MIN_KERN);
	if (pri == LOW_REALTIME_PRIORITY)
		sched_prio(td, PRI_MIN_KERN +
		    (PRI_MAX_KERN - PRI_MIN_KERN) / 2);
	if (pri == LOW_PRIORITY)
		sched_prio(td, PRI_MAX_KERN);
	thread_unlock(td);

	return old;
}

static void
dummy()
{
	printf("ntoskrnl dummy called...\n");
}
image_patch_table ntoskrnl_functbl[] = {
	IMPORT_SFUNC(RtlZeroMemory, 2),
	IMPORT_SFUNC(RtlSecureZeroMemory, 2),
	IMPORT_SFUNC(RtlFillMemory, 3),
	IMPORT_SFUNC(RtlMoveMemory, 3),
	IMPORT_SFUNC(RtlCharToInteger, 3),
	IMPORT_SFUNC(RtlCopyMemory, 3),
	IMPORT_SFUNC(RtlCopyString, 2),
	IMPORT_SFUNC(RtlCompareMemory, 3),
	IMPORT_SFUNC(RtlEqualUnicodeString, 3),
	IMPORT_SFUNC(RtlCopyUnicodeString, 2),
	IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
	IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
	IMPORT_SFUNC(RtlInitAnsiString, 2),
	IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
	IMPORT_SFUNC(RtlInitUnicodeString, 2),
	IMPORT_SFUNC(RtlFreeAnsiString, 1),
	IMPORT_SFUNC(RtlFreeUnicodeString, 1),
	IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
	IMPORT_CFUNC(sprintf, 0),
	IMPORT_CFUNC(vsprintf, 0),
	IMPORT_CFUNC_MAP(_snprintf, snprintf, 0),
	IMPORT_CFUNC_MAP(_vsnprintf, vsnprintf, 0),
	IMPORT_CFUNC(DbgPrint, 0),
	IMPORT_SFUNC(DbgBreakPoint, 0),
	IMPORT_SFUNC(KeBugCheckEx, 5),
	IMPORT_CFUNC(strncmp, 0),
	IMPORT_CFUNC(strcmp, 0),
	IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0),
	IMPORT_CFUNC(strncpy, 0),
	IMPORT_CFUNC(strcpy, 0),
	IMPORT_CFUNC(strlen, 0),
	IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
	IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
	IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
	IMPORT_CFUNC_MAP(strncat, ntoskrnl_strncat, 0),
	IMPORT_CFUNC_MAP(strchr, index, 0),
	IMPORT_CFUNC_MAP(strrchr, rindex, 0),
	IMPORT_CFUNC(memcpy, 0),
	IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
	IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
	IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0),
	IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
	IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
	IMPORT_FFUNC(IofCallDriver, 2),
	IMPORT_FFUNC(IofCompleteRequest, 2),
	IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
	IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
	IMPORT_SFUNC(IoCancelIrp, 1),
	IMPORT_SFUNC(IoConnectInterrupt, 11),
	IMPORT_SFUNC(IoDisconnectInterrupt, 1),
	IMPORT_SFUNC(IoCreateDevice, 7),
	IMPORT_SFUNC(IoDeleteDevice, 1),
	IMPORT_SFUNC(IoGetAttachedDevice, 1),
	IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
	IMPORT_SFUNC(IoDetachDevice, 1),
	IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
	IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
	IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
	IMPORT_SFUNC(IoAllocateIrp, 2),
	IMPORT_SFUNC(IoReuseIrp, 2),
	IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
	IMPORT_SFUNC(IoFreeIrp, 1),
	IMPORT_SFUNC(IoInitializeIrp, 3),
	IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
	IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
	IMPORT_SFUNC(KeSynchronizeExecution, 3),
	IMPORT_SFUNC(KeWaitForSingleObject, 5),
	IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
	IMPORT_SFUNC(_allmul, 4),
	IMPORT_SFUNC(_alldiv, 4),
	IMPORT_SFUNC(_allrem, 4),
	IMPORT_RFUNC(_allshr, 0),
	IMPORT_RFUNC(_allshl, 0),
	IMPORT_SFUNC(_aullmul, 4),
	IMPORT_SFUNC(_aulldiv, 4),
	IMPORT_SFUNC(_aullrem, 4),
	IMPORT_RFUNC(_aullshr, 0),
	IMPORT_RFUNC(_aullshl, 0),
	IMPORT_CFUNC(atoi, 0),
	IMPORT_CFUNC(atol, 0),
	IMPORT_CFUNC(rand, 0),
	IMPORT_CFUNC(srand, 0),
	IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
	IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
	IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
	IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
	IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
	IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
	IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
	IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
	IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
	IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
	IMPORT_FFUNC(InterlockedPopEntrySList, 1),
	IMPORT_FFUNC(InitializeSListHead, 1),
	IMPORT_FFUNC(InterlockedPushEntrySList, 2),
	IMPORT_SFUNC(ExQueryDepthSList, 1),
	IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
	    InterlockedPopEntrySList, 1),
	IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
	    InterlockedPushEntrySList, 2),
	IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
	IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
	IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
	IMPORT_SFUNC(ExFreePoolWithTag, 2),
	IMPORT_SFUNC(ExFreePool, 1),
#ifdef __i386__
	IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1),
	IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel, 1),
	IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1),
#else
	/*
	 * For AMD64, we can get away with just mapping
	 * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
	 * because the calling conventions end up being the same.
	 * On i386, we have to be careful because KfAcquireSpinLock()
	 * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
	 */
	IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
	IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
	IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
#endif
	IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
	IMPORT_FFUNC(InterlockedIncrement, 1),
	IMPORT_FFUNC(InterlockedDecrement, 1),
	IMPORT_FFUNC(InterlockedExchange, 2),
	IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
	IMPORT_SFUNC(IoAllocateMdl, 5),
	IMPORT_SFUNC(IoFreeMdl, 1),
	IMPORT_SFUNC(MmAllocateContiguousMemory, 2 + 1),
	IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5 + 3),
	IMPORT_SFUNC(MmFreeContiguousMemory, 1),
	IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3),
	IMPORT_SFUNC(MmSizeOfMdl, 1),
	IMPORT_SFUNC(MmMapLockedPages, 2),
	IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
	IMPORT_SFUNC(MmUnmapLockedPages, 2),
	IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
	IMPORT_SFUNC(MmGetPhysicalAddress, 1),
	IMPORT_SFUNC(MmGetSystemRoutineAddress, 1),
	IMPORT_SFUNC(MmIsAddressValid, 1),
	IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
	IMPORT_SFUNC(MmUnmapIoSpace, 2),
	IMPORT_SFUNC(KeInitializeSpinLock, 1),
	IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
	IMPORT_SFUNC(IoOpenDeviceRegistryKey, 4),
	IMPORT_SFUNC(IoGetDeviceObjectPointer, 4),
	IMPORT_SFUNC(IoGetDeviceProperty, 5),
	IMPORT_SFUNC(IoAllocateWorkItem, 1),
	IMPORT_SFUNC(IoFreeWorkItem, 1),
	IMPORT_SFUNC(IoQueueWorkItem, 4),
	IMPORT_SFUNC(ExQueueWorkItem, 2),
	IMPORT_SFUNC(ntoskrnl_workitem, 2),
	IMPORT_SFUNC(KeInitializeMutex, 2),
	IMPORT_SFUNC(KeReleaseMutex, 2),
	IMPORT_SFUNC(KeReadStateMutex, 1),
	IMPORT_SFUNC(KeInitializeEvent, 3),
	IMPORT_SFUNC(KeSetEvent, 3),
	IMPORT_SFUNC(KeResetEvent, 1),
	IMPORT_SFUNC(KeClearEvent, 1),
	IMPORT_SFUNC(KeReadStateEvent, 1),
	IMPORT_SFUNC(KeInitializeTimer, 1),
	IMPORT_SFUNC(KeInitializeTimerEx, 2),
	IMPORT_SFUNC(KeSetTimer, 3),
	IMPORT_SFUNC(KeSetTimerEx, 4),
	IMPORT_SFUNC(KeCancelTimer, 1),
	IMPORT_SFUNC(KeReadStateTimer, 1),
	IMPORT_SFUNC(KeInitializeDpc, 3),
	IMPORT_SFUNC(KeInsertQueueDpc, 3),
	IMPORT_SFUNC(KeRemoveQueueDpc, 1),
	IMPORT_SFUNC(KeSetImportanceDpc, 2),
	IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
	IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
	IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
	IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
	IMPORT_FFUNC(ObfDereferenceObject, 1),
	IMPORT_SFUNC(ZwClose, 1),
	IMPORT_SFUNC(PsCreateSystemThread, 7),
	IMPORT_SFUNC(PsTerminateSystemThread, 1),
	IMPORT_SFUNC(IoWMIRegistrationControl, 2),
	IMPORT_SFUNC(WmiQueryTraceInformation, 5),
	IMPORT_CFUNC(WmiTraceMessage, 0),
	IMPORT_SFUNC(KeQuerySystemTime, 1),
	IMPORT_CFUNC(KeTickCount, 0),
	IMPORT_SFUNC(KeDelayExecutionThread, 3),
	IMPORT_SFUNC(KeQueryInterruptTime, 0),
	IMPORT_SFUNC(KeGetCurrentThread, 0),
	IMPORT_SFUNC(KeSetPriorityThread, 2),

	/*
	 * This last entry is a catch-all for any function we haven't
	 * implemented yet. The PE import list patching routine will
	 * use it for any function that doesn't have an explicit match
	 * in this table.
	 */

	{ NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },

	/* End of list. */
	{ NULL, NULL, NULL }
};