Annotation of src/sys/compat/ndis/subr_ntoskrnl.c, Revision 1.13
1.1 rittera 1: /*-
2: * Copyright (c) 2003
3: * Bill Paul <wpaul@windriver.com>. All rights reserved.
4: *
5: * Redistribution and use in source and binary forms, with or without
6: * modification, are permitted provided that the following conditions
7: * are met:
8: * 1. Redistributions of source code must retain the above copyright
9: * notice, this list of conditions and the following disclaimer.
10: * 2. Redistributions in binary form must reproduce the above copyright
11: * notice, this list of conditions and the following disclaimer in the
12: * documentation and/or other materials provided with the distribution.
13: * 3. All advertising materials mentioning features or use of this software
14: * must display the following acknowledgement:
15: * This product includes software developed by Bill Paul.
16: * 4. Neither the name of the author nor the names of any co-contributors
17: * may be used to endorse or promote products derived from this software
18: * without specific prior written permission.
19: *
20: * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23: * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30: * THE POSSIBILITY OF SUCH DAMAGE.
31: */
32:
33: #include <sys/cdefs.h>
1.2 rittera 34: #ifdef __FreeBSD__
1.1 rittera 35: __FBSDID("$FreeBSD: src/sys/compat/ndis/subr_ntoskrnl.c,v 1.43.2.5 2005/03/31 04:24:36 wpaul Exp $");
1.2 rittera 36: #endif
37: #ifdef __NetBSD__
1.13 ! ad 38: __KERNEL_RCSID(0, "$NetBSD: subr_ntoskrnl.c,v 1.12 2007/12/05 08:45:30 ad Exp $");
1.2 rittera 39: #endif
1.1 rittera 40:
1.2 rittera 41: #ifdef __FreeBSD__
1.1 rittera 42: #include <sys/ctype.h>
1.2 rittera 43: #endif
1.1 rittera 44: #include <sys/unistd.h>
45: #include <sys/param.h>
46: #include <sys/types.h>
47: #include <sys/errno.h>
48: #include <sys/systm.h>
49: #include <sys/malloc.h>
50: #include <sys/lock.h>
1.2 rittera 51: #ifdef __FreeBSD__
1.1 rittera 52: #include <sys/mutex.h>
1.2 rittera 53: #endif
1.1 rittera 54:
55: #include <sys/callout.h>
56: #if __FreeBSD_version > 502113
57: #include <sys/kdb.h>
58: #endif
59: #include <sys/kernel.h>
60: #include <sys/proc.h>
61: #include <sys/kthread.h>
62: #include <sys/module.h>
1.11 ad 63: #include <sys/atomic.h>
1.2 rittera 64: #ifdef __FreeBSD__
1.1 rittera 65: #include <machine/clock.h>
66: #include <machine/bus_memio.h>
67: #include <machine/bus_pio.h>
1.2 rittera 68: #endif
1.10 ad 69: #include <sys/bus.h>
1.1 rittera 70: #include <machine/stdarg.h>
71:
1.2 rittera 72: #ifdef __FreeBSD__
1.1 rittera 73: #include <sys/bus.h>
74: #include <sys/rman.h>
1.2 rittera 75: #endif
1.1 rittera 76:
1.2 rittera 77: #ifdef __NetBSD__
78: #include <uvm/uvm.h>
79: #include <uvm/uvm_param.h>
80: #include <uvm/uvm_pmap.h>
81: #include <sys/pool.h>
82: #include <sys/reboot.h> /* for AB_VERBOSE */
83: #else
1.1 rittera 84: #include <vm/vm.h>
85: #include <vm/vm_param.h>
86: #include <vm/pmap.h>
87: #include <vm/uma.h>
1.2 rittera 88: #endif
1.1 rittera 89:
90: #include <compat/ndis/pe_var.h>
91: #include <compat/ndis/ntoskrnl_var.h>
92: #include <compat/ndis/hal_var.h>
93: #include <compat/ndis/resource_var.h>
94: #include <compat/ndis/ndis_var.h>
1.3 rittera 95: #ifdef __NetBSD__
96: #include <compat/ndis/nbcompat.h>
97: #endif
1.1 rittera 98:
99: #define __regparm __attribute__((regparm(3)))
100:
1.3 rittera 101: #ifdef __NetBSD__
102: /* Turn on DbgPrint() from Windows Driver*/
103: #define boothowto AB_VERBOSE
104: #endif
105:
1.1 rittera 106: __stdcall static uint8_t RtlEqualUnicodeString(ndis_unicode_string *,
107: ndis_unicode_string *, uint8_t);
108: __stdcall static void RtlCopyUnicodeString(ndis_unicode_string *,
109: ndis_unicode_string *);
110: __stdcall static ndis_status RtlUnicodeStringToAnsiString(ndis_ansi_string *,
111: ndis_unicode_string *, uint8_t);
112: __stdcall static ndis_status RtlAnsiStringToUnicodeString(ndis_unicode_string *,
113: ndis_ansi_string *, uint8_t);
114: __stdcall static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
115: void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
116: __stdcall static irp *IoBuildAsynchronousFsdRequest(uint32_t,
117: device_object *, void *, uint32_t, uint64_t *, io_status_block *);
118: __stdcall static irp *IoBuildDeviceIoControlRequest(uint32_t,
119: device_object *, void *, uint32_t, void *, uint32_t,
120: uint8_t, nt_kevent *, io_status_block *);
121: __stdcall static irp *IoAllocateIrp(uint8_t, uint8_t);
122: __stdcall static void IoReuseIrp(irp *, uint32_t);
123: __stdcall static void IoFreeIrp(irp *);
124: __stdcall static void IoInitializeIrp(irp *, uint16_t, uint8_t);
125: __stdcall static irp *IoMakeAssociatedIrp(irp *, uint8_t);
126: __stdcall static uint32_t KeWaitForMultipleObjects(uint32_t,
127: nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
128: int64_t *, wait_block *);
129: static void ntoskrnl_wakeup(void *);
130: static void ntoskrnl_timercall(void *);
131: static void ntoskrnl_run_dpc(void *);
132: __stdcall static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
133: __stdcall static uint16_t READ_REGISTER_USHORT(uint16_t *);
134: __stdcall static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
135: __stdcall static uint32_t READ_REGISTER_ULONG(uint32_t *);
136: __stdcall static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
137: __stdcall static uint8_t READ_REGISTER_UCHAR(uint8_t *);
138: __stdcall static int64_t _allmul(int64_t, int64_t);
139: __stdcall static int64_t _alldiv(int64_t, int64_t);
140: __stdcall static int64_t _allrem(int64_t, int64_t);
141: __regparm static int64_t _allshr(int64_t, uint8_t);
142: __regparm static int64_t _allshl(int64_t, uint8_t);
143: __stdcall static uint64_t _aullmul(uint64_t, uint64_t);
144: __stdcall static uint64_t _aulldiv(uint64_t, uint64_t);
145: __stdcall static uint64_t _aullrem(uint64_t, uint64_t);
146: __regparm static uint64_t _aullshr(uint64_t, uint8_t);
147: __regparm static uint64_t _aullshl(uint64_t, uint8_t);
148: static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
149: static slist_entry *ntoskrnl_popsl(slist_header *);
150: __stdcall static void ExInitializePagedLookasideList(paged_lookaside_list *,
151: lookaside_alloc_func *, lookaside_free_func *,
152: uint32_t, size_t, uint32_t, uint16_t);
153: __stdcall static void ExDeletePagedLookasideList(paged_lookaside_list *);
154: __stdcall static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
155: lookaside_alloc_func *, lookaside_free_func *,
156: uint32_t, size_t, uint32_t, uint16_t);
157: __stdcall static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
158: __fastcall static slist_entry
159: *InterlockedPushEntrySList(REGARGS2(slist_header *head,
160: slist_entry *entry));
161: __fastcall static slist_entry *InterlockedPopEntrySList(REGARGS1(slist_header
162: *head));
163: __fastcall static slist_entry
164: *ExInterlockedPushEntrySList(REGARGS2(slist_header *head,
165: slist_entry *entry), kspin_lock *lock);
166: __fastcall static slist_entry
167: *ExInterlockedPopEntrySList(REGARGS2(slist_header *head,
168: kspin_lock *lock));
169: __stdcall static uint16_t
170: ExQueryDepthSList(slist_header *);
171: __fastcall static uint32_t
172: InterlockedIncrement(REGARGS1(volatile uint32_t *addend));
173: __fastcall static uint32_t
174: InterlockedDecrement(REGARGS1(volatile uint32_t *addend));
175: __fastcall static void
176: ExInterlockedAddLargeStatistic(REGARGS2(uint64_t *addend, uint32_t));
177: __stdcall static uint32_t MmSizeOfMdl(void *, size_t);
178: __stdcall static void MmBuildMdlForNonPagedPool(mdl *);
179: __stdcall static void *MmMapLockedPages(mdl *, uint8_t);
180: __stdcall static void *MmMapLockedPagesSpecifyCache(mdl *,
181: uint8_t, uint32_t, void *, uint32_t, uint32_t);
182: __stdcall static void MmUnmapLockedPages(void *, mdl *);
183: __stdcall static size_t RtlCompareMemory(const void *, const void *, size_t);
184: __stdcall static void RtlInitAnsiString(ndis_ansi_string *, char *);
185: __stdcall static void RtlInitUnicodeString(ndis_unicode_string *,
186: uint16_t *);
187: __stdcall static void RtlFreeUnicodeString(ndis_unicode_string *);
188: __stdcall static void RtlFreeAnsiString(ndis_ansi_string *);
189: __stdcall static ndis_status RtlUnicodeStringToInteger(ndis_unicode_string *,
190: uint32_t, uint32_t *);
191: static int atoi (const char *);
192: static long atol (const char *);
193: static int rand(void);
194: static void srand(unsigned int);
195: static void ntoskrnl_time(uint64_t *);
196: __stdcall static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
197: static void ntoskrnl_thrfunc(void *);
198: __stdcall static ndis_status PsCreateSystemThread(ndis_handle *,
199: uint32_t, void *, ndis_handle, void *, void *, void *);
200: __stdcall static ndis_status PsTerminateSystemThread(ndis_status);
201: __stdcall static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
202: uint32_t, void *, uint32_t *);
203: __stdcall static void KeInitializeMutex(kmutant *, uint32_t);
204: __stdcall static uint32_t KeReleaseMutex(kmutant *, uint8_t);
205: __stdcall static uint32_t KeReadStateMutex(kmutant *);
206: __stdcall static ndis_status ObReferenceObjectByHandle(ndis_handle,
207: uint32_t, void *, uint8_t, void **, void **);
208: __fastcall static void ObfDereferenceObject(REGARGS1(void *object));
209: __stdcall static uint32_t ZwClose(ndis_handle);
210: static void *ntoskrnl_memset(void *, int, size_t);
211: static funcptr ntoskrnl_findwrap(funcptr);
212: static uint32_t DbgPrint(char *, ...);
213: __stdcall static void DbgBreakPoint(void);
214: __stdcall static void dummy(void);
215:
1.3 rittera 216: #ifdef __FreeBSD__
1.1 rittera 217: static struct mtx ntoskrnl_dispatchlock;
1.3 rittera 218: #else /* __NetBSD__ */
219: static struct simplelock ntoskrnl_dispatchlock;
220: #define DISPATCH_LOCK() do {s = splnet(); simple_lock(&ntoskrnl_dispatchlock);} while(0)
221: #define DISPATCH_UNLOCK() do {simple_unlock(&ntoskrnl_dispatchlock); splx(s);} while(0)
222: #endif
223:
1.1 rittera 224: static kspin_lock ntoskrnl_global;
225: static kspin_lock ntoskrnl_cancellock;
226: static int ntoskrnl_kth = 0;
227: static struct nt_objref_head ntoskrnl_reflist;
1.2 rittera 228: #ifdef __FreeBSD__
1.1 rittera 229: static uma_zone_t mdl_zone;
1.2 rittera 230: #else
231: static struct pool mdl_pool;
232: #endif
1.1 rittera 233:
234: int
235: ntoskrnl_libinit()
236: {
237: image_patch_table *patch;
1.3 rittera 238: #ifdef __FreeBSD__
1.1 rittera 239: mtx_init(&ntoskrnl_dispatchlock,
240: "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF);
1.3 rittera 241: #else /* __NetBSD__ */
242: simple_lock_init(&ntoskrnl_dispatchlock);
243: #endif
1.1 rittera 244: KeInitializeSpinLock(&ntoskrnl_global);
245: KeInitializeSpinLock(&ntoskrnl_cancellock);
246: TAILQ_INIT(&ntoskrnl_reflist);
247:
248: patch = ntoskrnl_functbl;
249: while (patch->ipt_func != NULL) {
250: windrv_wrap((funcptr)patch->ipt_func,
251: (funcptr *)&patch->ipt_wrap);
252: patch++;
253: }
254:
255: /*
256: * MDLs are supposed to be variable size (they describe
257: * buffers containing some number of pages, but we don't
258: * know ahead of time how many pages that will be). But
259: * always allocating them off the heap is very slow. As
260: * a compromize, we create an MDL UMA zone big enough to
261: * handle any buffer requiring up to 16 pages, and we
262: * use those for any MDLs for buffers of 16 pages or less
263: * in size. For buffers larger than that (which we assume
264: * will be few and far between, we allocate the MDLs off
265: * the heap.
266: */
267:
1.2 rittera 268: #ifdef __FreeBSD__
1.1 rittera 269: mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
270: NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1.2 rittera 271: #else
1.8 ad 272: pool_init(&mdl_pool, MDL_ZONE_SIZE, 0, 0, 0, "winmdl", NULL,
273: IPL_VM);
1.2 rittera 274: #endif
1.1 rittera 275:
276: return(0);
277: }
278:
279: int
280: ntoskrnl_libfini()
281: {
282: image_patch_table *patch;
283:
284: patch = ntoskrnl_functbl;
285: while (patch->ipt_func != NULL) {
286: windrv_unwrap(patch->ipt_wrap);
287: patch++;
288: }
289:
1.2 rittera 290: #ifdef __FreeBSD__
1.1 rittera 291: uma_zdestroy(mdl_zone);
1.12 ad 292: mtx_destroy(&ntoskrnl_dispatchlock);
1.2 rittera 293: #else
294: pool_destroy(&mdl_pool);
1.12 ad 295: /* XXX destroy lock */
1.2 rittera 296: #endif
1.1 rittera 297:
298: return(0);
299: }
300:
/*
 * Trivial memset() wrapper.  We need a real, externally referenceable
 * symbol for the Windows-call wrapper; GCC only emits a local,
 * inlined implementation of memset.
 */
static void *
ntoskrnl_memset(void *buf, int ch, size_t size)
{
	return (memset(buf, ch, size));
}
313:
314: __stdcall static uint8_t
315: RtlEqualUnicodeString(str1, str2, caseinsensitive)
316: ndis_unicode_string *str1;
317: ndis_unicode_string *str2;
318: uint8_t caseinsensitive;
319: {
320: int i;
321:
322: if (str1->us_len != str2->us_len)
323: return(FALSE);
324:
325: for (i = 0; i < str1->us_len; i++) {
326: if (caseinsensitive == TRUE) {
327: if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
328: toupper((char)(str2->us_buf[i] & 0xFF)))
329: return(FALSE);
330: } else {
331: if (str1->us_buf[i] != str2->us_buf[i])
332: return(FALSE);
333: }
334: }
335:
336: return(TRUE);
337: }
338:
339: __stdcall static void
340: RtlCopyUnicodeString(dest, src)
341: ndis_unicode_string *dest;
342: ndis_unicode_string *src;
343: {
344:
345: if (dest->us_maxlen >= src->us_len)
346: dest->us_len = src->us_len;
347: else
348: dest->us_len = dest->us_maxlen;
349: memcpy(dest->us_buf, src->us_buf, dest->us_len);
350: return;
351: }
352:
353: __stdcall static ndis_status
354: RtlUnicodeStringToAnsiString(dest, src, allocate)
355: ndis_ansi_string *dest;
356: ndis_unicode_string *src;
357: uint8_t allocate;
358: {
359: char *astr = NULL;
360:
361: if (dest == NULL || src == NULL)
362: return(NDIS_STATUS_FAILURE);
363:
364: if (allocate == TRUE) {
365: if (ndis_unicode_to_ascii(src->us_buf, src->us_len, &astr))
366: return(NDIS_STATUS_FAILURE);
367: dest->nas_buf = astr;
368: dest->nas_len = dest->nas_maxlen = strlen(astr);
369: } else {
370: dest->nas_len = src->us_len / 2; /* XXX */
371: if (dest->nas_maxlen < dest->nas_len)
372: dest->nas_len = dest->nas_maxlen;
373: ndis_unicode_to_ascii(src->us_buf, dest->nas_len * 2,
374: &dest->nas_buf);
375: }
376: return (NDIS_STATUS_SUCCESS);
377: }
378:
379: __stdcall static ndis_status
380: RtlAnsiStringToUnicodeString(dest, src, allocate)
381: ndis_unicode_string *dest;
382: ndis_ansi_string *src;
383: uint8_t allocate;
384: {
385: uint16_t *ustr = NULL;
386:
387: if (dest == NULL || src == NULL)
388: return(NDIS_STATUS_FAILURE);
389:
390: if (allocate == TRUE) {
391: if (ndis_ascii_to_unicode(src->nas_buf, &ustr))
392: return(NDIS_STATUS_FAILURE);
393: dest->us_buf = ustr;
394: dest->us_len = dest->us_maxlen = strlen(src->nas_buf) * 2;
395: } else {
396: dest->us_len = src->nas_len * 2; /* XXX */
397: if (dest->us_maxlen < dest->us_len)
398: dest->us_len = dest->us_maxlen;
399: ndis_ascii_to_unicode(src->nas_buf, &dest->us_buf);
400: }
401: return (NDIS_STATUS_SUCCESS);
402: }
403:
404: __stdcall void *
1.4 christos 405: ExAllocatePoolWithTag(
1.5 christos 406: uint32_t pooltype,
1.4 christos 407: size_t len,
1.5 christos 408: uint32_t tag)
1.1 rittera 409: {
410: void *buf;
411:
412: buf = malloc(len, M_DEVBUF, M_NOWAIT);
413: if (buf == NULL)
414: return(NULL);
415: return(buf);
416: }
417:
418: __stdcall void
419: ExFreePool(buf)
420: void *buf;
421: {
422: free(buf, M_DEVBUF);
423: return;
424: }
425:
426: __stdcall uint32_t
427: IoAllocateDriverObjectExtension(drv, clid, extlen, ext)
428: driver_object *drv;
429: void *clid;
430: uint32_t extlen;
431: void **ext;
432: {
433: custom_extension *ce;
434:
435: ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
436: + extlen, 0);
437:
438: if (ce == NULL)
439: return(STATUS_INSUFFICIENT_RESOURCES);
440:
441: ce->ce_clid = clid;
442: INSERT_LIST_TAIL((&drv->dro_driverext->dre_usrext), (&ce->ce_list));
443:
444: *ext = (void *)(ce + 1);
445:
446: return(STATUS_SUCCESS);
447: }
448:
449: __stdcall void *
450: IoGetDriverObjectExtension(drv, clid)
451: driver_object *drv;
452: void *clid;
453: {
454: list_entry *e;
455: custom_extension *ce;
456:
1.3 rittera 457: printf("in IoGetDriverObjectExtension\n");
458:
1.1 rittera 459: e = drv->dro_driverext->dre_usrext.nle_flink;
460: while (e != &drv->dro_driverext->dre_usrext) {
461: ce = (custom_extension *)e;
462: if (ce->ce_clid == clid)
1.3 rittera 463: printf("found\n");
1.1 rittera 464: return((void *)(ce + 1));
465: e = e->nle_flink;
466: }
1.3 rittera 467: printf("not found\n");
1.1 rittera 468: return(NULL);
469: }
470:
471:
472: __stdcall uint32_t
1.4 christos 473: IoCreateDevice(
474: driver_object *drv,
475: uint32_t devextlen,
1.5 christos 476: unicode_string *devname,
1.4 christos 477: uint32_t devtype,
478: uint32_t devchars,
1.5 christos 479: uint8_t exclusive,
1.4 christos 480: device_object **newdev)
1.1 rittera 481: {
482: device_object *dev;
1.3 rittera 483:
484: #ifdef NDIS_LKM
485: printf("In IoCreateDevice: drv = %x, devextlen = %x\n", drv, devextlen);
486: #endif
487:
1.1 rittera 488: dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
1.3 rittera 489: #ifdef NDIS_LKM
490: printf("dev = %x\n", dev);
491: #endif
1.1 rittera 492: if (dev == NULL)
493: return(STATUS_INSUFFICIENT_RESOURCES);
494:
495: dev->do_type = devtype;
496: dev->do_drvobj = drv;
497: dev->do_currirp = NULL;
498: dev->do_flags = 0;
499:
500: if (devextlen) {
501: dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
502: devextlen, 0);
503:
504: if (dev->do_devext == NULL) {
505: ExFreePool(dev);
506: return(STATUS_INSUFFICIENT_RESOURCES);
507: }
508:
509: bzero(dev->do_devext, devextlen);
510: } else
511: dev->do_devext = NULL;
512:
513: dev->do_size = sizeof(device_object) + devextlen;
514: dev->do_refcnt = 1;
515: dev->do_attacheddev = NULL;
516: dev->do_nextdev = NULL;
517: dev->do_devtype = devtype;
518: dev->do_stacksize = 1;
519: dev->do_alignreq = 1;
520: dev->do_characteristics = devchars;
521: dev->do_iotimer = NULL;
522: KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);
523:
524: /*
525: * Vpd is used for disk/tape devices,
526: * but we don't support those. (Yet.)
527: */
528: dev->do_vpb = NULL;
529:
530: dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
531: sizeof(devobj_extension), 0);
532:
533: if (dev->do_devobj_ext == NULL) {
534: if (dev->do_devext != NULL)
535: ExFreePool(dev->do_devext);
536: ExFreePool(dev);
537: return(STATUS_INSUFFICIENT_RESOURCES);
538: }
539:
540: dev->do_devobj_ext->dve_type = 0;
541: dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
542: dev->do_devobj_ext->dve_devobj = dev;
543:
544: /*
545: * Attach this device to the driver object's list
546: * of devices. Note: this is not the same as attaching
547: * the device to the device stack. The driver's AddDevice
548: * routine must explicitly call IoAddDeviceToDeviceStack()
549: * to do that.
550: */
551:
552: if (drv->dro_devobj == NULL) {
553: drv->dro_devobj = dev;
554: dev->do_nextdev = NULL;
555: } else {
556: dev->do_nextdev = drv->dro_devobj;
557: drv->dro_devobj = dev;
558: }
559:
560: *newdev = dev;
561:
562: return(STATUS_SUCCESS);
563: }
564:
565: __stdcall void
566: IoDeleteDevice(dev)
567: device_object *dev;
568: {
569: device_object *prev;
570:
571: if (dev == NULL)
572: return;
573:
574: if (dev->do_devobj_ext != NULL)
575: ExFreePool(dev->do_devobj_ext);
576:
577: if (dev->do_devext != NULL)
578: ExFreePool(dev->do_devext);
579:
580: /* Unlink the device from the driver's device list. */
581:
582: prev = dev->do_drvobj->dro_devobj;
583: if (prev == dev)
584: dev->do_drvobj->dro_devobj = dev->do_nextdev;
585: else {
586: while (prev->do_nextdev != dev)
587: prev = prev->do_nextdev;
588: prev->do_nextdev = dev->do_nextdev;
589: }
590:
591: ExFreePool(dev);
592:
593: return;
594: }
595:
596: __stdcall device_object *
597: IoGetAttachedDevice(dev)
598: device_object *dev;
599: {
600: device_object *d;
601:
602: if (dev == NULL)
603: return (NULL);
604:
605: d = dev;
606:
607: while (d->do_attacheddev != NULL)
608: d = d->do_attacheddev;
609:
610: return (d);
611: }
612:
613: __stdcall static irp *
614: IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status)
615: uint32_t func;
616: device_object *dobj;
617: void *buf;
618: uint32_t len;
619: uint64_t *off;
620: nt_kevent *event;
621: io_status_block *status;
622: {
623: irp *ip;
624:
625: ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
626: if (ip == NULL)
627: return(NULL);
628: ip->irp_usrevent = event;
629:
630: return(ip);
631: }
632:
633: __stdcall static irp *
634: IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status)
635: uint32_t func;
636: device_object *dobj;
637: void *buf;
638: uint32_t len;
639: uint64_t *off;
640: io_status_block *status;
641: {
642: irp *ip;
643: io_stack_location *sl;
644:
645: ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
646: if (ip == NULL)
647: return(NULL);
648:
649: ip->irp_usriostat = status;
650: ip->irp_tail.irp_overlay.irp_thread = NULL;
651:
652: sl = IoGetNextIrpStackLocation(ip);
653: sl->isl_major = func;
654: sl->isl_minor = 0;
655: sl->isl_flags = 0;
656: sl->isl_ctl = 0;
657: sl->isl_devobj = dobj;
658: sl->isl_fileobj = NULL;
659: sl->isl_completionfunc = NULL;
660:
661: ip->irp_userbuf = buf;
662:
663: if (dobj->do_flags & DO_BUFFERED_IO) {
664: ip->irp_assoc.irp_sysbuf =
665: ExAllocatePoolWithTag(NonPagedPool, len, 0);
666: if (ip->irp_assoc.irp_sysbuf == NULL) {
667: IoFreeIrp(ip);
668: return(NULL);
669: }
670: bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
671: }
672:
673: if (dobj->do_flags & DO_DIRECT_IO) {
674: ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
675: if (ip->irp_mdl == NULL) {
676: if (ip->irp_assoc.irp_sysbuf != NULL)
677: ExFreePool(ip->irp_assoc.irp_sysbuf);
678: IoFreeIrp(ip);
679: return(NULL);
680: }
681: ip->irp_userbuf = NULL;
682: ip->irp_assoc.irp_sysbuf = NULL;
683: }
684:
685: if (func == IRP_MJ_READ) {
686: sl->isl_parameters.isl_read.isl_len = len;
687: if (off != NULL)
688: sl->isl_parameters.isl_read.isl_byteoff = *off;
689: else
690: sl->isl_parameters.isl_read.isl_byteoff = 0;
691: }
692:
693: if (func == IRP_MJ_WRITE) {
694: sl->isl_parameters.isl_write.isl_len = len;
695: if (off != NULL)
696: sl->isl_parameters.isl_write.isl_byteoff = *off;
697: else
698: sl->isl_parameters.isl_write.isl_byteoff = 0;
699: }
700:
701: return(ip);
702: }
703:
704: __stdcall static irp *
705: IoBuildDeviceIoControlRequest(iocode, dobj, ibuf, ilen, obuf, olen,
706: isinternal, event, status)
707: uint32_t iocode;
708: device_object *dobj;
709: void *ibuf;
710: uint32_t ilen;
711: void *obuf;
712: uint32_t olen;
713: uint8_t isinternal;
714: nt_kevent *event;
715: io_status_block *status;
716: {
717: irp *ip;
718: io_stack_location *sl;
719: uint32_t buflen;
720:
721: ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
722: if (ip == NULL)
723: return(NULL);
724: ip->irp_usrevent = event;
725: ip->irp_usriostat = status;
726: ip->irp_tail.irp_overlay.irp_thread = NULL;
727:
728: sl = IoGetNextIrpStackLocation(ip);
729: sl->isl_major = isinternal == TRUE ?
730: IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
731: sl->isl_minor = 0;
732: sl->isl_flags = 0;
733: sl->isl_ctl = 0;
734: sl->isl_devobj = dobj;
735: sl->isl_fileobj = NULL;
736: sl->isl_completionfunc = NULL;
737: sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
738: sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
739: sl->isl_parameters.isl_ioctl.isl_obuflen = olen;
740:
741: switch(IO_METHOD(iocode)) {
742: case METHOD_BUFFERED:
743: if (ilen > olen)
744: buflen = ilen;
745: else
746: buflen = olen;
747: if (buflen) {
748: ip->irp_assoc.irp_sysbuf =
749: ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
750: if (ip->irp_assoc.irp_sysbuf == NULL) {
751: IoFreeIrp(ip);
752: return(NULL);
753: }
754: }
755: if (ilen && ibuf != NULL) {
756: bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
757: bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
758: buflen - ilen);
759: } else
760: bzero(ip->irp_assoc.irp_sysbuf, ilen);
761: ip->irp_userbuf = obuf;
762: break;
763: case METHOD_IN_DIRECT:
764: case METHOD_OUT_DIRECT:
765: if (ilen && ibuf != NULL) {
766: ip->irp_assoc.irp_sysbuf =
767: ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
768: if (ip->irp_assoc.irp_sysbuf == NULL) {
769: IoFreeIrp(ip);
770: return(NULL);
771: }
772: bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
773: }
774: if (olen && obuf != NULL) {
775: ip->irp_mdl = IoAllocateMdl(obuf, olen,
776: FALSE, FALSE, ip);
777: /*
778: * Normally we would MmProbeAndLockPages()
779: * here, but we don't have to in our
780: * imlementation.
781: */
782: }
783: break;
784: case METHOD_NEITHER:
785: ip->irp_userbuf = obuf;
786: sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
787: break;
788: default:
789: break;
790: }
791:
792: /*
793: * Ideally, we should associate this IRP with the calling
794: * thread here.
795: */
796:
797: return (ip);
798: }
799:
800: __stdcall static irp *
1.4 christos 801: IoAllocateIrp(
802: uint8_t stsize,
1.5 christos 803: uint8_t chargequota)
1.1 rittera 804: {
805: irp *i;
806:
807: i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
808: if (i == NULL)
809: return (NULL);
810:
811: IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);
812:
813: return (i);
814: }
815:
816: __stdcall static irp *
817: IoMakeAssociatedIrp(ip, stsize)
818: irp *ip;
819: uint8_t stsize;
820: {
821: irp *associrp;
1.3 rittera 822: #ifdef __NetBSD__
823: int s;
824: #endif
1.1 rittera 825:
826: associrp = IoAllocateIrp(stsize, FALSE);
827: if (associrp == NULL)
828: return(NULL);
829:
1.3 rittera 830: #ifdef __NetBSD__
831: DISPATCH_LOCK();
832: #else
1.1 rittera 833: mtx_lock(&ntoskrnl_dispatchlock);
1.3 rittera 834: #endif
835:
1.1 rittera 836: associrp->irp_flags |= IRP_ASSOCIATED_IRP;
837: associrp->irp_tail.irp_overlay.irp_thread =
838: ip->irp_tail.irp_overlay.irp_thread;
839: associrp->irp_assoc.irp_master = ip;
1.3 rittera 840:
841: #ifdef __FreeBSD__
1.1 rittera 842: mtx_unlock(&ntoskrnl_dispatchlock);
1.3 rittera 843: #else /* __NetBSD__ */
844: DISPATCH_UNLOCK();
845: #endif
1.1 rittera 846:
847: return(associrp);
848: }
849:
850: __stdcall static void
851: IoFreeIrp(ip)
852: irp *ip;
853: {
854: ExFreePool(ip);
855: return;
856: }
857:
858: __stdcall static void
859: IoInitializeIrp(io, psize, ssize)
860: irp *io;
861: uint16_t psize;
862: uint8_t ssize;
863: {
864: bzero((char *)io, IoSizeOfIrp(ssize));
865: io->irp_size = psize;
866: io->irp_stackcnt = ssize;
867: io->irp_currentstackloc = ssize;
868: INIT_LIST_HEAD(&io->irp_thlist);
869: io->irp_tail.irp_overlay.irp_csl =
870: (io_stack_location *)(io + 1) + ssize;
871:
872: return;
873: }
874:
875: __stdcall static void
876: IoReuseIrp(ip, status)
877: irp *ip;
878: uint32_t status;
879: {
880: uint8_t allocflags;
881:
882: allocflags = ip->irp_allocflags;
883: IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
884: ip->irp_iostat.isb_status = status;
885: ip->irp_allocflags = allocflags;
886:
887: return;
888: }
889:
890: __stdcall void
891: IoAcquireCancelSpinLock(irql)
892: uint8_t *irql;
893: {
894: KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
895: return;
896: }
897:
898: __stdcall void
899: IoReleaseCancelSpinLock(irql)
900: uint8_t irql;
901: {
902: KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
903: return;
904: }
905:
906: __stdcall uint8_t
907: IoCancelIrp(irp *ip)
908: {
909: cancel_func cfunc;
910:
911: IoAcquireCancelSpinLock(&ip->irp_cancelirql);
912: cfunc = IoSetCancelRoutine(ip, NULL);
913: ip->irp_cancel = TRUE;
914: if (ip->irp_cancelfunc == NULL) {
915: IoReleaseCancelSpinLock(ip->irp_cancelirql);
916: return(FALSE);
917: }
918: MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
919: return(TRUE);
920: }
921:
922: __fastcall uint32_t
923: IofCallDriver(REGARGS2(device_object *dobj, irp *ip))
924: {
925: driver_object *drvobj;
926: io_stack_location *sl;
927: uint32_t status;
928: driver_dispatch disp;
929:
930: drvobj = dobj->do_drvobj;
931:
932: if (ip->irp_currentstackloc <= 0)
933: panic("IoCallDriver(): out of stack locations");
934:
935: IoSetNextIrpStackLocation(ip);
936: sl = IoGetCurrentIrpStackLocation(ip);
937:
938: sl->isl_devobj = dobj;
939:
940: disp = drvobj->dro_dispatch[sl->isl_major];
941: status = MSCALL2(disp, dobj, ip);
942:
943: return(status);
944: }
945:
946: __fastcall void
947: IofCompleteRequest(REGARGS2(irp *ip, uint8_t prioboost))
948: {
949: uint32_t i;
950: uint32_t status;
951: device_object *dobj;
952: io_stack_location *sl;
953: completion_func cf;
954:
955: ip->irp_pendingreturned =
956: IoGetCurrentIrpStackLocation(ip)->isl_ctl & SL_PENDING_RETURNED;
957: sl = (io_stack_location *)(ip + 1);
958:
959: for (i = ip->irp_currentstackloc; i < (uint32_t)ip->irp_stackcnt; i++) {
960: if (ip->irp_currentstackloc < ip->irp_stackcnt - 1) {
961: IoSkipCurrentIrpStackLocation(ip);
962: dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
963: } else
964: dobj = NULL;
965:
966: if (sl[i].isl_completionfunc != NULL &&
967: ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
968: sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
969: (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
970: sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
971: (ip->irp_cancel == TRUE &&
972: sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
973: cf = sl->isl_completionfunc;
974: status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
975: if (status == STATUS_MORE_PROCESSING_REQUIRED)
976: return;
977: }
978:
979: if (IoGetCurrentIrpStackLocation(ip)->isl_ctl &
980: SL_PENDING_RETURNED)
981: ip->irp_pendingreturned = TRUE;
982: }
983:
984: /* Handle any associated IRPs. */
985:
986: if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
987: uint32_t masterirpcnt;
988: irp *masterirp;
989: mdl *m;
990:
991: masterirp = ip->irp_assoc.irp_master;
992: masterirpcnt = FASTCALL1(InterlockedDecrement,
993: &masterirp->irp_assoc.irp_irpcnt);
994:
995: while ((m = ip->irp_mdl) != NULL) {
996: ip->irp_mdl = m->mdl_next;
997: IoFreeMdl(m);
998: }
999: IoFreeIrp(ip);
1000: if (masterirpcnt == 0)
1001: IoCompleteRequest(masterirp, IO_NO_INCREMENT);
1002: return;
1003: }
1004:
1005: /* With any luck, these conditions will never arise. */
1006:
1007: if (ip->irp_flags & (IRP_PAGING_IO|IRP_CLOSE_OPERATION)) {
1008: if (ip->irp_usriostat != NULL)
1009: *ip->irp_usriostat = ip->irp_iostat;
1010: if (ip->irp_usrevent != NULL)
1011: KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
1012: if (ip->irp_flags & IRP_PAGING_IO) {
1013: if (ip->irp_mdl != NULL)
1014: IoFreeMdl(ip->irp_mdl);
1015: IoFreeIrp(ip);
1016: }
1017: }
1018:
1019: return;
1020: }
1021:
/*
 * Attach device object 'src' on top of the stack containing 'dst',
 * emulating NT's IoAttachDeviceToDeviceStack().  Returns the device
 * that was previously topmost (the one 'src' now sits above).  The
 * dispatcher lock serializes all updates to the attachment chain.
 */
__stdcall device_object *
IoAttachDeviceToDeviceStack(src, dst)
	device_object		*src;
	device_object		*dst;
{
	device_object		*attached;
#ifdef __NetBSD__
	int			s;
#endif

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	/* Find the current top of the stack and hook 'src' above it. */
	attached = IoGetAttachedDevice(dst);
	attached->do_attacheddev = src;
	src->do_attacheddev = NULL;
	/* One more layer than the object we attached onto. */
	src->do_stacksize = attached->do_stacksize + 1;

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(attached);
}
1051:
/*
 * Detach the device immediately below 'topdev' from the device
 * stack and fix up the stack-size counts of everything beneath it.
 * No-op if nothing is attached.  Dispatcher lock guards the chain.
 */
__stdcall void
IoDetachDevice(topdev)
	device_object		*topdev;
{
	device_object		*tail;
#ifdef __NetBSD__
	int			s;
#endif

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	/* First, break the chain. */
	tail = topdev->do_attacheddev;
	if (tail == NULL) {
#ifdef __FreeBSD__
		mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
		DISPATCH_UNLOCK();
#endif
		return;
	}
	/* Unlink 'tail' by pointing topdev past it. */
	topdev->do_attacheddev = tail->do_attacheddev;
	topdev->do_refcnt--;

	/* Now reduce the stacksize count for the tail objects. */

	tail = topdev->do_attacheddev;
	while (tail != NULL) {
		tail->do_stacksize--;
		tail = tail->do_attacheddev;
	}

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return;
}
1096:
/* Always called with dispatcher lock held. */
/*
 * Mark the dispatcher object signalled and resume threads blocked
 * on it.  Notification objects wake every waiter on the wait list;
 * synchronization objects wake only the first.
 */
static void
ntoskrnl_wakeup(arg)
	void			*arg;
{
	nt_dispatch_header	*obj;
	wait_block		*w;
	list_entry		*e;
#ifdef __FreeBSD__
	struct thread		*td;
#endif

	obj = arg;

	obj->dh_sigstate = TRUE;
	e = obj->dh_waitlisthead.nle_flink;
	while (e != &obj->dh_waitlisthead) {
		/* The wait block begins with its list entry. */
		w = (wait_block *)e;
		/* TODO: is this correct? NOTE(review): on NetBSD we resume
		 * curproc rather than the waiter recorded in the wait
		 * block -- confirm this wakes the intended thread. */
#ifdef __FreeBSD__
		td = w->wb_kthread;
		ndis_thresume(td->td_proc);
#else
		ndis_thresume(curproc);
#endif
		/*
		 * For synchronization objects, only wake up
		 * the first waiter.
		 */
		if (obj->dh_type == EVENT_TYPE_SYNC)
			break;
		e = e->nle_flink;
	}

	return;
}
1133:
1134: static void
1135: ntoskrnl_time(tval)
1136: uint64_t *tval;
1137: {
1138: struct timespec ts;
1.3 rittera 1139: #ifdef __NetBSD__
1140: struct timeval tv;
1141: microtime(&tv);
1142: TIMEVAL_TO_TIMESPEC(&tv,&ts);
1143: #else
1144: nanotime(&ts);
1145: #endif
1.1 rittera 1146:
1147: *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
1.2 rittera 1148: (uint64_t)11644473600ULL;
1.1 rittera 1149:
1150: return;
1151: }
1152:
1153: /*
1154: * KeWaitForSingleObject() is a tricky beast, because it can be used
1155: * with several different object types: semaphores, timers, events,
1156: * mutexes and threads. Semaphores don't appear very often, but the
1157: * other object types are quite common. KeWaitForSingleObject() is
1158: * what's normally used to acquire a mutex, and it can be used to
1159: * wait for a thread termination.
1160: *
1161: * The Windows NDIS API is implemented in terms of Windows kernel
1162: * primitives, and some of the object manipulation is duplicated in
1163: * NDIS. For example, NDIS has timers and events, which are actually
1164: * Windows kevents and ktimers. Now, you're supposed to only use the
1165: * NDIS variants of these objects within the confines of the NDIS API,
1166: * but there are some naughty developers out there who will use
1167: * KeWaitForSingleObject() on NDIS timer and event objects, so we
1168: * have to support that as well. Consequently, our NDIS timer and event
1169: * code has to be closely tied into our ntoskrnl timer and event code,
1170: * just as it is in Windows.
1171: *
1172: * KeWaitForSingleObject() may do different things for different kinds
1173: * of objects:
1174: *
1175: * - For events, we check if the event has been signalled. If the
1176: * event is already in the signalled state, we just return immediately,
1177: * otherwise we wait for it to be set to the signalled state by someone
1178: * else calling KeSetEvent(). Events can be either synchronization or
1179: * notification events.
1180: *
1181: * - For timers, if the timer has already fired and the timer is in
1182: * the signalled state, we just return, otherwise we wait on the
1183: * timer. Unlike an event, timers get signalled automatically when
1184: * they expire rather than someone having to trip them manually.
1185: * Timers initialized with KeInitializeTimer() are always notification
1186: * events: KeInitializeTimerEx() lets you initialize a timer as
1187: * either a notification or synchronization event.
1188: *
1189: * - For mutexes, we try to acquire the mutex and if we can't, we wait
1190: * on the mutex until it's available and then grab it. When a mutex is
1191: * released, it enters the signaled state, which wakes up one of the
1192: * threads waiting to acquire it. Mutexes are always synchronization
1193: * events.
1194: *
1195: * - For threads, the only thing we do is wait until the thread object
1196: * enters a signalled state, which occurs when the thread terminates.
1197: * Threads are always notification events.
1198: *
1199: * A notification event wakes up all threads waiting on an object. A
1200: * synchronization event wakes up just one. Also, a synchronization event
1201: * is auto-clearing, which means we automatically set the event back to
1202: * the non-signalled state once the wakeup is done.
1203: */
1204:
/*
 * Wait for a single dispatcher object (event, timer, mutex,
 * semaphore or thread) to become signalled.  'duetime' is in
 * 100ns units: absolute Windows time if positive, relative if
 * negative, infinite wait if NULL.  See the block comment above
 * for the full per-object-type semantics.  Returns STATUS_SUCCESS,
 * STATUS_TIMEOUT or STATUS_INVALID_PARAMETER.  'reason', 'mode'
 * and 'alertable' are accepted for API compatibility but unused.
 */
__stdcall uint32_t
KeWaitForSingleObject(
	nt_dispatch_header *obj,
	uint32_t reason,
	uint32_t mode,
	uint8_t alertable,
	int64_t *duetime)
{
#ifdef __FreeBSD__
	struct thread		*td = curthread;
#endif
	kmutant			*km;
	wait_block		w;
	struct timeval		tv;
	int			error = 0;
	uint64_t		curtime;
#ifdef __NetBSD__
	int			s;
#endif

	if (obj == NULL)
		return(STATUS_INVALID_PARAMETER);

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	/*
	 * See if the object is a mutex. If so, and we already own
	 * it, then just increment the acquisition count and return.
	 *
	 * For any other kind of object, see if it's already in the
	 * signalled state, and if it is, just return. If the object
	 * is marked as a synchronization event, reset the state to
	 * unsignalled.
	 */

	if (obj->dh_size == OTYPE_MUTEX) {
		km = (kmutant *)obj;
		if (km->km_ownerthread == NULL ||
#ifdef __FreeBSD__
		    km->km_ownerthread == curthread->td_proc) {
#else
		    km->km_ownerthread == curproc) {
#endif
			/* Free or recursively reacquired: grab it now. */
			obj->dh_sigstate = FALSE;
			km->km_acquirecnt++;
#ifdef __FreeBSD__
			km->km_ownerthread = curthread->td_proc;
#else
			km->km_ownerthread = curproc;
#endif

#ifdef __FreeBSD__
			mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
			DISPATCH_UNLOCK();
#endif
			return (STATUS_SUCCESS);
		}
	} else if (obj->dh_sigstate == TRUE) {
		/* Sync events auto-clear on a satisfied wait. */
		if (obj->dh_type == EVENT_TYPE_SYNC)
			obj->dh_sigstate = FALSE;

#ifdef __FreeBSD__
		mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
		DISPATCH_UNLOCK();
#endif
		return (STATUS_SUCCESS);
	}

	/* Queue ourselves on the object's wait list. */
	w.wb_object = obj;
#ifdef __FreeBSD__
	w.wb_kthread = td;
#endif

	INSERT_LIST_TAIL((&obj->dh_waitlisthead), (&w.wb_waitlist));

	/*
	 * The timeout value is specified in 100 nanosecond units
	 * and can be a positive or negative number. If it's positive,
	 * then the duetime is absolute, and we need to convert it
	 * to an absolute offset relative to now in order to use it.
	 * If it's negative, then the duetime is relative and we
	 * just have to convert the units.
	 */

	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	/* Sleep; ntoskrnl_wakeup() resumes us when the object fires. */
#ifdef __FreeBSD__
	error = ndis_thsuspend(td->td_proc, &ntoskrnl_dispatchlock,
	    duetime == NULL ? 0 : tvtohz(&tv));
#else
	error = ndis_thsuspend(curproc, &ntoskrnl_dispatchlock,
	    duetime == NULL ? 0 : tvtohz(&tv));
#endif

	/* We timed out. Leave the object alone and return status. */

	if (error == EWOULDBLOCK) {
		REMOVE_LIST_ENTRY((&w.wb_waitlist));
#ifdef __FreeBSD__
		mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
		DISPATCH_UNLOCK();
#endif
		return(STATUS_TIMEOUT);
	}

	/*
	 * Mutexes are always synchronization objects, which means
	 * if several threads are waiting to acquire it, only one will
	 * be woken up. If that one is us, and the mutex is up for grabs,
	 * grab it.
	 */

	if (obj->dh_size == OTYPE_MUTEX) {
		km = (kmutant *)obj;
		if (km->km_ownerthread == NULL) {
#ifdef __FreeBSD__
			km->km_ownerthread = curthread->td_proc;
#else
			km->km_ownerthread = curproc;
#endif
			km->km_acquirecnt++;
		}
	}

	/* Sync events auto-clear once the wait is satisfied. */
	if (obj->dh_type == EVENT_TYPE_SYNC)
		obj->dh_sigstate = FALSE;
	REMOVE_LIST_ENTRY((&w.wb_waitlist));

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(STATUS_SUCCESS);
}
1363:
1364: __stdcall static uint32_t
1.4 christos 1365: KeWaitForMultipleObjects(
1366: uint32_t cnt,
1367: nt_dispatch_header *obj[],
1368: uint32_t wtype,
1.5 christos 1369: uint32_t reason,
1370: uint32_t mode,
1371: uint8_t alertable,
1.4 christos 1372: int64_t *duetime,
1373: wait_block *wb_array)
1.1 rittera 1374: {
1.2 rittera 1375: #ifdef __FreeBSD__
1.1 rittera 1376: struct thread *td = curthread;
1.2 rittera 1377: #endif
1.1 rittera 1378: kmutant *km;
1379: wait_block _wb_array[THREAD_WAIT_OBJECTS];
1380: wait_block *w;
1381: struct timeval tv;
1382: int i, wcnt = 0, widx = 0, error = 0;
1383: uint64_t curtime;
1384: struct timespec t1, t2;
1.3 rittera 1385: #ifdef __NetBSD__
1386: struct timeval tv1,tv2;
1387: int s;
1388: #endif
1389:
1.1 rittera 1390:
1391: if (cnt > MAX_WAIT_OBJECTS)
1392: return(STATUS_INVALID_PARAMETER);
1393: if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
1394: return(STATUS_INVALID_PARAMETER);
1395:
1.3 rittera 1396: #ifdef __NetBSD__
1397: DISPATCH_LOCK();
1398: #else
1.1 rittera 1399: mtx_lock(&ntoskrnl_dispatchlock);
1.3 rittera 1400: #endif
1.1 rittera 1401:
1402: if (wb_array == NULL)
1403: w = &_wb_array[0];
1404: else
1405: w = wb_array;
1406:
1407: /* First pass: see if we can satisfy any waits immediately. */
1408:
1409: for (i = 0; i < cnt; i++) {
1410: if (obj[i]->dh_size == OTYPE_MUTEX) {
1411: km = (kmutant *)obj[i];
1412: if (km->km_ownerthread == NULL ||
1.2 rittera 1413: #ifdef __FreeBSD__
1.1 rittera 1414: km->km_ownerthread == curthread->td_proc) {
1.2 rittera 1415: #else
1416: km->km_ownerthread == curproc) {
1417: #endif
1.1 rittera 1418: obj[i]->dh_sigstate = FALSE;
1419: km->km_acquirecnt++;
1.2 rittera 1420: #ifdef __FreeBSD__
1.1 rittera 1421: km->km_ownerthread = curthread->td_proc;
1.2 rittera 1422: #else
1423: km->km_ownerthread = curproc;
1424: #endif
1.1 rittera 1425: if (wtype == WAITTYPE_ANY) {
1.3 rittera 1426: #ifdef __FreeBSD__
1.1 rittera 1427: mtx_unlock(&ntoskrnl_dispatchlock);
1.3 rittera 1428: #else /* __NetBSD__ */
1429: DISPATCH_UNLOCK();
1430: #endif
1.1 rittera 1431: return (STATUS_WAIT_0 + i);
1432: }
1433: }
1434: } else if (obj[i]->dh_sigstate == TRUE) {
1435: if (obj[i]->dh_type == EVENT_TYPE_SYNC)
1436: obj[i]->dh_sigstate = FALSE;
1437: if (wtype == WAITTYPE_ANY) {
1.3 rittera 1438: #ifdef __FreeBSD__
1.1 rittera 1439: mtx_unlock(&ntoskrnl_dispatchlock);
1.3 rittera 1440: #else /* __NetBSD__ */
1441: DISPATCH_UNLOCK();
1442: #endif
1.1 rittera 1443: return (STATUS_WAIT_0 + i);
1444: }
1445: }
1446: }
1447:
1448: /*
1449: * Second pass: set up wait for anything we can't
1450: * satisfy immediately.
1451: */
1452:
1453: for (i = 0; i < cnt; i++) {
1454: if (obj[i]->dh_sigstate == TRUE)
1455: continue;
1456: INSERT_LIST_TAIL((&obj[i]->dh_waitlisthead),
1457: (&w[i].wb_waitlist));
1.2 rittera 1458: #ifdef __FreeBSD__
1.1 rittera 1459: w[i].wb_kthread = td;
1.2 rittera 1460: #endif
1.1 rittera 1461: w[i].wb_object = obj[i];
1462: wcnt++;
1463: }
1464:
1465: if (duetime != NULL) {
1466: if (*duetime < 0) {
1467: tv.tv_sec = - (*duetime) / 10000000;
1468: tv.tv_usec = (- (*duetime) / 10) -
1469: (tv.tv_sec * 1000000);
1470: } else {
1471: ntoskrnl_time(&curtime);
1472: if (*duetime < curtime)
1473: tv.tv_sec = tv.tv_usec = 0;
1474: else {
1475: tv.tv_sec = ((*duetime) - curtime) / 10000000;
1476: tv.tv_usec = ((*duetime) - curtime) / 10 -
1477: (tv.tv_sec * 1000000);
1478: }
1479: }
1480: }
1481:
1482: while (wcnt) {
1.3 rittera 1483: #ifdef __FreeBSD__
1484: nanotime(&t1);
1485: #else
1486: microtime(&tv1);
1487: TIMEVAL_TO_TIMESPEC(&tv1,&t1);
1488: #endif
1.1 rittera 1489:
1.2 rittera 1490: #ifdef __FreeBSD__
1.1 rittera 1491: error = ndis_thsuspend(td->td_proc, &ntoskrnl_dispatchlock,
1492: duetime == NULL ? 0 : tvtohz(&tv));
1.2 rittera 1493: #else
1494: error = ndis_thsuspend(curproc, &ntoskrnl_dispatchlock,
1495: duetime == NULL ? 0 : tvtohz(&tv));
1496: #endif
1.3 rittera 1497: #ifdef __FreeBSD__
1498: nanotime(&t2);
1499: #else
1500: microtime(&tv2);
1501: TIMEVAL_TO_TIMESPEC(&tv2,&t2);
1502: #endif
1.1 rittera 1503:
1504: for (i = 0; i < cnt; i++) {
1505: if (obj[i]->dh_size == OTYPE_MUTEX) {
1506: km = (kmutant *)obj;
1507: if (km->km_ownerthread == NULL) {
1508: km->km_ownerthread =
1.2 rittera 1509: #ifdef __FreeBSD__
1.1 rittera 1510: curthread->td_proc;
1.2 rittera 1511: #else
1512: curproc;
1513: #endif
1.1 rittera 1514: km->km_acquirecnt++;
1515: }
1516: }
1517: if (obj[i]->dh_sigstate == TRUE) {
1518: widx = i;
1519: if (obj[i]->dh_type == EVENT_TYPE_SYNC)
1520: obj[i]->dh_sigstate = FALSE;
1521: REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
1522: wcnt--;
1523: }
1524: }
1525:
1526: if (error || wtype == WAITTYPE_ANY)
1527: break;
1528:
1529: if (duetime != NULL) {
1530: tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
1531: tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
1532: }
1533: }
1534:
1535: if (wcnt) {
1536: for (i = 0; i < cnt; i++)
1537: REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
1538: }
1539:
1540: if (error == EWOULDBLOCK) {
1.3 rittera 1541: #ifdef __FreeBSD__
1.1 rittera 1542: mtx_unlock(&ntoskrnl_dispatchlock);
1.3 rittera 1543: #else /* __NetBSD__ */
1544: DISPATCH_UNLOCK();
1545: #endif
1.1 rittera 1546: return(STATUS_TIMEOUT);
1547: }
1548:
1549: if (wtype == WAITTYPE_ANY && wcnt) {
1.3 rittera 1550: #ifdef __FreeBSD__
1.1 rittera 1551: mtx_unlock(&ntoskrnl_dispatchlock);
1.3 rittera 1552: #else /* __NetBSD__ */
1553: DISPATCH_UNLOCK();
1554: #endif
1.1 rittera 1555: return(STATUS_WAIT_0 + widx);
1556: }
1557:
1.3 rittera 1558: #ifdef __FreeBSD__
1.1 rittera 1559: mtx_unlock(&ntoskrnl_dispatchlock);
1.3 rittera 1560: #else /* __NetBSD__ */
1561: DISPATCH_UNLOCK();
1562: #endif
1.1 rittera 1563:
1564: return(STATUS_SUCCESS);
1565: }
1566:
/*
 * NT register-access shim: 16-bit write to a memory-mapped device
 * register; 'reg' is the register's kernel VA, funneled through
 * bus_space so it works with any bus backend.
 */
__stdcall static void
WRITE_REGISTER_USHORT(reg, val)
	uint16_t		*reg;
	uint16_t		val;
{
	bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}
1575:
/* NT register-access shim: 16-bit read of a memory-mapped register. */
__stdcall static uint16_t
READ_REGISTER_USHORT(reg)
	uint16_t		*reg;
{
	return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}
1582:
/* NT register-access shim: 32-bit write to a memory-mapped register. */
__stdcall static void
WRITE_REGISTER_ULONG(reg, val)
	uint32_t		*reg;
	uint32_t		val;
{
	bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}
1591:
/* NT register-access shim: 32-bit read of a memory-mapped register. */
__stdcall static uint32_t
READ_REGISTER_ULONG(reg)
	uint32_t		*reg;
{
	return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}
1598:
/* NT register-access shim: 8-bit read of a memory-mapped register. */
__stdcall static uint8_t
READ_REGISTER_UCHAR(reg)
	uint8_t			*reg;
{
	return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}
1605:
/* NT register-access shim: 8-bit write to a memory-mapped register. */
__stdcall static void
WRITE_REGISTER_UCHAR(reg, val)
	uint8_t			*reg;
	uint8_t			val;
{
	bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}
1614:
1615: __stdcall static int64_t
1616: _allmul(a, b)
1617: int64_t a;
1618: int64_t b;
1619: {
1620: return (a * b);
1621: }
1622:
1623: __stdcall static int64_t
1624: _alldiv(a, b)
1625: int64_t a;
1626: int64_t b;
1627: {
1628: return (a / b);
1629: }
1630:
1631: __stdcall static int64_t
1632: _allrem(a, b)
1633: int64_t a;
1634: int64_t b;
1635: {
1636: return (a % b);
1637: }
1638:
1639: __stdcall static uint64_t
1640: _aullmul(a, b)
1641: uint64_t a;
1642: uint64_t b;
1643: {
1644: return (a * b);
1645: }
1646:
1647: __stdcall static uint64_t
1648: _aulldiv(a, b)
1649: uint64_t a;
1650: uint64_t b;
1651: {
1652: return (a / b);
1653: }
1654:
1655: __stdcall static uint64_t
1656: _aullrem(a, b)
1657: uint64_t a;
1658: uint64_t b;
1659: {
1660: return (a % b);
1661: }
1662:
/* MSVC runtime shim: 64-bit signed shift-left, count in low byte. */
__regparm static int64_t
_allshl(a, b)
	int64_t			a;
	uint8_t			b;
{
	return (a << b);
}
1670:
/* MSVC runtime shim: 64-bit unsigned shift-left, count in low byte. */
__regparm static uint64_t
_aullshl(a, b)
	uint64_t		a;
	uint8_t			b;
{
	return (a << b);
}
1678:
/* MSVC runtime shim: 64-bit signed (arithmetic) shift-right. */
__regparm static int64_t
_allshr(a, b)
	int64_t			a;
	uint8_t			b;
{
	return (a >> b);
}
1686:
/* MSVC runtime shim: 64-bit unsigned (logical) shift-right. */
__regparm static uint64_t
_aullshr(a, b)
	uint64_t		a;
	uint8_t			b;
{
	return (a >> b);
}
1694:
1695: static slist_entry *
1696: ntoskrnl_pushsl(head, entry)
1697: slist_header *head;
1698: slist_entry *entry;
1699: {
1700: slist_entry *oldhead;
1701:
1702: oldhead = head->slh_list.slh_next;
1703: entry->sl_next = head->slh_list.slh_next;
1704: head->slh_list.slh_next = entry;
1705: head->slh_list.slh_depth++;
1706: head->slh_list.slh_seq++;
1707:
1708: return(oldhead);
1709: }
1710:
1711: static slist_entry *
1712: ntoskrnl_popsl(head)
1713: slist_header *head;
1714: {
1715: slist_entry *first;
1716:
1717: first = head->slh_list.slh_next;
1718: if (first != NULL) {
1719: head->slh_list.slh_next = first->sl_next;
1720: head->slh_list.slh_depth--;
1721: head->slh_list.slh_seq++;
1722: }
1723:
1724: return(first);
1725: }
1726:
1727: /*
1728: * We need this to make lookaside lists work for amd64.
1729: * We pass a pointer to ExAllocatePoolWithTag() the lookaside
1730: * list structure. For amd64 to work right, this has to be a
1731: * pointer to the wrapped version of the routine, not the
1732: * original. Letting the Windows driver invoke the original
1733: * function directly will result in a convention calling
1734: * mismatch and a pretty crash. On x86, this effectively
1735: * becomes a no-op since ipt_func and ipt_wrap are the same.
1736: */
1737:
1738: static funcptr
1739: ntoskrnl_findwrap(func)
1740: funcptr func;
1741: {
1742: image_patch_table *patch;
1743:
1744: patch = ntoskrnl_functbl;
1745: while (patch->ipt_func != NULL) {
1746: if ((funcptr)patch->ipt_func == func)
1747: return((funcptr)patch->ipt_wrap);
1748: patch++;
1749: }
1750:
1751: return(NULL);
1752: }
1753:
/*
 * Initialize a paged lookaside list.  If the caller supplies no
 * alloc/free routines we fall back to the wrapped pool routines
 * (the wrapped versions are required for amd64 calling-convention
 * correctness -- see ntoskrnl_findwrap()).  Entries must be at
 * least as large as a slist_entry so freed buffers can be chained.
 */
__stdcall static void
ExInitializePagedLookasideList(
	paged_lookaside_list *lookaside,
	lookaside_alloc_func *allocfunc,
	lookaside_free_func *freefunc,
	uint32_t flags,
	size_t size,
	uint32_t tag,
	uint16_t depth)
{
	bzero((char *)lookaside, sizeof(paged_lookaside_list));

	if (size < sizeof(slist_entry))
		lookaside->nll_l.gl_size = sizeof(slist_entry);
	else
		lookaside->nll_l.gl_size = size;
	lookaside->nll_l.gl_tag = tag;
	if (allocfunc == NULL)
		lookaside->nll_l.gl_allocfunc =
		    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
	else
		lookaside->nll_l.gl_allocfunc = allocfunc;

	if (freefunc == NULL)
		lookaside->nll_l.gl_freefunc =
		    ntoskrnl_findwrap((funcptr)ExFreePool);
	else
		lookaside->nll_l.gl_freefunc = freefunc;

#ifdef __i386__
	/* Obsolete spinlock retained for layout compatibility. */
	KeInitializeSpinLock(&lookaside->nll_obsoletelock);
#endif

	lookaside->nll_l.gl_type = NonPagedPool;
	lookaside->nll_l.gl_depth = depth;
	lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;

	return;
}
1793:
/*
 * Tear down a paged lookaside list: pop every cached buffer and
 * release it through the list's free routine (called via MSCALL
 * because the routine uses Windows calling conventions).
 */
__stdcall static void
ExDeletePagedLookasideList(lookaside)
	paged_lookaside_list *lookaside;
{
	void			*buf;
	__stdcall void		(*freefunc)(void *);

	freefunc = lookaside->nll_l.gl_freefunc;
	while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
		MSCALL1(freefunc, buf);

	return;
}
1807:
/*
 * Initialize a non-paged lookaside list; identical in structure to
 * ExInitializePagedLookasideList(), operating on the non-paged
 * variant of the list type.
 */
__stdcall static void
ExInitializeNPagedLookasideList(
	npaged_lookaside_list *lookaside,
	lookaside_alloc_func *allocfunc,
	lookaside_free_func *freefunc,
	uint32_t flags,
	size_t size,
	uint32_t tag,
	uint16_t depth)
{
	bzero((char *)lookaside, sizeof(npaged_lookaside_list));

	if (size < sizeof(slist_entry))
		lookaside->nll_l.gl_size = sizeof(slist_entry);
	else
		lookaside->nll_l.gl_size = size;
	lookaside->nll_l.gl_tag = tag;
	if (allocfunc == NULL)
		lookaside->nll_l.gl_allocfunc =
		    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
	else
		lookaside->nll_l.gl_allocfunc = allocfunc;

	if (freefunc == NULL)
		lookaside->nll_l.gl_freefunc =
		    ntoskrnl_findwrap((funcptr)ExFreePool);
	else
		lookaside->nll_l.gl_freefunc = freefunc;

#ifdef __i386__
	/* Obsolete spinlock retained for layout compatibility. */
	KeInitializeSpinLock(&lookaside->nll_obsoletelock);
#endif

	lookaside->nll_l.gl_type = NonPagedPool;
	lookaside->nll_l.gl_depth = depth;
	lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;

	return;
}
1847:
/*
 * Tear down a non-paged lookaside list: drain the cached buffers
 * through the list's (Windows-convention) free routine.
 */
__stdcall static void
ExDeleteNPagedLookasideList(lookaside)
	npaged_lookaside_list *lookaside;
{
	void			*buf;
	__stdcall void		(*freefunc)(void *);

	freefunc = lookaside->nll_l.gl_freefunc;
	while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
		MSCALL1(freefunc, buf);

	return;
}
1861:
1862: /*
1863: * Note: the interlocked slist push and pop routines are
1864: * declared to be _fastcall in Windows. gcc 3.4 is supposed
1865: * to have support for this calling convention, however we
1866: * don't have that version available yet, so we kludge things
1867: * up using __regparm__(3) and some argument shuffling.
1868: */
1869:
/*
 * Windows InterlockedPushEntrySList(): push onto an SLIST using
 * the global ntoskrnl spinlock for atomicity; returns the old head.
 */
__fastcall static slist_entry *
InterlockedPushEntrySList(REGARGS2(slist_header *head, slist_entry *entry))
{
	slist_entry *oldhead;

	oldhead = (slist_entry *)FASTCALL3(ExInterlockedPushEntrySList,
	    head, entry, &ntoskrnl_global);

	return(oldhead);
}
1880:
/*
 * Windows InterlockedPopEntrySList(): pop from an SLIST using the
 * global ntoskrnl spinlock; returns the removed entry or NULL.
 */
__fastcall static slist_entry *
InterlockedPopEntrySList(REGARGS1(slist_header *head))
{
	slist_entry *first;

	first = (slist_entry *)FASTCALL2(ExInterlockedPopEntrySList,
	    head, &ntoskrnl_global);

	return(first);
}
1891:
/*
 * Push 'entry' onto an SLIST under the caller-supplied spinlock;
 * returns the previous head (NULL if the list was empty).
 */
__fastcall static slist_entry *
ExInterlockedPushEntrySList(REGARGS2(slist_header *head,
	slist_entry *entry), kspin_lock *lock)
{
	slist_entry		*oldhead;
	uint8_t			irql;

	KeAcquireSpinLock(lock, &irql);
	oldhead = ntoskrnl_pushsl(head, entry);
	KeReleaseSpinLock(lock, irql);

	return(oldhead);
}
1905:
/*
 * Pop the head of an SLIST under the caller-supplied spinlock;
 * returns the removed entry or NULL if the list was empty.
 */
__fastcall static slist_entry *
ExInterlockedPopEntrySList(REGARGS2(slist_header *head, kspin_lock *lock))
{
	slist_entry		*first;
	uint8_t			irql;

	KeAcquireSpinLock(lock, &irql);
	first = ntoskrnl_popsl(head);
	KeReleaseSpinLock(lock, irql);

	return(first);
}
1918:
/*
 * Return the number of entries currently on an SLIST, sampled
 * under the global ntoskrnl spinlock.
 */
__stdcall static uint16_t
ExQueryDepthSList(head)
	slist_header		*head;
{
	uint16_t		depth;
	uint8_t			irql;

	KeAcquireSpinLock(&ntoskrnl_global, &irql);
	depth = head->slh_list.slh_depth;
	KeReleaseSpinLock(&ntoskrnl_global, irql);

	return(depth);
}
1932:
1.3 rittera 1933: /* TODO: Make sure that LOCKDEBUG isn't defined otherwise a "struct simplelock" will
1934: * TODO: be more than 4 bytes. I'm using a kspin_lock as a simplelock, and the
1935: * TODO: kspin lock is 4 bytes, so this is OK as long as LOCKDEBUG isn't defined.
1936: */
1937:
1.1 rittera 1938: /*
1939: * The KeInitializeSpinLock(), KefAcquireSpinLockAtDpcLevel()
1940: * and KefReleaseSpinLockFromDpcLevel() appear to be analogous
1941: * to splnet()/splx() in their use. We can't create a new mutex
1942: * lock here because there is no complementary KeFreeSpinLock()
1943: * function. Instead, we grab a mutex from the mutex pool.
1944: */
/*
 * Prepare a Windows kspin_lock for use.  On FreeBSD the lock is a
 * bare word cleared to "unlocked"; on NetBSD a simplelock is
 * overlaid on it (the TODO above notes the 4-byte size constraint).
 */
__stdcall void
KeInitializeSpinLock(lock)
	kspin_lock		*lock;
{
#ifdef __FreeBSD__
	*lock = 0;
#else /* __NetBSD__ */
	simple_lock_init((struct simplelock *)lock);
#endif

	return;
}
1957:
1958: #ifdef __i386__
/*
 * Acquire a spinlock; the caller is assumed to already be at
 * DISPATCH_LEVEL.  FreeBSD spins on an atomic compare-and-set;
 * NetBSD defers to the overlaid simplelock.
 */
__fastcall void
KefAcquireSpinLockAtDpcLevel(REGARGS1(kspin_lock *lock))
{
#ifdef __FreeBSD__
	while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
		/* sit and spin */;
#else /* __NetBSD__ */
	simple_lock((struct simplelock *)lock);
#endif

	return;
}
1971:
/* Release a spinlock taken with KefAcquireSpinLockAtDpcLevel(). */
__fastcall void
KefReleaseSpinLockFromDpcLevel(REGARGS1(kspin_lock *lock))
{
#ifdef __FreeBSD__
	atomic_store_rel_int((volatile u_int *)lock, 0);
#else /* __NetBSD__ */
	simple_unlock((struct simplelock *)lock);
#endif
	return;
}
1982:
/*
 * Raise IRQL to DISPATCH_LEVEL, acquire the spinlock, and return
 * the previous IRQL for the caller to pass to the release routine.
 * Panics if called above DISPATCH_LEVEL, matching NT's
 * IRQL_NOT_LESS_OR_EQUAL bugcheck.
 */
__stdcall uint8_t
KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
{
        uint8_t                 oldirql;

        if (KeGetCurrentIrql() > DISPATCH_LEVEL)
                panic("IRQL_NOT_LESS_THAN_OR_EQUAL");

        oldirql = KeRaiseIrql(DISPATCH_LEVEL);
        KeAcquireSpinLockAtDpcLevel(lock);

        return(oldirql);
}
1996: #else
/*
 * Non-i386 variant: spin until an atomic swap observes the lock
 * free (swap returning 0 means we took it).
 */
__stdcall void
KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
{
	while (atomic_swap_uint((volatile u_int *)lock, 1) == 1)
		/* sit and spin */;

	return;
}
2005:
/* Non-i386 variant: release the lock with a plain store of 0. */
__stdcall void
KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
{
	*(volatile u_int *)lock = 0;

	return;
}
2013: #endif /* __i386__ */
2014:
/*
 * Windows InterlockedExchange(): atomically store 'val' in *dst and
 * return the previous value, serialized by the global spinlock.
 */
__fastcall uintptr_t
InterlockedExchange(REGARGS2(volatile uint32_t *dst, uintptr_t val))
{
	uint8_t			irql;
	uintptr_t		r;

	KeAcquireSpinLock(&ntoskrnl_global, &irql);
	r = *dst;
	*dst = val;
	KeReleaseSpinLock(&ntoskrnl_global, irql);

	return(r);
}
2028:
2029: __fastcall static uint32_t
2030: InterlockedIncrement(REGARGS1(volatile uint32_t *addend))
2031: {
1.11 ad 2032: atomic_inc_32(addend);
1.1 rittera 2033: return(*addend);
2034: }
2035:
2036: __fastcall static uint32_t
2037: InterlockedDecrement(REGARGS1(volatile uint32_t *addend))
2038: {
1.11 ad 2039: atomic_dec_32(addend);
1.1 rittera 2040: return(*addend);
2041: }
2042:
2043: __fastcall static void
2044: ExInterlockedAddLargeStatistic(REGARGS2(uint64_t *addend, uint32_t inc))
2045: {
2046: uint8_t irql;
2047:
2048: KeAcquireSpinLock(&ntoskrnl_global, &irql);
2049: *addend += inc;
2050: KeReleaseSpinLock(&ntoskrnl_global, irql);
2051:
2052: return;
2053: };
2054:
2055: __stdcall mdl *
1.4 christos 2056: IoAllocateMdl(
2057: void *vaddr,
2058: uint32_t len,
2059: uint8_t secondarybuf,
1.5 christos 2060: uint8_t chargequota,
1.4 christos 2061: irp *iopkt)
1.1 rittera 2062: {
2063: mdl *m;
2064: int zone = 0;
2065:
2066: if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
2067: m = ExAllocatePoolWithTag(NonPagedPool,
2068: MmSizeOfMdl(vaddr, len), 0);
2069: else {
1.2 rittera 2070: #ifdef __FreeBSD__
1.1 rittera 2071: m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
1.2 rittera 2072: #else
2073: m = pool_get(&mdl_pool, PR_WAITOK);
2074: #endif
1.1 rittera 2075: zone++;
2076: }
2077:
2078: if (m == NULL)
2079: return (NULL);
2080:
2081: MmInitializeMdl(m, vaddr, len);
2082:
2083: /*
2084: * MmInitializMdl() clears the flags field, so we
2085: * have to set this here. If the MDL came from the
2086: * MDL UMA zone, tag it so we can release it to
2087: * the right place later.
2088: */
2089: if (zone)
2090: m->mdl_flags = MDL_ZONE_ALLOCED;
2091:
2092: if (iopkt != NULL) {
2093: if (secondarybuf == TRUE) {
2094: mdl *last;
2095: last = iopkt->irp_mdl;
2096: while (last->mdl_next != NULL)
2097: last = last->mdl_next;
2098: last->mdl_next = m;
2099: } else {
2100: if (iopkt->irp_mdl != NULL)
2101: panic("leaking an MDL in IoAllocateMdl()");
2102: iopkt->irp_mdl = m;
2103: }
2104: }
2105:
2106: return (m);
2107: }
2108:
/*
 * Release an MDL obtained from IoAllocateMdl().  MDLs tagged
 * MDL_ZONE_ALLOCED go back to the fixed-size zone/pool; the rest
 * were pool-allocated and are returned with ExFreePool().
 * NULL is tolerated as a no-op.
 */
__stdcall void
IoFreeMdl(m)
	mdl			*m;
{
	if (m == NULL)
		return;

	if (m->mdl_flags & MDL_ZONE_ALLOCED)
#ifdef __FreeBSD__
		uma_zfree(mdl_zone, m);
#else
		pool_put(&mdl_pool, m);
#endif
	else
		ExFreePool(m);

	return;
}
2127:
/*
 * Bytes required for an MDL describing [vaddr, vaddr + len): the
 * mdl header plus one page-array slot per page the range spans.
 */
__stdcall static uint32_t
MmSizeOfMdl(vaddr, len)
	void			*vaddr;
	size_t			len;
{
	uint32_t		l;

	l = sizeof(struct mdl) +
	    (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));

	return(l);
}
2140:
2141: /*
2142: * The Microsoft documentation says this routine fills in the
2143: * page array of an MDL with the _physical_ page addresses that
2144: * comprise the buffer, but we don't really want to do that here.
2145: * Instead, we just fill in the page array with the kernel virtual
2146: * addresses of the buffers.
2147: */
2148: __stdcall static void
2149: MmBuildMdlForNonPagedPool(m)
2150: mdl *m;
2151: {
2152: vm_offset_t *mdl_pages;
2153: int pagecnt, i;
2154:
2155: pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);
2156:
2157: if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
2158: panic("not enough pages in MDL to describe buffer");
2159:
2160: mdl_pages = MmGetMdlPfnArray(m);
2161:
2162: for (i = 0; i < pagecnt; i++)
2163: *mdl_pages = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);
2164:
2165: m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
2166: m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
2167:
2168: return;
2169: }
2170:
2171: __stdcall static void *
1.4 christos 2172: MmMapLockedPages(
2173: mdl *buf,
1.5 christos 2174: uint8_t accessmode)
1.1 rittera 2175: {
2176: buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
2177: return(MmGetMdlVirtualAddress(buf));
2178: }
2179:
2180: __stdcall static void *
1.4 christos 2181: MmMapLockedPagesSpecifyCache(
2182: mdl *buf,
2183: uint8_t accessmode,
1.5 christos 2184: uint32_t cachetype,
2185: void *vaddr,
2186: uint32_t bugcheck,
2187: uint32_t prio)
1.1 rittera 2188: {
2189: return(MmMapLockedPages(buf, accessmode));
2190: }
2191:
2192: __stdcall static void
1.4 christos 2193: MmUnmapLockedPages(
1.5 christos 2194: void *vaddr,
1.4 christos 2195: mdl *buf)
1.1 rittera 2196: {
2197: buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
2198: return;
2199: }
2200:
2201: __stdcall static size_t
2202: RtlCompareMemory(s1, s2, len)
2203: const void *s1;
2204: const void *s2;
2205: size_t len;
2206: {
2207: size_t i, total = 0;
2208: uint8_t *m1, *m2;
2209:
2210: m1 = __DECONST(char *, s1);
2211: m2 = __DECONST(char *, s2);
2212:
2213: for (i = 0; i < len; i++) {
2214: if (m1[i] == m2[i])
2215: total++;
2216: }
2217: return(total);
2218: }
2219:
2220: __stdcall static void
2221: RtlInitAnsiString(dst, src)
2222: ndis_ansi_string *dst;
2223: char *src;
2224: {
2225: ndis_ansi_string *a;
2226:
2227: a = dst;
2228: if (a == NULL)
2229: return;
2230: if (src == NULL) {
2231: a->nas_len = a->nas_maxlen = 0;
2232: a->nas_buf = NULL;
2233: } else {
2234: a->nas_buf = src;
2235: a->nas_len = a->nas_maxlen = strlen(src);
2236: }
2237:
2238: return;
2239: }
2240:
2241: __stdcall static void
2242: RtlInitUnicodeString(dst, src)
2243: ndis_unicode_string *dst;
2244: uint16_t *src;
2245: {
2246: ndis_unicode_string *u;
2247: int i;
2248:
2249: u = dst;
2250: if (u == NULL)
2251: return;
2252: if (src == NULL) {
2253: u->us_len = u->us_maxlen = 0;
2254: u->us_buf = NULL;
2255: } else {
2256: i = 0;
2257: while(src[i] != 0)
2258: i++;
2259: u->us_buf = src;
2260: u->us_len = u->us_maxlen = i * 2;
2261: }
2262:
2263: return;
2264: }
2265:
2266: __stdcall ndis_status
2267: RtlUnicodeStringToInteger(ustr, base, val)
2268: ndis_unicode_string *ustr;
2269: uint32_t base;
2270: uint32_t *val;
2271: {
2272: uint16_t *uchr;
2273: int len, neg = 0;
2274: char abuf[64];
2275: char *astr;
2276:
2277: uchr = ustr->us_buf;
2278: len = ustr->us_len;
2279: bzero(abuf, sizeof(abuf));
2280:
2281: if ((char)((*uchr) & 0xFF) == '-') {
2282: neg = 1;
2283: uchr++;
2284: len -= 2;
2285: } else if ((char)((*uchr) & 0xFF) == '+') {
2286: neg = 0;
2287: uchr++;
2288: len -= 2;
2289: }
2290:
2291: if (base == 0) {
2292: if ((char)((*uchr) & 0xFF) == 'b') {
2293: base = 2;
2294: uchr++;
2295: len -= 2;
2296: } else if ((char)((*uchr) & 0xFF) == 'o') {
2297: base = 8;
2298: uchr++;
2299: len -= 2;
2300: } else if ((char)((*uchr) & 0xFF) == 'x') {
2301: base = 16;
2302: uchr++;
2303: len -= 2;
2304: } else
2305: base = 10;
2306: }
2307:
2308: astr = abuf;
2309: if (neg) {
2310: strcpy(astr, "-");
2311: astr++;
2312: }
2313:
2314: ndis_unicode_to_ascii(uchr, len, &astr);
2315: *val = strtoul(abuf, NULL, base);
2316:
2317: return(NDIS_STATUS_SUCCESS);
2318: }
2319:
2320: __stdcall static void
2321: RtlFreeUnicodeString(ustr)
2322: ndis_unicode_string *ustr;
2323: {
2324: if (ustr->us_buf == NULL)
2325: return;
2326: free(ustr->us_buf, M_DEVBUF);
2327: ustr->us_buf = NULL;
2328: return;
2329: }
2330:
2331: __stdcall static void
2332: RtlFreeAnsiString(astr)
2333: ndis_ansi_string *astr;
2334: {
2335: if (astr->nas_buf == NULL)
2336: return;
2337: free(astr->nas_buf, M_DEVBUF);
2338: astr->nas_buf = NULL;
2339: return;
2340: }
2341:
/*
 * Minimal atoi(3) for Windows drivers.  FreeBSD's kernel has strtol();
 * NetBSD's does not, so convert by hand there.  The hand-rolled path
 * now matches strtol()'s handling of leading whitespace and an
 * optional sign, which it previously lacked (e.g. "-5" parsed as 0).
 */
static int
atoi(const char *str)
{
#ifdef __FreeBSD__
	return (int)strtol(str, (char **)NULL, 10);
#else
	int n, neg = 0;

	/* Skip leading whitespace, as strtol() does. */
	while (*str == ' ' || *str == '\t' || *str == '\n' ||
	    *str == '\r' || *str == '\f' || *str == '\v')
		str++;

	/* Optional sign. */
	if (*str == '-') {
		neg = 1;
		str++;
	} else if (*str == '+')
		str++;

	for (n = 0; *str >= '0' && *str <= '9'; str++)
		n = n * 10 + (*str - '0');

	return neg ? -n : n;
#endif
}
2357:
/*
 * Minimal atol(3), same story as atoi() above: use strtol() on
 * FreeBSD, hand-convert on NetBSD.  The NetBSD path now skips leading
 * whitespace and accepts an optional sign, matching strtol().
 */
static long
atol(const char *str)
{
#ifdef __FreeBSD__
	return strtol(str, (char **)NULL, 10);
#else
	long n;
	int neg = 0;

	/* Skip leading whitespace, as strtol() does. */
	while (*str == ' ' || *str == '\t' || *str == '\n' ||
	    *str == '\r' || *str == '\f' || *str == '\v')
		str++;

	/* Optional sign. */
	if (*str == '-') {
		neg = 1;
		str++;
	} else if (*str == '+')
		str++;

	for (n = 0; *str >= '0' && *str <= '9'; str++)
		n = n * 10 + (*str - '0');

	return neg ? -n : n;
#endif
}
2373:
1.3 rittera 2374:
2375: /*
2376: * stolen from ./netipsec/key.c
2377: */
2378:
2379: #ifdef __NetBSD__
1.4 christos 2380: void srandom(int);
1.5 christos 2381: void srandom(int arg) {return;}
1.3 rittera 2382: #endif
2383:
2384:
1.1 rittera 2385: static int
2386: rand(void)
2387: {
2388: struct timeval tv;
2389:
2390: microtime(&tv);
2391: srandom(tv.tv_usec);
2392: return((int)random());
2393: }
2394:
/*
 * srand() emulation: forward the seed to srandom() (which is a no-op
 * stub on NetBSD, see above).
 */
static void
srand(unsigned int seed)
{
	srandom(seed);
}
2402:
2403: __stdcall static uint8_t
2404: IoIsWdmVersionAvailable(major, minor)
2405: uint8_t major;
2406: uint8_t minor;
2407: {
2408: if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
2409: return(TRUE);
2410: return(FALSE);
2411: }
2412:
2413: __stdcall static ndis_status
1.4 christos 2414: IoGetDeviceProperty(
2415: device_object *devobj,
2416: uint32_t regprop,
1.5 christos 2417: uint32_t buflen,
1.4 christos 2418: void *prop,
2419: uint32_t *reslen)
1.1 rittera 2420: {
2421: driver_object *drv;
2422: uint16_t **name;
2423:
2424: drv = devobj->do_drvobj;
2425:
2426: switch (regprop) {
2427: case DEVPROP_DRIVER_KEYNAME:
2428: name = prop;
2429: *name = drv->dro_drivername.us_buf;
2430: *reslen = drv->dro_drivername.us_len;
2431: break;
2432: default:
2433: return(STATUS_INVALID_PARAMETER_2);
2434: break;
2435: }
2436:
2437: return(STATUS_SUCCESS);
2438: }
2439:
2440: __stdcall static void
1.4 christos 2441: KeInitializeMutex(
2442: kmutant *kmutex,
1.5 christos 2443: uint32_t level)
1.1 rittera 2444: {
2445: INIT_LIST_HEAD((&kmutex->km_header.dh_waitlisthead));
2446: kmutex->km_abandoned = FALSE;
2447: kmutex->km_apcdisable = 1;
2448: kmutex->km_header.dh_sigstate = TRUE;
2449: kmutex->km_header.dh_type = EVENT_TYPE_SYNC;
2450: kmutex->km_header.dh_size = OTYPE_MUTEX;
2451: kmutex->km_acquirecnt = 0;
2452: kmutex->km_ownerthread = NULL;
2453: return;
2454: }
2455:
/*
 * Release one acquisition of a kernel mutex.  Only the owning
 * process/thread may release; when the count drops to zero the mutex
 * is disowned and any waiters are woken.
 *
 * NOTE(review): the return value reads km_acquirecnt after the
 * dispatch lock is dropped, so it can race with a concurrent
 * re-acquire; Windows' KeReleaseMutex also documents returning the
 * previous signal state, not the count — confirm callers don't care.
 */
__stdcall static uint32_t
KeReleaseMutex(
	kmutant			*kmutex,
	uint8_t			kwait)
{
#ifdef __NetBSD__
	int			s;
#endif

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

#ifdef __FreeBSD__
	if (kmutex->km_ownerthread != curthread->td_proc) {
#else
	if (kmutex->km_ownerthread != curproc) {
#endif
#ifdef __FreeBSD__
		mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
		DISPATCH_UNLOCK();
#endif
		return(STATUS_MUTANT_NOT_OWNED);
	}
	kmutex->km_acquirecnt--;
	if (kmutex->km_acquirecnt == 0) {
		kmutex->km_ownerthread = NULL;
		ntoskrnl_wakeup(&kmutex->km_header);
	}

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(kmutex->km_acquirecnt);
}
2497:
2498: __stdcall static uint32_t
2499: KeReadStateMutex(kmutex)
2500: kmutant *kmutex;
2501: {
2502: return(kmutex->km_header.dh_sigstate);
2503: }
2504:
2505: __stdcall void
2506: KeInitializeEvent(kevent, type, state)
2507: nt_kevent *kevent;
2508: uint32_t type;
2509: uint8_t state;
2510: {
2511: INIT_LIST_HEAD((&kevent->k_header.dh_waitlisthead));
2512: kevent->k_header.dh_sigstate = state;
2513: kevent->k_header.dh_type = type;
2514: kevent->k_header.dh_size = OTYPE_EVENT;
2515: return;
2516: }
2517:
/*
 * Clear an event's signal state under the dispatcher lock and return
 * the state it had before clearing.
 */
__stdcall uint32_t
KeResetEvent(kevent)
	nt_kevent		*kevent;
{
	uint32_t		prevstate;
#ifdef __NetBSD__
	int			s;
#endif

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	prevstate = kevent->k_header.dh_sigstate;
	kevent->k_header.dh_sigstate = FALSE;

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(prevstate);
}
2544:
/*
 * Signal an event: wake anything waiting on its dispatch header and
 * return the previous signal state.  The increment/kwait arguments
 * from the Windows API are accepted but not used here.
 */
__stdcall uint32_t
KeSetEvent(
	nt_kevent		*kevent,
	uint32_t		increment,
	uint8_t			kwait)
{
	uint32_t		prevstate;
#ifdef __NetBSD__
	int			s;
#endif

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	prevstate = kevent->k_header.dh_sigstate;
	/* ntoskrnl_wakeup() sets the signal state and pokes waiters. */
	ntoskrnl_wakeup(&kevent->k_header);

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(prevstate);
}
2573:
2574: __stdcall void
2575: KeClearEvent(kevent)
2576: nt_kevent *kevent;
2577: {
2578: kevent->k_header.dh_sigstate = FALSE;
2579: return;
2580: }
2581:
2582: __stdcall uint32_t
2583: KeReadStateEvent(kevent)
2584: nt_kevent *kevent;
2585: {
2586: return(kevent->k_header.dh_sigstate);
2587: }
2588:
2589: __stdcall static ndis_status
1.4 christos 2590: ObReferenceObjectByHandle(
2591: ndis_handle handle,
1.5 christos 2592: uint32_t reqaccess,
2593: void *otype,
2594: uint8_t accessmode,
1.4 christos 2595: void **object,
1.5 christos 2596: void **handleinfo)
1.1 rittera 2597: {
2598: nt_objref *nr;
2599:
2600: nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
2601: if (nr == NULL)
2602: return(NDIS_STATUS_FAILURE);
2603:
2604: INIT_LIST_HEAD((&nr->no_dh.dh_waitlisthead));
2605: nr->no_obj = handle;
2606: nr->no_dh.dh_size = OTYPE_THREAD;
2607: TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
2608: *object = nr;
2609:
2610: return(NDIS_STATUS_SUCCESS);
2611: }
2612:
2613: __fastcall static void
2614: ObfDereferenceObject(REGARGS1(void *object))
2615: {
2616: nt_objref *nr;
2617:
2618: nr = object;
2619: TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
2620: free(nr, M_DEVBUF);
2621:
2622: return;
2623: }
2624:
/*
 * Close an NT handle.  Our handles carry no state that needs tearing
 * down (they are bare proc pointers), so this always succeeds.
 */
__stdcall static uint32_t
ZwClose(ndis_handle handle)
{
	return(STATUS_SUCCESS);
}
2630:
2631: /*
2632: * This is here just in case the thread returns without calling
2633: * PsTerminateSystemThread().
2634: */
2635: static void
2636: ntoskrnl_thrfunc(arg)
2637: void *arg;
2638: {
2639: thread_context *thrctx;
2640: __stdcall uint32_t (*tfunc)(void *);
2641: void *tctx;
2642: uint32_t rval;
2643:
2644: thrctx = arg;
2645: tfunc = thrctx->tc_thrfunc;
2646: tctx = thrctx->tc_thrctx;
2647: free(thrctx, M_TEMP);
2648:
2649: rval = MSCALL1(tfunc, tctx);
2650:
2651: PsTerminateSystemThread(rval);
2652: return; /* notreached */
2653: }
2654:
/*
 * Spawn a kernel thread that runs the driver-supplied entry point via
 * the ntoskrnl_thrfunc() trampoline.  The proc pointer is returned as
 * the "handle"; the context block is freed by the trampoline.
 *
 * NOTE(review): the kthread_create() errno is returned directly as an
 * ndis_status — the numeric spaces differ; confirm callers only test
 * for zero/non-zero.
 */
__stdcall static ndis_status
PsCreateSystemThread(
	ndis_handle		*handle,
	uint32_t		reqaccess,
	void			*objattrs,
	ndis_handle		phandle,
	void			*clientid,
	void			*thrfunc,
	void			*thrctx)
{
	int			error;
	char			tname[128];
	thread_context		*tc;
	struct proc		*p;

	tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
	if (tc == NULL)
		return(NDIS_STATUS_FAILURE);

	tc->tc_thrctx = thrctx;
	tc->tc_thrfunc = thrfunc;

	sprintf(tname, "windows kthread %d", ntoskrnl_kth);
#ifdef __FreeBSD__
	error = kthread_create(ntoskrnl_thrfunc, tc, &p,
	    RFHIGHPID, NDIS_KSTACK_PAGES, tname);
#else
	/* TODO: Provide a larger stack for these threads (NDIS_KSTACK_PAGES) */
	error = ndis_kthread_create(ntoskrnl_thrfunc, tc, &p, NULL, 0, tname);
#endif
	*handle = p;

	ntoskrnl_kth++;

	return(error);
}
2691:
2692: /*
2693: * In Windows, the exit of a thread is an event that you're allowed
2694: * to wait on, assuming you've obtained a reference to the thread using
2695: * ObReferenceObjectByHandle(). Unfortunately, the only way we can
2696: * simulate this behavior is to register each thread we create in a
2697: * reference list, and if someone holds a reference to us, we poke
2698: * them.
2699: */
2700: __stdcall static ndis_status
1.5 christos 2701: PsTerminateSystemThread(ndis_status status)
1.1 rittera 2702: {
2703: struct nt_objref *nr;
1.3 rittera 2704: #ifdef __NetBSD__
2705: int s;
2706: #endif
1.1 rittera 2707:
1.3 rittera 2708: #ifdef __NetBSD__
2709: DISPATCH_LOCK();
2710: #else
1.1 rittera 2711: mtx_lock(&ntoskrnl_dispatchlock);
1.3 rittera 2712: #endif
2713:
1.1 rittera 2714: TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
1.2 rittera 2715: #ifdef __FreeBSD__
1.1 rittera 2716: if (nr->no_obj != curthread->td_proc)
1.2 rittera 2717: #else
2718: if (nr->no_obj != curproc)
2719: #endif
1.1 rittera 2720: continue;
2721: ntoskrnl_wakeup(&nr->no_dh);
2722: break;
2723: }
1.3 rittera 2724:
2725: #ifdef __FreeBSD__
1.1 rittera 2726: mtx_unlock(&ntoskrnl_dispatchlock);
1.3 rittera 2727: #else /* __NetBSD__ */
2728: DISPATCH_UNLOCK();
2729: #endif
1.1 rittera 2730:
2731: ntoskrnl_kth--;
2732:
1.2 rittera 2733: #ifdef __FreeBSD__
1.1 rittera 2734: #if __FreeBSD_version < 502113
2735: mtx_lock(&Giant);
2736: #endif
1.2 rittera 2737: #endif /* __FreeBSD__ */
1.1 rittera 2738: kthread_exit(0);
2739: return(0); /* notreached */
2740: }
2741:
/*
 * Windows DbgPrint().  The actual formatted output is currently
 * disabled (the vprintf call is commented out), so this only reports
 * success; enable the commented lines to see driver debug chatter
 * when booting verbose.
 */
static uint32_t
DbgPrint(char *fmt, ...)
{
	//va_list ap;

	if (bootverbose) {
		//va_start(ap, fmt);
		//vprintf(fmt, ap);
	}

	return(STATUS_SUCCESS);
}
2754:
/*
 * Windows DbgBreakPoint(): drop into the kernel debugger where the
 * platform supports it; a no-op on NetBSD for now.
 */
__stdcall static void
DbgBreakPoint(void)
{
#if defined(__FreeBSD__) && __FreeBSD_version < 502113
	Debugger("DbgBreakPoint(): breakpoint");
#elif defined(__FreeBSD__) && __FreeBSD_version >= 502113
	kdb_enter("DbgBreakPoint(): breakpoint");
#else /* NetBSD case */
	; /* TODO Search how to go into debugger without panic */
#endif
}
2766:
/*
 * Callout handler for emulated NT timers: marks the timer expired,
 * re-arms periodic timers, queues the associated DPC (if any) and
 * signals waiters on the timer's dispatch header.
 */
static void
ntoskrnl_timercall(arg)
	void			*arg;
{
	ktimer			*timer;
	struct timeval		tv;
#ifdef __NetBSD__
	int			s;
#endif

#ifdef __FreeBSD__
	mtx_unlock(&Giant);
#endif

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	timer = arg;

	timer->k_header.dh_inserted = FALSE;

	/*
	 * If this is a periodic timer, re-arm it
	 * so it will fire again. We do this before
	 * calling any deferred procedure calls because
	 * it's possible the DPC might cancel the timer,
	 * in which case it would be wrong for us to
	 * re-arm it again afterwards.
	 */

	if (timer->k_period) {
		tv.tv_sec = 0;
		tv.tv_usec = timer->k_period * 1000;
		timer->k_header.dh_inserted = TRUE;
#ifdef __FreeBSD__
		timer->k_handle = timeout(ntoskrnl_timercall,
		    timer, tvtohz(&tv));
#else /* __NetBSD__ */
		callout_reset(timer->k_handle, tvtohz(&tv), ntoskrnl_timercall, timer);
#endif /* __NetBSD__ */
	}

	if (timer->k_dpc != NULL)
		KeInsertQueueDpc(timer->k_dpc, NULL, NULL);

	ntoskrnl_wakeup(&timer->k_header);

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

#ifdef __FreeBSD__
	mtx_lock(&Giant);
#endif

	return;
}
2829:
2830: __stdcall void
2831: KeInitializeTimer(timer)
2832: ktimer *timer;
2833: {
2834: if (timer == NULL)
2835: return;
2836:
2837: KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
2838:
2839: return;
2840: }
2841:
/*
 * Initialize a timer object of the given dispatch type (notification
 * or synchronization): unsignalled, not armed, with a fresh callout.
 */
__stdcall void
KeInitializeTimerEx(timer, type)
	ktimer			*timer;
	uint32_t		type;
{
	if (timer == NULL)
		return;

	INIT_LIST_HEAD((&timer->k_header.dh_waitlisthead));
	timer->k_header.dh_sigstate = FALSE;
	timer->k_header.dh_inserted = FALSE;
	timer->k_header.dh_type = type;
	timer->k_header.dh_size = OTYPE_TIMER;
#ifdef __FreeBSD__
	callout_handle_init(&timer->k_handle);
#else
	/* NOTE(review): assumes k_handle already points at callout storage. */
	callout_init(timer->k_handle, 0);
#endif

	return;
}
2863:
2864: /*
2865: * This is a wrapper for Windows deferred procedure calls that
2866: * have been placed on an NDIS thread work queue. We need it
2867: * since the DPC could be a _stdcall function. Also, as far as
2868: * I can tell, defered procedure calls must run at DISPATCH_LEVEL.
2869: */
2870: static void
2871: ntoskrnl_run_dpc(arg)
2872: void *arg;
2873: {
2874: __stdcall kdpc_func dpcfunc;
2875: kdpc *dpc;
2876: uint8_t irql;
2877:
2878: dpc = arg;
2879: dpcfunc = dpc->k_deferedfunc;
2880: irql = KeRaiseIrql(DISPATCH_LEVEL);
2881: MSCALL4(dpcfunc, dpc, dpc->k_deferredctx,
2882: dpc->k_sysarg1, dpc->k_sysarg2);
2883: KeLowerIrql(irql);
2884:
2885: return;
2886: }
2887:
2888: __stdcall void
2889: KeInitializeDpc(dpc, dpcfunc, dpcctx)
2890: kdpc *dpc;
2891: void *dpcfunc;
2892: void *dpcctx;
2893: {
2894:
2895: if (dpc == NULL)
2896: return;
2897:
2898: dpc->k_deferedfunc = dpcfunc;
2899: dpc->k_deferredctx = dpcctx;
2900:
2901: return;
2902: }
2903:
2904: __stdcall uint8_t
2905: KeInsertQueueDpc(dpc, sysarg1, sysarg2)
2906: kdpc *dpc;
2907: void *sysarg1;
2908: void *sysarg2;
2909: {
2910: dpc->k_sysarg1 = sysarg1;
2911: dpc->k_sysarg2 = sysarg2;
2912:
2913: if (ndis_sched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
2914: return(FALSE);
2915:
2916: return(TRUE);
2917: }
2918:
2919: __stdcall uint8_t
2920: KeRemoveQueueDpc(dpc)
2921: kdpc *dpc;
2922: {
2923: if (ndis_unsched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
2924: return(FALSE);
2925:
2926: return(TRUE);
2927: }
2928:
/*
 * Arm (or re-arm) a timer.  duetime follows NT conventions, in 100 ns
 * units: negative means relative to now, non-negative is an absolute
 * NT-epoch time (compared against ntoskrnl_time()).  period is in
 * milliseconds; non-zero makes the timer periodic.  Returns TRUE if a
 * previous arming was cancelled (the timer was already pending).
 */
__stdcall uint8_t
KeSetTimerEx(timer, duetime, period, dpc)
	ktimer			*timer;
	int64_t			duetime;
	uint32_t		period;
	kdpc			*dpc;
{
	struct timeval		tv;
	uint64_t		curtime;
	uint8_t			pending;
#ifdef __NetBSD__
	int			s;
#endif

	if (timer == NULL)
		return(FALSE);

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	if (timer->k_header.dh_inserted == TRUE) {
#ifdef __FreeBSD__
		untimeout(ntoskrnl_timercall, timer, timer->k_handle);
#else /* __NetBSD__ */
		callout_stop(timer->k_handle);
#endif
		timer->k_header.dh_inserted = FALSE;
		pending = TRUE;
	} else
		pending = FALSE;

	timer->k_duetime = duetime;
	timer->k_period = period;
	timer->k_header.dh_sigstate = FALSE;
	timer->k_dpc = dpc;

	/* Convert the NT due time into a relative timeval. */
	if (duetime < 0) {
		tv.tv_sec = - (duetime) / 10000000;
		tv.tv_usec = (- (duetime) / 10) -
		    (tv.tv_sec * 1000000);
	} else {
		ntoskrnl_time(&curtime);
		if (duetime < curtime)
			tv.tv_sec = tv.tv_usec = 0;
		else {
			tv.tv_sec = ((duetime) - curtime) / 10000000;
			tv.tv_usec = ((duetime) - curtime) / 10 -
			    (tv.tv_sec * 1000000);
		}
	}

	timer->k_header.dh_inserted = TRUE;
#ifdef __FreeBSD__
	timer->k_handle = timeout(ntoskrnl_timercall, timer, tvtohz(&tv));
#else
	callout_reset(timer->k_handle, tvtohz(&tv), ntoskrnl_timercall, timer);
#endif

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(pending);
}
2998:
2999: __stdcall uint8_t
3000: KeSetTimer(timer, duetime, dpc)
3001: ktimer *timer;
3002: int64_t duetime;
3003: kdpc *dpc;
3004: {
3005: return (KeSetTimerEx(timer, duetime, 0, dpc));
3006: }
3007:
/*
 * Cancel a timer.  If it is still armed, stop the callout and report
 * TRUE; otherwise try to yank its DPC off the work queue (in case the
 * timer fired but its DPC has not run yet).
 *
 * NOTE(review): the else path passes timer->k_dpc to KeRemoveQueueDpc()
 * without a NULL check — confirm ndis_unsched() tolerates a NULL arg
 * for timers armed without a DPC.
 */
__stdcall uint8_t
KeCancelTimer(timer)
	ktimer			*timer;
{
	uint8_t			pending;
#ifdef __NetBSD__
	int			s;
#endif

	if (timer == NULL)
		return(FALSE);

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	if (timer->k_header.dh_inserted == TRUE) {
#ifdef __FreeBSD__
		untimeout(ntoskrnl_timercall, timer, timer->k_handle);
#else /* __NetBSD__ */
		callout_stop(timer->k_handle);
#endif
		pending = TRUE;
	} else
		pending = KeRemoveQueueDpc(timer->k_dpc);

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(pending);
}
3044:
3045: __stdcall uint8_t
3046: KeReadStateTimer(timer)
3047: ktimer *timer;
3048: {
3049: return(timer->k_header.dh_sigstate);
3050: }
3051:
/*
 * Catch-all stub patched in for any ntoskrnl import we have not
 * implemented (see the table below); just announces it was reached.
 */
__stdcall static void
dummy()
{
	printf ("ntoskrnl dummy called...\n");
	return;
}
3058:
3059:
/*
 * Import table mapping Windows kernel (ntoskrnl.exe) symbol names to
 * our emulation routines.  The PE import-patching code looks names up
 * here; IMPORT_FUNC_MAP() binds a Windows name to a differently-named
 * local implementation.
 */
image_patch_table ntoskrnl_functbl[] = {
	IMPORT_FUNC(RtlCompareMemory),
	IMPORT_FUNC(RtlEqualUnicodeString),
	IMPORT_FUNC(RtlCopyUnicodeString),
	IMPORT_FUNC(RtlUnicodeStringToAnsiString),
	IMPORT_FUNC(RtlAnsiStringToUnicodeString),
	IMPORT_FUNC(RtlInitAnsiString),
	IMPORT_FUNC_MAP(RtlInitString, RtlInitAnsiString),
	IMPORT_FUNC(RtlInitUnicodeString),
	IMPORT_FUNC(RtlFreeAnsiString),
	IMPORT_FUNC(RtlFreeUnicodeString),
	IMPORT_FUNC(RtlUnicodeStringToInteger),
	IMPORT_FUNC(sprintf),
	IMPORT_FUNC(vsprintf),
	IMPORT_FUNC_MAP(_snprintf, snprintf),
	IMPORT_FUNC_MAP(_vsnprintf, vsnprintf),
	IMPORT_FUNC(DbgPrint),
	IMPORT_FUNC(DbgBreakPoint),
	IMPORT_FUNC(strncmp),
	IMPORT_FUNC(strcmp),
	IMPORT_FUNC(strncpy),
	IMPORT_FUNC(strcpy),
	IMPORT_FUNC(strlen),
	IMPORT_FUNC(memcpy),
	/*
	 * NOTE(review): memmove is mapped to ntoskrnl_memset, which is
	 * not a move routine — overlapping copies through this entry
	 * look broken; confirm against a later revision.
	 */
	IMPORT_FUNC_MAP(memmove, ntoskrnl_memset),
	IMPORT_FUNC_MAP(memset, ntoskrnl_memset),
	IMPORT_FUNC(IoAllocateDriverObjectExtension),
	IMPORT_FUNC(IoGetDriverObjectExtension),
	IMPORT_FUNC(IofCallDriver),
	IMPORT_FUNC(IofCompleteRequest),
	IMPORT_FUNC(IoAcquireCancelSpinLock),
	IMPORT_FUNC(IoReleaseCancelSpinLock),
	IMPORT_FUNC(IoCancelIrp),
	IMPORT_FUNC(IoCreateDevice),
	IMPORT_FUNC(IoDeleteDevice),
	IMPORT_FUNC(IoGetAttachedDevice),
	IMPORT_FUNC(IoAttachDeviceToDeviceStack),
	IMPORT_FUNC(IoDetachDevice),
	IMPORT_FUNC(IoBuildSynchronousFsdRequest),
	IMPORT_FUNC(IoBuildAsynchronousFsdRequest),
	IMPORT_FUNC(IoBuildDeviceIoControlRequest),
	IMPORT_FUNC(IoAllocateIrp),
	IMPORT_FUNC(IoReuseIrp),
	IMPORT_FUNC(IoMakeAssociatedIrp),
	IMPORT_FUNC(IoFreeIrp),
	IMPORT_FUNC(IoInitializeIrp),
	IMPORT_FUNC(KeWaitForSingleObject),
	IMPORT_FUNC(KeWaitForMultipleObjects),
	/* 64-bit arithmetic helpers emitted by MSVC for i386 drivers. */
	IMPORT_FUNC(_allmul),
	IMPORT_FUNC(_alldiv),
	IMPORT_FUNC(_allrem),
	IMPORT_FUNC(_allshr),
	IMPORT_FUNC(_allshl),
	IMPORT_FUNC(_aullmul),
	IMPORT_FUNC(_aulldiv),
	IMPORT_FUNC(_aullrem),
	IMPORT_FUNC(_aullshr),
	IMPORT_FUNC(_aullshl),
	IMPORT_FUNC(atoi),
	IMPORT_FUNC(atol),
	IMPORT_FUNC(rand),
	IMPORT_FUNC(srand),
	IMPORT_FUNC(WRITE_REGISTER_USHORT),
	IMPORT_FUNC(READ_REGISTER_USHORT),
	IMPORT_FUNC(WRITE_REGISTER_ULONG),
	IMPORT_FUNC(READ_REGISTER_ULONG),
	IMPORT_FUNC(READ_REGISTER_UCHAR),
	IMPORT_FUNC(WRITE_REGISTER_UCHAR),
	IMPORT_FUNC(ExInitializePagedLookasideList),
	IMPORT_FUNC(ExDeletePagedLookasideList),
	IMPORT_FUNC(ExInitializeNPagedLookasideList),
	IMPORT_FUNC(ExDeleteNPagedLookasideList),
	IMPORT_FUNC(InterlockedPopEntrySList),
	IMPORT_FUNC(InterlockedPushEntrySList),
	IMPORT_FUNC(ExQueryDepthSList),
	IMPORT_FUNC_MAP(ExpInterlockedPopEntrySList, InterlockedPopEntrySList),
	IMPORT_FUNC_MAP(ExpInterlockedPushEntrySList,
		InterlockedPushEntrySList),
	IMPORT_FUNC(ExInterlockedPopEntrySList),
	IMPORT_FUNC(ExInterlockedPushEntrySList),
	IMPORT_FUNC(ExAllocatePoolWithTag),
	IMPORT_FUNC(ExFreePool),
#ifdef __i386__
	IMPORT_FUNC(KefAcquireSpinLockAtDpcLevel),
	IMPORT_FUNC(KefReleaseSpinLockFromDpcLevel),
	IMPORT_FUNC(KeAcquireSpinLockRaiseToDpc),
#else
	/*
	 * For AMD64, we can get away with just mapping
	 * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
	 * because the calling conventions end up being the same.
	 * On i386, we have to be careful because KfAcquireSpinLock()
	 * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
	 */
	IMPORT_FUNC(KeAcquireSpinLockAtDpcLevel),
	IMPORT_FUNC(KeReleaseSpinLockFromDpcLevel),
	IMPORT_FUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock),
#endif
	IMPORT_FUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock),
	IMPORT_FUNC(InterlockedIncrement),
	IMPORT_FUNC(InterlockedDecrement),
	IMPORT_FUNC(ExInterlockedAddLargeStatistic),
	IMPORT_FUNC(IoAllocateMdl),
	IMPORT_FUNC(IoFreeMdl),
	IMPORT_FUNC(MmSizeOfMdl),
	IMPORT_FUNC(MmMapLockedPages),
	IMPORT_FUNC(MmMapLockedPagesSpecifyCache),
	IMPORT_FUNC(MmUnmapLockedPages),
	IMPORT_FUNC(MmBuildMdlForNonPagedPool),
	IMPORT_FUNC(KeInitializeSpinLock),
	IMPORT_FUNC(IoIsWdmVersionAvailable),
	IMPORT_FUNC(IoGetDeviceProperty),
	IMPORT_FUNC(KeInitializeMutex),
	IMPORT_FUNC(KeReleaseMutex),
	IMPORT_FUNC(KeReadStateMutex),
	IMPORT_FUNC(KeInitializeEvent),
	IMPORT_FUNC(KeSetEvent),
	IMPORT_FUNC(KeResetEvent),
	IMPORT_FUNC(KeClearEvent),
	IMPORT_FUNC(KeReadStateEvent),
	IMPORT_FUNC(KeInitializeTimer),
	IMPORT_FUNC(KeInitializeTimerEx),
	IMPORT_FUNC(KeSetTimer),
	IMPORT_FUNC(KeSetTimerEx),
	IMPORT_FUNC(KeCancelTimer),
	IMPORT_FUNC(KeReadStateTimer),
	IMPORT_FUNC(KeInitializeDpc),
	IMPORT_FUNC(KeInsertQueueDpc),
	IMPORT_FUNC(KeRemoveQueueDpc),
	IMPORT_FUNC(ObReferenceObjectByHandle),
	IMPORT_FUNC(ObfDereferenceObject),
	IMPORT_FUNC(ZwClose),
	IMPORT_FUNC(PsCreateSystemThread),
	IMPORT_FUNC(PsTerminateSystemThread),

	/*
	 * This last entry is a catch-all for any function we haven't
	 * implemented yet. The PE import list patching routine will
	 * use it for any function that doesn't have an explicit match
	 * in this table.
	 */

	{ NULL, (FUNC)dummy, NULL },

	/* End of list. */

	{ NULL, NULL, NULL }
};
/* CVSweb <webmaster@jp.NetBSD.org> */