wined3d: Implement parallel point lights in process_vertices_strided().
[wine.git] / server / device.c
blob6885bf0af1f3b1fc81cfc3e6992376bf3ab3d217
1 /*
2 * Server-side device support
4 * Copyright (C) 2007 Alexandre Julliard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
21 #include "config.h"
22 #include "wine/port.h"
23 #include "wine/rbtree.h"
25 #include <assert.h>
26 #include <fcntl.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <stdarg.h>
31 #include "ntstatus.h"
32 #define WIN32_NO_STATUS
33 #include "windef.h"
34 #include "winternl.h"
35 #include "ddk/wdm.h"
37 #include "object.h"
38 #include "file.h"
39 #include "handle.h"
40 #include "request.h"
41 #include "process.h"
43 /* IRP object */
/* IRP object */

/* one in-flight I/O request packet; lives on both a device file queue and
 * the owning manager queue until completed */
struct irp_call
{
    struct object          obj;           /* object header */
    struct list            dev_entry;     /* entry in device queue */
    struct list            mgr_entry;     /* entry in manager queue */
    struct device_file    *file;          /* file containing this irp */
    struct thread         *thread;        /* thread that queued the irp */
    struct async          *async;         /* pending async op */
    irp_params_t           params;        /* irp parameters */
    struct iosb           *iosb;          /* I/O status block */
};

static void irp_call_dump( struct object *obj, int verbose );
static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry );
static void irp_call_destroy( struct object *obj );

/* irp calls are waitable (clients wait for completion) but not nameable */
static const struct object_ops irp_call_ops =
{
    sizeof(struct irp_call),          /* size */
    irp_call_dump,                    /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    irp_call_signaled,                /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_kernel_obj_list,               /* get_kernel_obj_list */
    no_close_handle,                  /* close_handle */
    irp_call_destroy                  /* destroy */
};
/* device manager (a list of devices managed by the same client process) */

struct device_manager
{
    struct object          obj;            /* object header */
    struct list            devices;        /* list of devices */
    struct list            requests;       /* list of pending irps across all devices */
    struct irp_call       *current_call;   /* call currently executed on client side */
    struct wine_rb_tree    kernel_objects; /* map of objects that have client side pointer associated */
};

static void device_manager_dump( struct object *obj, int verbose );
static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry );
static void device_manager_destroy( struct object *obj );

/* the manager is waitable: it becomes signaled when irps are queued */
static const struct object_ops device_manager_ops =
{
    sizeof(struct device_manager),    /* size */
    device_manager_dump,              /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    device_manager_signaled,          /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_kernel_obj_list,               /* get_kernel_obj_list */
    no_close_handle,                  /* close_handle */
    device_manager_destroy            /* destroy */
};
/* device (a single device object) */

struct device
{
    struct object          obj;           /* object header */
    struct device_manager *manager;       /* manager for this device (or NULL if deleted) */
    char                  *unix_path;     /* path to unix device if any */
    struct list            kernel_object; /* list of kernel object pointers */
    struct list            entry;         /* entry in device manager list */
    struct list            files;         /* list of open files */
};

static void device_dump( struct object *obj, int verbose );
static struct object_type *device_get_type( struct object *obj );
static void device_destroy( struct object *obj );
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options );
static struct list *device_get_kernel_obj_list( struct object *obj );

/* devices are nameable (they live in the object directory) and openable */
static const struct object_ops device_ops =
{
    sizeof(struct device),            /* size */
    device_dump,                      /* dump */
    device_get_type,                  /* get_type */
    no_add_queue,                     /* add_queue */
    NULL,                             /* remove_queue */
    NULL,                             /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    default_fd_map_access,            /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    directory_link_name,              /* link_name */
    default_unlink_name,              /* unlink_name */
    device_open_file,                 /* open_file */
    device_get_kernel_obj_list,       /* get_kernel_obj_list */
    no_close_handle,                  /* close_handle */
    device_destroy                    /* destroy */
};
/* device file (an open file handle to a device) */

struct device_file
{
    struct object          obj;           /* object header */
    struct device         *device;        /* device for this file */
    struct fd             *fd;            /* file descriptor for irp */
    struct list            kernel_object; /* list of kernel object pointers */
    int                    closed;        /* closed file flag */
    struct list            entry;         /* entry in device list */
    struct list            requests;      /* list of pending irp requests */
};

static void device_file_dump( struct object *obj, int verbose );
static struct fd *device_file_get_fd( struct object *obj );
static struct list *device_file_get_kernel_obj_list( struct object *obj );
static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void device_file_destroy( struct object *obj );
static enum server_fd_type device_file_get_fd_type( struct fd *fd );
static int device_file_read( struct fd *fd, struct async *async, file_pos_t pos );
static int device_file_write( struct fd *fd, struct async *async, file_pos_t pos );
static int device_file_flush( struct fd *fd, struct async *async );
static int device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async );

/* device files behave like regular file objects, backed by an fd */
static const struct object_ops device_file_ops =
{
    sizeof(struct device_file),       /* size */
    device_file_dump,                 /* dump */
    file_get_type,                    /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    default_fd_signaled,              /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    device_file_get_fd,               /* get_fd */
    default_fd_map_access,            /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    device_file_get_kernel_obj_list,  /* get_kernel_obj_list */
    device_file_close_handle,         /* close_handle */
    device_file_destroy               /* destroy */
};

/* fd-level operations: all I/O is turned into irps queued to the manager */
static const struct fd_ops device_file_fd_ops =
{
    default_fd_get_poll_events,       /* get_poll_events */
    default_poll_event,               /* poll_event */
    device_file_get_fd_type,          /* get_fd_type */
    device_file_read,                 /* read */
    device_file_write,                /* write */
    device_file_flush,                /* flush */
    default_fd_get_file_info,         /* get_file_info */
    no_fd_get_volume_info,            /* get_volume_info */
    device_file_ioctl,                /* ioctl */
    default_fd_queue_async,           /* queue_async */
    default_fd_reselect_async         /* reselect_async */
};
230 struct list *no_kernel_obj_list( struct object *obj )
232 return NULL;
/* association between a server object and a client-side kernel pointer,
 * scoped to one device manager */
struct kernel_object
{
    struct device_manager *manager;       /* manager this mapping belongs to */
    client_ptr_t           user_ptr;      /* client-side kernel object pointer */
    struct object         *object;        /* associated server object */
    int                    owned;         /* do we hold a reference on the object? */
    struct list            list_entry;    /* entry in the object's kernel_obj_list */
    struct wine_rb_entry   rb_entry;      /* entry in the manager's kernel_objects tree */
};

/* rb-tree comparison callback; k points to a client_ptr_t key.
 * memcmp gives a byte-wise (not numeric) ordering, which is fine:
 * the tree only needs any consistent total order */
static int compare_kernel_object( const void *k, const struct wine_rb_entry *entry )
{
    struct kernel_object *ptr = WINE_RB_ENTRY_VALUE( entry, struct kernel_object, rb_entry );
    return memcmp( k, &ptr->user_ptr, sizeof(client_ptr_t) );
}
251 static struct kernel_object *kernel_object_from_obj( struct device_manager *manager, struct object *obj )
253 struct kernel_object *kernel_object;
254 struct list *list;
256 if (!(list = obj->ops->get_kernel_obj_list( obj ))) return NULL;
257 LIST_FOR_EACH_ENTRY( kernel_object, list, struct kernel_object, list_entry )
259 if (kernel_object->manager != manager) continue;
260 return kernel_object;
262 return NULL;
265 static client_ptr_t get_kernel_object_ptr( struct device_manager *manager, struct object *obj )
267 struct kernel_object *kernel_object = kernel_object_from_obj( manager, obj );
268 return kernel_object ? kernel_object->user_ptr : 0;
/* associate a client-side kernel pointer with a server object for this manager.
 * Returns the new mapping, or NULL if the object has no kernel-object list,
 * on allocation failure, or if the pointer is already mapped.
 * The mapping starts out not owning a reference on the object. */
static struct kernel_object *set_kernel_object( struct device_manager *manager, struct object *obj, client_ptr_t user_ptr )
{
    struct kernel_object *kernel_object;
    struct list *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return NULL;

    if (!(kernel_object = malloc( sizeof(*kernel_object) ))) return NULL;
    kernel_object->manager = manager;
    kernel_object->user_ptr = user_ptr;
    kernel_object->object = obj;
    kernel_object->owned = 0;

    /* insert in the manager's tree first; a collision means the pointer is taken */
    if (wine_rb_put( &manager->kernel_objects, &user_ptr, &kernel_object->rb_entry ))
    {
        /* kernel_object pointer already set */
        free( kernel_object );
        return NULL;
    }

    list_add_head( list, &kernel_object->list_entry );
    return kernel_object;
}
295 static struct kernel_object *kernel_object_from_ptr( struct device_manager *manager, client_ptr_t client_ptr )
297 struct wine_rb_entry *entry = wine_rb_get( &manager->kernel_objects, &client_ptr );
298 return entry ? WINE_RB_ENTRY_VALUE( entry, struct kernel_object, rb_entry ) : NULL;
301 static void grab_kernel_object( struct kernel_object *ptr )
303 if (!ptr->owned)
305 grab_object( ptr->object );
306 ptr->owned = 1;
310 static void irp_call_dump( struct object *obj, int verbose )
312 struct irp_call *irp = (struct irp_call *)obj;
313 fprintf( stderr, "IRP call file=%p\n", irp->file );
316 static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry )
318 struct irp_call *irp = (struct irp_call *)obj;
320 return !irp->file; /* file is cleared once the irp has completed */
/* object_ops destroy callback: cancel any pending async and drop all held references */
static void irp_call_destroy( struct object *obj )
{
    struct irp_call *irp = (struct irp_call *)obj;

    if (irp->async)
    {
        /* the irp is going away without a result: cancel the client's async */
        async_terminate( irp->async, STATUS_CANCELLED );
        release_object( irp->async );
    }
    if (irp->iosb) release_object( irp->iosb );
    if (irp->file) release_object( irp->file );
    if (irp->thread) release_object( irp->thread );
}
/* allocate a new irp for the given file (may be NULL for manager-only irps
 * such as IRP_CALL_FREE). Takes its iosb from the async if provided,
 * otherwise creates an empty one. Returns NULL on error. */
static struct irp_call *create_irp( struct device_file *file, const irp_params_t *params, struct async *async )
{
    struct irp_call *irp;

    if (file && !file->device->manager)  /* it has been deleted */
    {
        set_error( STATUS_FILE_DELETED );
        return NULL;
    }

    if ((irp = alloc_object( &irp_call_ops )))
    {
        irp->file     = file ? (struct device_file *)grab_object( file ) : NULL;
        irp->thread   = NULL;
        irp->async    = NULL;
        irp->params   = *params;
        irp->iosb     = NULL;

        if (async) irp->iosb = async_get_iosb( async );
        if (!irp->iosb && !(irp->iosb = create_iosb( NULL, 0, 0 )))
        {
            release_object( irp );
            irp = NULL;
        }
    }
    return irp;
}
/* complete an irp: store status/output into its iosb, wake waiters,
 * terminate the pending async, and unlink it from the device queue.
 * Safe to call on an already-completed irp (file is NULL then). */
static void set_irp_result( struct irp_call *irp, unsigned int status,
                            const void *out_data, data_size_t out_size, data_size_t result )
{
    struct device_file *file = irp->file;
    struct iosb *iosb = irp->iosb;

    if (!file) return;  /* already finished */

    /* FIXME: handle the STATUS_PENDING case */
    iosb->status = status;
    iosb->result = result;
    /* never copy more than the client asked for; on memdup failure report no data */
    iosb->out_size = min( iosb->out_size, out_size );
    if (iosb->out_size && !(iosb->out_data = memdup( out_data, iosb->out_size )))
        iosb->out_size = 0;

    /* clear the file pointer first: this is what marks the irp as signaled */
    irp->file = NULL;
    if (irp->async)
    {
        if (result) status = STATUS_ALERTED;  /* partial data: let the async fetch it */
        async_terminate( irp->async, status );
        release_object( irp->async );
        irp->async = NULL;
    }
    wake_up( &irp->obj, 0 );

    /* remove it from the device queue */
    list_remove( &irp->dev_entry );
    release_object( irp );  /* no longer on the device queue */
    release_object( file );
}
/* object_ops dump callback; devices carry no interesting per-object state */
static void device_dump( struct object *obj, int verbose )
{
    fprintf( stderr, "Device\n" );
}
/* object_ops get_type callback: all devices share the "Device" NT object type */
static struct object_type *device_get_type( struct object *obj )
{
    static const WCHAR name[] = {'D','e','v','i','c','e'};
    static const struct unicode_str str = { name, sizeof(name) };
    return get_object_type( &str );
}
/* object_ops destroy callback: a device can only die once all files are gone */
static void device_destroy( struct object *obj )
{
    struct device *device = (struct device *)obj;

    assert( list_empty( &device->files ));

    free( device->unix_path );
    /* still linked in the manager's device list unless already deleted */
    if (device->manager) list_remove( &device->entry );
}
/* queue an irp on its file (if any) and on the manager, waking the manager
 * only when the queue transitions from empty to non-empty */
static void add_irp_to_queue( struct device_manager *manager, struct irp_call *irp, struct thread *thread )
{
    grab_object( irp );  /* grab reference for queued irp */
    irp->thread = thread ? (struct thread *)grab_object( thread ) : NULL;
    if (irp->file) list_add_tail( &irp->file->requests, &irp->dev_entry );
    list_add_tail( &manager->requests, &irp->mgr_entry );
    if (list_head( &manager->requests ) == &irp->mgr_entry) wake_up( &manager->obj, 0 );  /* first one */
}
/* object_ops open_file callback: create a device_file for this device.
 * Devices with a unix path get a real fd on that path; others get a pseudo fd.
 * If the device has a manager, an IRP_CALL_CREATE is queued to notify the driver. */
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options )
{
    struct device *device = (struct device *)obj;
    struct device_file *file;

    if (!(file = alloc_object( &device_file_ops ))) return NULL;

    file->device = (struct device *)grab_object( device );
    file->closed = 0;
    list_init( &file->kernel_object );
    list_init( &file->requests );
    list_add_tail( &device->files, &file->entry );
    if (device->unix_path)
    {
        mode_t mode = 0666;
        /* map generic access bits before opening the unix file */
        access = file->obj.ops->map_access( &file->obj, access );
        file->fd = open_fd( NULL, device->unix_path, O_NONBLOCK | O_LARGEFILE,
                            &mode, access, sharing, options );
        if (file->fd) set_fd_user( file->fd, &device_file_fd_ops, &file->obj );
    }
    else file->fd = alloc_pseudo_fd( &device_file_fd_ops, &file->obj, options );

    if (!file->fd)
    {
        release_object( file );
        return NULL;
    }

    allow_fd_caching( file->fd );

    if (device->manager)
    {
        struct irp_call *irp;
        irp_params_t params;

        memset( &params, 0, sizeof(params) );
        params.create.type    = IRP_CALL_CREATE;
        params.create.access  = access;
        params.create.sharing = sharing;
        params.create.options = options;
        params.create.device  = get_kernel_object_ptr( device->manager, &device->obj );

        /* best effort: the open succeeds even if the irp cannot be created */
        if ((irp = create_irp( file, &params, NULL )))
        {
            add_irp_to_queue( device->manager, irp, NULL );
            release_object( irp );
        }
    }
    return &file->obj;
}
479 static struct list *device_get_kernel_obj_list( struct object *obj )
481 struct device *device = (struct device *)obj;
482 return &device->kernel_object;
485 static void device_file_dump( struct object *obj, int verbose )
487 struct device_file *file = (struct device_file *)obj;
489 fprintf( stderr, "File on device %p\n", file->device );
492 static struct fd *device_file_get_fd( struct object *obj )
494 struct device_file *file = (struct device_file *)obj;
496 return (struct fd *)grab_object( file->fd );
499 static struct list *device_file_get_kernel_obj_list( struct object *obj )
501 struct device_file *file = (struct device_file *)obj;
502 return &file->kernel_object;
/* object_ops close_handle callback: when the last handle goes away, queue an
 * IRP_CALL_CLOSE to the driver (once only, and only if a manager still exists) */
static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct device_file *file = (struct device_file *)obj;

    if (!file->closed && file->device->manager && obj->handle_count == 1)  /* last handle */
    {
        struct irp_call *irp;
        irp_params_t params;

        file->closed = 1;
        memset( &params, 0, sizeof(params) );
        params.close.type = IRP_CALL_CLOSE;

        /* best effort: the close proceeds even if the irp cannot be created */
        if ((irp = create_irp( file, &params, NULL )))
        {
            add_irp_to_queue( file->device->manager, irp, NULL );
            release_object( irp );
        }
    }
    return 1;  /* always allow the close */
}
/* object_ops destroy callback: drop any still-queued irps and our device reference */
static void device_file_destroy( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;
    struct irp_call *irp, *next;

    /* safe iteration: releasing an irp may free it */
    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
    if (file->fd) release_object( file->fd );
    list_remove( &file->entry );
    release_object( file->device );
}
/* fill in the client-visible irp parameters before handing the irp to the
 * driver process: translate server objects into kernel pointers / handles.
 * Returns 0 only if the create-handle allocation fails. */
static int fill_irp_params( struct device_manager *manager, struct irp_call *irp, irp_params_t *params )
{
    switch (irp->params.type)
    {
    case IRP_CALL_NONE:
    case IRP_CALL_FREE:
        break;
    case IRP_CALL_CREATE:
        /* the driver needs a real handle to the file being created */
        irp->params.create.file = alloc_handle( current->process, irp->file,
                                                irp->params.create.access, 0 );
        if (!irp->params.create.file) return 0;
        break;
    case IRP_CALL_CLOSE:
        irp->params.close.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_READ:
        irp->params.read.file     = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.read.out_size = irp->iosb->out_size;
        break;
    case IRP_CALL_WRITE:
        irp->params.write.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_FLUSH:
        irp->params.flush.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_IOCTL:
        irp->params.ioctl.file     = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.ioctl.out_size = irp->iosb->out_size;
        break;
    }

    *params = irp->params;
    return 1;
}
577 static void free_irp_params( struct irp_call *irp )
579 switch (irp->params.type)
581 case IRP_CALL_CREATE:
582 close_handle( current->process, irp->params.create.file );
583 break;
584 default:
585 break;
/* queue an irp to the device */
/* wraps an async I/O request into an irp and hands it to the device manager;
 * the request always completes asynchronously (STATUS_PENDING) */
static int queue_irp( struct device_file *file, const irp_params_t *params, struct async *async )
{
    struct irp_call *irp = create_irp( file, params, async );
    if (!irp) return 0;

    fd_queue_async( file->fd, async, ASYNC_TYPE_WAIT );
    irp->async = (struct async *)grab_object( async );
    add_irp_to_queue( file->device->manager, irp, current );
    release_object( irp );
    set_error( STATUS_PENDING );
    return 1;
}
/* fd_ops get_fd_type callback: all device files report the generic device type */
static enum server_fd_type device_file_get_fd_type( struct fd *fd )
{
    return FD_TYPE_DEVICE;
}
608 static int device_file_read( struct fd *fd, struct async *async, file_pos_t pos )
610 struct device_file *file = get_fd_user( fd );
611 irp_params_t params;
613 memset( &params, 0, sizeof(params) );
614 params.read.type = IRP_CALL_READ;
615 params.read.key = 0;
616 params.read.pos = pos;
617 return queue_irp( file, &params, async );
620 static int device_file_write( struct fd *fd, struct async *async, file_pos_t pos )
622 struct device_file *file = get_fd_user( fd );
623 irp_params_t params;
625 memset( &params, 0, sizeof(params) );
626 params.write.type = IRP_CALL_WRITE;
627 params.write.key = 0;
628 params.write.pos = pos;
629 return queue_irp( file, &params, async );
632 static int device_file_flush( struct fd *fd, struct async *async )
634 struct device_file *file = get_fd_user( fd );
635 irp_params_t params;
637 memset( &params, 0, sizeof(params) );
638 params.flush.type = IRP_CALL_FLUSH;
639 return queue_irp( file, &params, async );
642 static int device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async )
644 struct device_file *file = get_fd_user( fd );
645 irp_params_t params;
647 memset( &params, 0, sizeof(params) );
648 params.ioctl.type = IRP_CALL_IOCTL;
649 params.ioctl.code = code;
650 return queue_irp( file, &params, async );
/* create a named device object attached to the given manager */
static struct device *create_device( struct object *root, const struct unicode_str *name,
                                     struct device_manager *manager )
{
    struct device *device;

    if ((device = create_named_object( root, &device_ops, name, 0, NULL )))
    {
        device->unix_path = NULL;  /* managed devices have no unix backing file */
        device->manager = manager;
        list_add_tail( &manager->devices, &device->entry );
        list_init( &device->kernel_object );
        list_init( &device->files );
    }
    return device;
}
669 struct object *create_unix_device( struct object *root, const struct unicode_str *name,
670 const char *unix_path )
672 struct device *device;
674 if ((device = create_named_object( root, &device_ops, name, 0, NULL )))
676 device->unix_path = strdup( unix_path );
677 device->manager = NULL; /* no manager, requests go straight to the Unix device */
678 list_init( &device->kernel_object );
679 list_init( &device->files );
681 return &device->obj;
/* terminate requests when the underlying device is deleted */
static void delete_file( struct device_file *file )
{
    struct irp_call *irp, *next;

    /* terminate all pending requests */
    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        /* drop it from the manager queue first; set_irp_result unlinks dev_entry */
        list_remove( &irp->mgr_entry );
        set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 );
    }
}
/* detach a device from its manager: terminate all open files' requests,
 * unlink the device name, and mark it deleted (manager = NULL) */
static void delete_device( struct device *device )
{
    struct device_file *file, *next;

    if (!device->manager) return;  /* already deleted */

    LIST_FOR_EACH_ENTRY_SAFE( file, next, &device->files, struct device_file, entry )
        delete_file( file );

    unlink_named_object( &device->obj );
    list_remove( &device->entry );
    device->manager = NULL;
}
/* object_ops dump callback for device managers */
static void device_manager_dump( struct object *obj, int verbose )
{
    fputs( "Device manager\n", stderr );
}
718 static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry )
720 struct device_manager *manager = (struct device_manager *)obj;
722 return !list_empty( &manager->requests );
/* object_ops destroy callback: tear down the current call, all kernel object
 * mappings, every device, and any irps still queued on the manager */
static void device_manager_destroy( struct object *obj )
{
    struct device_manager *manager = (struct device_manager *)obj;
    struct kernel_object *kernel_object;
    struct list *ptr;

    if (manager->current_call)
    {
        release_object( manager->current_call );
        manager->current_call = NULL;
    }

    /* drain the rb-tree; each mapping also unlinks from its object's list */
    while (manager->kernel_objects.root)
    {
        kernel_object = WINE_RB_ENTRY_VALUE( manager->kernel_objects.root, struct kernel_object, rb_entry );
        wine_rb_remove( &manager->kernel_objects, &kernel_object->rb_entry );
        list_remove( &kernel_object->list_entry );
        if (kernel_object->owned) release_object( kernel_object->object );
        free( kernel_object );
    }

    while ((ptr = list_head( &manager->devices )))
    {
        struct device *device = LIST_ENTRY( ptr, struct device, entry );
        delete_device( device );
    }

    /* anything left here are completed file-less irps still on the manager queue */
    while ((ptr = list_head( &manager->requests )))
    {
        struct irp_call *irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
        list_remove( &irp->mgr_entry );
        assert( !irp->file && !irp->async );
        release_object( irp );
    }
}
761 static struct device_manager *create_device_manager(void)
763 struct device_manager *manager;
765 if ((manager = alloc_object( &device_manager_ops )))
767 manager->current_call = NULL;
768 list_init( &manager->devices );
769 list_init( &manager->requests );
770 wine_rb_init( &manager->kernel_objects, compare_kernel_object );
772 return manager;
/* called when a server object dies: notify every manager that mapped it
 * (via an IRP_CALL_FREE irp) and drop all of its kernel object mappings.
 * None of the mappings may own a reference, or the object could not die. */
void free_kernel_objects( struct object *obj )
{
    struct list *ptr, *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return;

    while ((ptr = list_head( list )))
    {
        struct kernel_object *kernel_object = LIST_ENTRY( ptr, struct kernel_object, list_entry );
        struct irp_call *irp;
        irp_params_t params;

        assert( !kernel_object->owned );

        memset( &params, 0, sizeof(params) );
        params.free.type = IRP_CALL_FREE;
        params.free.obj  = kernel_object->user_ptr;

        /* best effort: the mapping is freed even if the irp cannot be created */
        if ((irp = create_irp( NULL, &params, NULL )))
        {
            add_irp_to_queue( kernel_object->manager, irp, NULL );
            release_object( irp );
        }

        list_remove( &kernel_object->list_entry );
        wine_rb_remove( &kernel_object->manager->kernel_objects, &kernel_object->rb_entry );
        free( kernel_object );
    }
}
/* create a device manager */
DECL_HANDLER(create_device_manager)
{
    struct device_manager *manager = create_device_manager();

    if (manager)
    {
        /* hand the client a handle; our own reference is dropped right away */
        reply->handle = alloc_handle( current->process, manager, req->access, req->attributes );
        release_object( manager );
    }
}
/* create a device */
DECL_HANDLER(create_device)
{
    struct device *device;
    struct unicode_str name = get_req_unicode_str();
    struct device_manager *manager;
    struct object *root = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir )))
    {
        release_object( manager );
        return;
    }

    if ((device = create_device( root, &name, manager )))
    {
        /* bind the client's kernel pointer to the device and keep it alive
         * until the client explicitly releases it */
        struct kernel_object *ptr = set_kernel_object( manager, &device->obj, req->user_ptr );
        if (ptr)
            grab_kernel_object( ptr );
        else
            set_error( STATUS_NO_MEMORY );
        release_object( device );
    }

    if (root) release_object( root );
    release_object( manager );
}
/* delete a device */
DECL_HANDLER(delete_device)
{
    struct device_manager *manager;
    struct kernel_object *ref;
    struct device *device;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    /* the kernel pointer must map to an actual device object */
    if ((ref = kernel_object_from_ptr( manager, req->device )) && ref->object->ops == &device_ops)
    {
        /* grab a reference: delete_device may drop the mapping's own reference */
        device = (struct device *)grab_object( ref->object );
        delete_device( device );
        release_object( device );
    }
    else set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}
/* retrieve the next pending device irp request */
DECL_HANDLER(get_next_device_request)
{
    struct irp_call *irp;
    struct device_manager *manager;
    struct list *ptr;
    struct iosb *iosb;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    /* first, complete the previous irp if the client passed one back */
    if (req->prev)
    {
        if ((irp = (struct irp_call *)get_handle_obj( current->process, req->prev, 0, &irp_call_ops )))
        {
            set_irp_result( irp, req->status, NULL, 0, 0 );
            close_handle( current->process, req->prev );  /* avoid an extra round-trip for close */
            release_object( irp );
        }
    }

    /* the previously dispatched call is done being processed client side */
    if (manager->current_call)
    {
        free_irp_params( manager->current_call );
        release_object( manager->current_call );
        manager->current_call = NULL;
    }

    clear_error();

    if ((ptr = list_head( &manager->requests )))
    {
        irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
        if (irp->thread)
        {
            reply->client_thread = get_kernel_object_ptr( manager, &irp->thread->obj );
            reply->client_tid    = get_thread_id( irp->thread );
        }
        iosb = irp->iosb;
        reply->in_size = iosb->in_size;
        /* input data must fit in the reply buffer; otherwise ask the client to retry */
        if (iosb->in_size > get_reply_max_size()) set_error( STATUS_BUFFER_OVERFLOW );
        else if (!irp->file || (reply->next = alloc_handle( current->process, irp, 0, 0 )))
        {
            if (fill_irp_params( manager, irp, &reply->params ))
            {
                /* transfer ownership of the input buffer to the reply */
                set_reply_data_ptr( iosb->in_data, iosb->in_size );
                iosb->in_data = NULL;
                iosb->in_size = 0;
                list_remove( &irp->mgr_entry );
                list_init( &irp->mgr_entry );
                /* we already own the object if it's only on manager queue */
                if (irp->file) grab_object( irp );
                manager->current_call = irp;
            }
            else close_handle( current->process, reply->next );
        }
    }
    else set_error( STATUS_PENDING );

    release_object( manager );
}
/* store results of an async irp */
DECL_HANDLER(set_irp_result)
{
    struct irp_call *irp;

    if ((irp = (struct irp_call *)get_handle_obj( current->process, req->handle, 0, &irp_call_ops )))
    {
        /* request data carries the output buffer contents */
        set_irp_result( irp, req->status, get_req_data(), get_req_data_size(), req->size );
        close_handle( current->process, req->handle );  /* avoid an extra round-trip for close */
        release_object( irp );
    }
}
/* get kernel pointer from server object */
DECL_HANDLER(get_kernel_object_ptr)
{
    struct device_manager *manager;
    struct object *object = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    /* NULL type filter: any object kind may have a kernel pointer */
    if ((object = get_handle_obj( current->process, req->handle, 0, NULL )))
    {
        reply->user_ptr = get_kernel_object_ptr( manager, object );  /* 0 if unmapped */
        release_object( object );
    }

    release_object( manager );
}
/* associate kernel pointer with server object */
DECL_HANDLER(set_kernel_object_ptr)
{
    struct device_manager *manager;
    struct object *object = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (!(object = get_handle_obj( current->process, req->handle, 0, NULL )))
    {
        release_object( manager );
        return;
    }

    /* fails if the object cannot hold mappings, on OOM, or if ptr already mapped */
    if (!set_kernel_object( manager, object, req->user_ptr ))
        set_error( STATUS_INVALID_HANDLE );

    release_object( object );
    release_object( manager );
}
/* grab server object reference from kernel object pointer */
DECL_HANDLER(grab_kernel_object)
{
    struct device_manager *manager;
    struct kernel_object *ref;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    /* grabbing an already-owned mapping is an error at the protocol level */
    if ((ref = kernel_object_from_ptr( manager, req->user_ptr )) && !ref->owned)
        grab_kernel_object( ref );
    else
        set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}
/* release server object reference from kernel object pointer */
DECL_HANDLER(release_kernel_object)
{
    struct device_manager *manager;
    struct kernel_object *ref;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    /* only an owned mapping has a reference to give back */
    if ((ref = kernel_object_from_ptr( manager, req->user_ptr )) && ref->owned)
    {
        /* clear the flag before releasing: the release may destroy the mapping */
        ref->owned = 0;
        release_object( ref->object );
    }
    else set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}
/* get handle from kernel object pointer */
DECL_HANDLER(get_kernel_object_handle)
{
    struct device_manager *manager;
    struct kernel_object *ref;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->user_ptr )))
        reply->handle = alloc_handle( current->process, ref->object, req->access, 0 );
    else
        set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}